Mirror of https://github.com/GreptimeTeam/greptimedb.git, synced 2025-12-23 14:40:01 +00:00

Compare commits: 3 commits (docs/vecto... vs. avoid-quer...)

| Author | SHA1 | Date |
|---|---|---|
| | 1bfba48755 | |
| | 457998f0fe | |
| | b02c256157 | |
@@ -2,16 +2,4 @@
 linker = "aarch64-linux-gnu-gcc"

 [alias]
-sqlness = "run --bin sqlness-runner --target-dir target/sqlness --"
+sqlness = "run --bin sqlness-runner --"

-[unstable.git]
-shallow_index = true
-shallow_deps = true
-[unstable.gitoxide]
-fetch = true
-checkout = true
-list_files = true
-internal_use_git2 = false

-[env]
-CARGO_WORKSPACE_DIR = { value = "", relative = true }
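For context, an [alias] entry like the one above is invoked as a cargo subcommand, so both sides of this change are run the same way and only the build output location differs; a minimal usage sketch:

# Runs the sqlness-runner binary through the alias; any extra arguments after
# the alias name are forwarded to sqlness-runner itself.
cargo sqlness
# With "--target-dir target/sqlness" the build artifacts land in target/sqlness;
# without it they go to the default target/ directory.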
@@ -18,7 +18,6 @@ GT_AZBLOB_ENDPOINT=AZBLOB endpoint
 GT_GCS_BUCKET = GCS bucket
 GT_GCS_SCOPE = GCS scope
 GT_GCS_CREDENTIAL_PATH = GCS credential path
-GT_GCS_CREDENTIAL = GCS credential
 GT_GCS_ENDPOINT = GCS end point
 # Settings for kafka wal test
 GT_KAFKA_ENDPOINTS = localhost:9092
@@ -29,8 +28,3 @@ GT_MYSQL_ADDR = localhost:4002
 # Setting for unstable fuzz tests
 GT_FUZZ_BINARY_PATH=/path/to/
 GT_FUZZ_INSTANCE_ROOT_DIR=/tmp/unstable_greptime
-GT_FUZZ_INPUT_MAX_ROWS=2048
-GT_FUZZ_INPUT_MAX_TABLES=32
-GT_FUZZ_INPUT_MAX_COLUMNS=32
-GT_FUZZ_INPUT_MAX_ALTER_ACTIONS=256
-GT_FUZZ_INPUT_MAX_INSERT_ACTIONS=8
.github/CODEOWNERS (24 changed lines)
@@ -4,24 +4,24 @@

 * @GreptimeTeam/db-approver

-## [Module] Database Engine
-/src/index @evenyag @discord9 @WenyXu
+## [Module] Databse Engine
+/src/index @zhongzc
 /src/mito2 @evenyag @v0y4g3r @waynexia
-/src/query @evenyag @waynexia @discord9
+/src/query @evenyag

 ## [Module] Distributed
-/src/common/meta @MichaelScofield @WenyXu
-/src/common/procedure @MichaelScofield @WenyXu
-/src/meta-client @MichaelScofield @WenyXu
-/src/meta-srv @MichaelScofield @WenyXu
+/src/common/meta @MichaelScofield
+/src/common/procedure @MichaelScofield
+/src/meta-client @MichaelScofield
+/src/meta-srv @MichaelScofield

 ## [Module] Write Ahead Log
-/src/log-store @v0y4g3r @WenyXu
-/src/store-api @v0y4g3r @evenyag
+/src/log-store @v0y4g3r
+/src/store-api @v0y4g3r

 ## [Module] Metrics Engine
-/src/metric-engine @waynexia @WenyXu
-/src/promql @waynexia @evenyag @discord9
+/src/metric-engine @waynexia
+/src/promql @waynexia

 ## [Module] Flow
-/src/flow @discord9 @waynexia
+/src/flow @zhongzc @waynexia
@@ -41,14 +41,7 @@ runs:
 username: ${{ inputs.dockerhub-image-registry-username }}
 password: ${{ inputs.dockerhub-image-registry-token }}

-- name: Set up qemu for multi-platform builds
-uses: docker/setup-qemu-action@v3
-with:
-platforms: linux/amd64,linux/arm64
-# The latest version will lead to segmentation fault.
-image: tonistiigi/binfmt:qemu-v7.0.0-28
+- name: Build and push dev-builder-ubuntu image

-- name: Build and push dev-builder-ubuntu image # Build image for amd64 and arm64 platform.
 shell: bash
 if: ${{ inputs.build-dev-builder-ubuntu == 'true' }}
 run: |
@@ -57,9 +50,9 @@ runs:
 BUILDX_MULTI_PLATFORM_BUILD=all \
 IMAGE_REGISTRY=${{ inputs.dockerhub-image-registry }} \
 IMAGE_NAMESPACE=${{ inputs.dockerhub-image-namespace }} \
-DEV_BUILDER_IMAGE_TAG=${{ inputs.version }}
+IMAGE_TAG=${{ inputs.version }}

-- name: Build and push dev-builder-centos image # Only build image for amd64 platform.
+- name: Build and push dev-builder-centos image
 shell: bash
 if: ${{ inputs.build-dev-builder-centos == 'true' }}
 run: |
@@ -68,7 +61,7 @@ runs:
 BUILDX_MULTI_PLATFORM_BUILD=amd64 \
 IMAGE_REGISTRY=${{ inputs.dockerhub-image-registry }} \
 IMAGE_NAMESPACE=${{ inputs.dockerhub-image-namespace }} \
-DEV_BUILDER_IMAGE_TAG=${{ inputs.version }}
+IMAGE_TAG=${{ inputs.version }}

 - name: Build and push dev-builder-android image # Only build image for amd64 platform.
 shell: bash
@@ -76,7 +69,8 @@ runs:
 run: |
 make dev-builder \
 BASE_IMAGE=android \
-BUILDX_MULTI_PLATFORM_BUILD=amd64 \
 IMAGE_REGISTRY=${{ inputs.dockerhub-image-registry }} \
 IMAGE_NAMESPACE=${{ inputs.dockerhub-image-namespace }} \
-DEV_BUILDER_IMAGE_TAG=${{ inputs.version }}
+IMAGE_TAG=${{ inputs.version }} && \

+docker push ${{ inputs.dockerhub-image-registry }}/${{ inputs.dockerhub-image-namespace }}/dev-builder-android:${{ inputs.version }}
.github/actions/build-greptime-binary/action.yml (35 changed lines)
@@ -24,31 +24,9 @@ inputs:
 description: Build android artifacts
 required: false
 default: 'false'
-image-namespace:
-description: Image Namespace
-required: false
-default: 'greptime'
-image-registry:
-description: Image Registry
-required: false
-default: 'docker.io'
-large-page-size:
-description: Build GreptimeDB with large page size (65536).
-required: false
-default: 'false'

 runs:
 using: composite
 steps:
-- name: Set extra build environment variables
-shell: bash
-run: |
-if [[ '${{ inputs.large-page-size }}' == 'true' ]]; then
-echo 'EXTRA_BUILD_ENVS="JEMALLOC_SYS_WITH_LG_PAGE=16"' >> $GITHUB_ENV
-else
-echo 'EXTRA_BUILD_ENVS=' >> $GITHUB_ENV
-fi

 - name: Build greptime binary
 shell: bash
 if: ${{ inputs.build-android-artifacts == 'false' }}
@@ -57,10 +35,7 @@ runs:
 make build-by-dev-builder \
 CARGO_PROFILE=${{ inputs.cargo-profile }} \
 FEATURES=${{ inputs.features }} \
-BASE_IMAGE=${{ inputs.base-image }} \
-IMAGE_NAMESPACE=${{ inputs.image-namespace }} \
-IMAGE_REGISTRY=${{ inputs.image-registry }} \
-EXTRA_BUILD_ENVS=$EXTRA_BUILD_ENVS
+BASE_IMAGE=${{ inputs.base-image }}

 - name: Upload artifacts
 uses: ./.github/actions/upload-artifacts
@@ -69,7 +44,7 @@ runs:
 PROFILE_TARGET: ${{ inputs.cargo-profile == 'dev' && 'debug' || inputs.cargo-profile }}
 with:
 artifacts-dir: ${{ inputs.artifacts-dir }}
-target-files: ./target/$PROFILE_TARGET/greptime
+target-file: ./target/$PROFILE_TARGET/greptime
 version: ${{ inputs.version }}
 working-dir: ${{ inputs.working-dir }}

@@ -78,15 +53,13 @@ runs:
 shell: bash
 if: ${{ inputs.build-android-artifacts == 'true' }}
 run: |
-cd ${{ inputs.working-dir }} && make strip-android-bin \
-IMAGE_NAMESPACE=${{ inputs.image-namespace }} \
-IMAGE_REGISTRY=${{ inputs.image-registry }}
+cd ${{ inputs.working-dir }} && make strip-android-bin

 - name: Upload android artifacts
 uses: ./.github/actions/upload-artifacts
 if: ${{ inputs.build-android-artifacts == 'true' }}
 with:
 artifacts-dir: ${{ inputs.artifacts-dir }}
-target-files: ./target/aarch64-linux-android/release/greptime
+target-file: ./target/aarch64-linux-android/release/greptime
 version: ${{ inputs.version }}
 working-dir: ${{ inputs.working-dir }}
.github/actions/build-greptime-images/action.yml (10 changed lines)
@@ -34,8 +34,8 @@ inputs:
 required: true
 push-latest-tag:
 description: Whether to push the latest tag
-required: true
-default: 'false'
+required: false
+default: 'true'
 runs:
 using: composite
 steps:
@@ -47,11 +47,7 @@ runs:
 password: ${{ inputs.image-registry-password }}

 - name: Set up qemu for multi-platform builds
-uses: docker/setup-qemu-action@v3
-with:
-platforms: linux/amd64,linux/arm64
-# The latest version will lead to segmentation fault.
-image: tonistiigi/binfmt:qemu-v7.0.0-28
+uses: docker/setup-qemu-action@v2

 - name: Set up buildx
 uses: docker/setup-buildx-action@v2
.github/actions/build-images/action.yml (8 changed lines)
@@ -22,8 +22,8 @@ inputs:
 required: true
 push-latest-tag:
 description: Whether to push the latest tag
-required: true
-default: 'false'
+required: false
+default: 'true'
 dev-mode:
 description: Enable dev mode, only build standard greptime
 required: false
@@ -41,8 +41,8 @@ runs:
 image-name: ${{ inputs.image-name }}
 image-tag: ${{ inputs.version }}
 docker-file: docker/ci/ubuntu/Dockerfile
-amd64-artifact-name: greptime-linux-amd64-${{ inputs.version }}
-arm64-artifact-name: greptime-linux-arm64-${{ inputs.version }}
+amd64-artifact-name: greptime-linux-amd64-pyo3-${{ inputs.version }}
+arm64-artifact-name: greptime-linux-arm64-pyo3-${{ inputs.version }}
 platforms: linux/amd64,linux/arm64
 push-latest-tag: ${{ inputs.push-latest-tag }}
.github/actions/build-linux-artifacts/action.yml (36 changed lines)
@@ -17,20 +17,10 @@ inputs:
 description: Enable dev mode, only build standard greptime
 required: false
 default: "false"
-image-namespace:
-description: Image Namespace
-required: true
-image-registry:
-description: Image Registry
-required: true
 working-dir:
 description: Working directory to build the artifacts
 required: false
 default: .
-large-page-size:
-description: Build GreptimeDB with large page size (65536).
-required: false
-default: 'false'
 runs:
 using: composite
 steps:
@@ -40,9 +30,7 @@ runs:
 # NOTE: If the BUILD_JOBS > 4, it's always OOM in EC2 instance.
 run: |
 cd ${{ inputs.working-dir }} && \
-make run-it-in-container BUILD_JOBS=4 \
-IMAGE_NAMESPACE=${{ inputs.image-namespace }} \
-IMAGE_REGISTRY=${{ inputs.image-registry }}
+make run-it-in-container BUILD_JOBS=4

 - name: Upload sqlness logs
 if: ${{ failure() && inputs.disable-run-tests == 'false' }} # Only upload logs when the integration tests failed.
@@ -52,7 +40,18 @@ runs:
 path: /tmp/greptime-*.log
 retention-days: 3

-- name: Build greptime # Builds standard greptime binary
+- name: Build standard greptime
+uses: ./.github/actions/build-greptime-binary
+with:
+base-image: ubuntu
+features: pyo3_backend,servers/dashboard
+cargo-profile: ${{ inputs.cargo-profile }}
+artifacts-dir: greptime-linux-${{ inputs.arch }}-pyo3-${{ inputs.version }}
+version: ${{ inputs.version }}
+working-dir: ${{ inputs.working-dir }}

+- name: Build greptime without pyo3
+if: ${{ inputs.dev-mode == 'false' }}
 uses: ./.github/actions/build-greptime-binary
 with:
 base-image: ubuntu
@@ -61,9 +60,6 @@ runs:
 artifacts-dir: greptime-linux-${{ inputs.arch }}-${{ inputs.version }}
 version: ${{ inputs.version }}
 working-dir: ${{ inputs.working-dir }}
-image-registry: ${{ inputs.image-registry }}
-image-namespace: ${{ inputs.image-namespace }}
-large-page-size: ${{ inputs.large-page-size }}

 - name: Clean up the target directory # Clean up the target directory for the centos7 base image, or it will still use the objects of last build.
 shell: bash
@@ -80,9 +76,6 @@ runs:
 artifacts-dir: greptime-linux-${{ inputs.arch }}-centos-${{ inputs.version }}
 version: ${{ inputs.version }}
 working-dir: ${{ inputs.working-dir }}
-image-registry: ${{ inputs.image-registry }}
-image-namespace: ${{ inputs.image-namespace }}
-large-page-size: ${{ inputs.large-page-size }}

 - name: Build greptime on android base image
 uses: ./.github/actions/build-greptime-binary
@@ -93,6 +86,3 @@ runs:
 version: ${{ inputs.version }}
 working-dir: ${{ inputs.working-dir }}
 build-android-artifacts: true
-image-registry: ${{ inputs.image-registry }}
-image-namespace: ${{ inputs.image-namespace }}
-large-page-size: ${{ inputs.large-page-size }}
.github/actions/build-macos-artifacts/action.yml (19 changed lines)
@@ -4,6 +4,9 @@ inputs:
 arch:
 description: Architecture to build
 required: true
+rust-toolchain:
+description: Rust toolchain to use
+required: true
 cargo-profile:
 description: Cargo profile to build
 required: true
@@ -40,9 +43,10 @@ runs:
 brew install protobuf

 - name: Install rust toolchain
-uses: actions-rust-lang/setup-rust-toolchain@v1
+uses: dtolnay/rust-toolchain@master
 with:
-target: ${{ inputs.arch }}
+toolchain: ${{ inputs.rust-toolchain }}
+targets: ${{ inputs.arch }}

 - name: Start etcd # For integration tests.
 if: ${{ inputs.disable-run-tests == 'false' }}
@@ -55,16 +59,9 @@ runs:
 if: ${{ inputs.disable-run-tests == 'false' }}
 uses: taiki-e/install-action@nextest

-# Get proper backtraces in mac Sonoma. Currently there's an issue with the new
-# linker that prevents backtraces from getting printed correctly.
-#
-# <https://github.com/rust-lang/rust/issues/113783>
 - name: Run integration tests
 if: ${{ inputs.disable-run-tests == 'false' }}
 shell: bash
-env:
-CARGO_BUILD_RUSTFLAGS: "-Clink-arg=-Wl,-ld_classic"
-SQLNESS_OPTS: "--preserve-state"
 run: |
 make test sqlness-test

@@ -78,8 +75,6 @@ runs:

 - name: Build greptime binary
 shell: bash
-env:
-CARGO_BUILD_RUSTFLAGS: "-Clink-arg=-Wl,-ld_classic"
 run: |
 make build \
 CARGO_PROFILE=${{ inputs.cargo-profile }} \
@@ -90,5 +85,5 @@ runs:
 uses: ./.github/actions/upload-artifacts
 with:
 artifacts-dir: ${{ inputs.artifacts-dir }}
-target-files: target/${{ inputs.arch }}/${{ inputs.cargo-profile }}/greptime
+target-file: target/${{ inputs.arch }}/${{ inputs.cargo-profile }}/greptime
 version: ${{ inputs.version }}
@@ -4,6 +4,9 @@ inputs:
 arch:
 description: Architecture to build
 required: true
+rust-toolchain:
+description: Rust toolchain to use
+required: true
 cargo-profile:
 description: Cargo profile to build
 required: true
@@ -25,14 +28,24 @@ runs:
 - uses: arduino/setup-protoc@v3

 - name: Install rust toolchain
-uses: actions-rust-lang/setup-rust-toolchain@v1
+uses: dtolnay/rust-toolchain@master
 with:
-target: ${{ inputs.arch }}
+toolchain: ${{ inputs.rust-toolchain }}
+targets: ${{ inputs.arch }}
 components: llvm-tools-preview

 - name: Rust Cache
 uses: Swatinem/rust-cache@v2

+- name: Install Python
+uses: actions/setup-python@v5
+with:
+python-version: '3.10'

+- name: Install PyArrow Package
+shell: pwsh
+run: pip install pyarrow

 - name: Install WSL distribution
 uses: Vampire/setup-wsl@v2
 with:
@@ -47,15 +60,15 @@ runs:
 shell: pwsh
 run: make test sqlness-test
 env:
+RUSTUP_WINDOWS_PATH_ADD_BIN: 1 # Workaround for https://github.com/nextest-rs/nextest/issues/1493
 RUST_BACKTRACE: 1
-SQLNESS_OPTS: "--preserve-state"

 - name: Upload sqlness logs
 if: ${{ failure() }} # Only upload logs when the integration tests failed.
 uses: actions/upload-artifact@v4
 with:
 name: sqlness-logs
-path: C:\Users\RUNNER~1\AppData\Local\Temp\sqlness*
+path: /tmp/greptime-*.log
 retention-days: 3

 - name: Build greptime binary
@@ -66,5 +79,5 @@ runs:
 uses: ./.github/actions/upload-artifacts
 with:
 artifacts-dir: ${{ inputs.artifacts-dir }}
-target-files: target/${{ inputs.arch }}/${{ inputs.cargo-profile }}/greptime,target/${{ inputs.arch }}/${{ inputs.cargo-profile }}/greptime.pdb
+target-file: target/${{ inputs.arch }}/${{ inputs.cargo-profile }}/greptime
 version: ${{ inputs.version }}
@@ -9,8 +9,8 @@ runs:
 steps:
 # Download artifacts from previous jobs, the artifacts will be downloaded to:
 # ${WORKING_DIR}
-# |- greptime-darwin-amd64-v0.5.0/greptime-darwin-amd64-v0.5.0.tar.gz
-# |- greptime-darwin-amd64-v0.5.0.sha256sum/greptime-darwin-amd64-v0.5.0.sha256sum
+# |- greptime-darwin-amd64-pyo3-v0.5.0/greptime-darwin-amd64-pyo3-v0.5.0.tar.gz
+# |- greptime-darwin-amd64-pyo3-v0.5.0.sha256sum/greptime-darwin-amd64-pyo3-v0.5.0.sha256sum
 # |- greptime-darwin-amd64-v0.5.0/greptime-darwin-amd64-v0.5.0.tar.gz
 # |- greptime-darwin-amd64-v0.5.0.sha256sum/greptime-darwin-amd64-v0.5.0.sha256sum
 # ...
.github/actions/release-cn-artifacts/action.yaml (22 changed lines)
@@ -51,8 +51,8 @@ inputs:
 required: true
 upload-to-s3:
 description: Upload to S3
-required: true
-default: 'false'
+required: false
+default: 'true'
 artifacts-dir:
 description: Directory to store artifacts
 required: false
@@ -64,11 +64,11 @@ inputs:
 upload-max-retry-times:
 description: Max retry times for uploading artifacts to S3
 required: false
-default: "30"
+default: "20"
 upload-retry-timeout:
 description: Timeout for uploading artifacts to S3
 required: false
-default: "120" # minutes
+default: "30" # minutes
 runs:
 using: composite
 steps:
@@ -77,21 +77,13 @@ runs:
 with:
 path: ${{ inputs.artifacts-dir }}

-- name: Install s5cmd
-shell: bash
-run: |
-wget https://github.com/peak/s5cmd/releases/download/v2.3.0/s5cmd_2.3.0_Linux-64bit.tar.gz
-tar -xzf s5cmd_2.3.0_Linux-64bit.tar.gz
-sudo mv s5cmd /usr/local/bin/
-sudo chmod +x /usr/local/bin/s5cmd

 - name: Release artifacts to cn region
 uses: nick-invision/retry@v2
 if: ${{ inputs.upload-to-s3 == 'true' }}
 env:
 AWS_ACCESS_KEY_ID: ${{ inputs.aws-cn-access-key-id }}
 AWS_SECRET_ACCESS_KEY: ${{ inputs.aws-cn-secret-access-key }}
-AWS_REGION: ${{ inputs.aws-cn-region }}
+AWS_DEFAULT_REGION: ${{ inputs.aws-cn-region }}
 UPDATE_VERSION_INFO: ${{ inputs.update-version-info }}
 with:
 max_attempts: ${{ inputs.upload-max-retry-times }}
@@ -131,10 +123,10 @@ runs:
 DST_REGISTRY_PASSWORD: ${{ inputs.dst-image-registry-password }}
 run: |
 ./.github/scripts/copy-image.sh \
-${{ inputs.src-image-registry }}/${{ inputs.src-image-namespace }}/${{ inputs.src-image-name }}-centos:${{ inputs.version }} \
+${{ inputs.src-image-registry }}/${{ inputs.src-image-namespace }}/${{ inputs.src-image-name }}-centos:latest \
 ${{ inputs.dst-image-registry }}/${{ inputs.dst-image-namespace }}

-- name: Push latest greptimedb-centos image from DockerHub to ACR
+- name: Push greptimedb-centos image from DockerHub to ACR
 shell: bash
 if: ${{ inputs.dev-mode == 'false' && inputs.push-latest-tag == 'true' }}
 env:
.github/actions/setup-chaos/action.yml (17 changed lines)
@@ -1,17 +0,0 @@
-name: Setup Kind
-description: Deploy Kind
-runs:
-using: composite
-steps:
-- uses: actions/checkout@v4
-- name: Create kind cluster
-shell: bash
-run: |
-helm repo add chaos-mesh https://charts.chaos-mesh.org
-kubectl create ns chaos-mesh
-helm install chaos-mesh chaos-mesh/chaos-mesh -n=chaos-mesh --version 2.6.3
-- name: Print Chaos-mesh
-if: always()
-shell: bash
-run: |
-kubectl get po -n chaos-mesh
@@ -2,7 +2,7 @@ name: Setup Etcd cluster
 description: Deploy Etcd cluster on Kubernetes
 inputs:
 etcd-replicas:
-default: 1
+default: 3
 description: "Etcd replicas"
 namespace:
 default: "etcd-cluster"
@@ -18,15 +18,8 @@ runs:
 --set replicaCount=${{ inputs.etcd-replicas }} \
 --set resources.requests.cpu=50m \
 --set resources.requests.memory=128Mi \
---set resources.limits.cpu=1500m \
---set resources.limits.memory=2Gi \
 --set auth.rbac.create=false \
 --set auth.rbac.token.enabled=false \
 --set persistence.size=2Gi \
 --create-namespace \
---set global.security.allowInsecureImages=true \
---set image.registry=docker.io \
---set image.repository=greptime/etcd \
---set image.tag=3.6.1-debian-12-r3 \
---version 12.0.8 \
 -n ${{ inputs.namespace }}
@@ -8,7 +8,7 @@ inputs:
 default: 2
 description: "Number of Datanode replicas"
 meta-replicas:
-default: 2
+default: 3
 description: "Number of Metasrv replicas"
 image-registry:
 default: "docker.io"
@@ -22,51 +22,41 @@ inputs:
 etcd-endpoints:
 default: "etcd.etcd-cluster.svc.cluster.local:2379"
 description: "Etcd endpoints"
-values-filename:
-default: "with-minio.yaml"
-enable-region-failover:
-default: false

 runs:
 using: composite
 steps:
 - name: Install GreptimeDB operator
-uses: nick-fields/retry@v3
-with:
-timeout_minutes: 3
-max_attempts: 3
-shell: bash
-command: |
-helm repo add greptime https://greptimeteam.github.io/helm-charts/
-helm repo update
-helm upgrade \
---install \
---create-namespace \
-greptimedb-operator greptime/greptimedb-operator \
--n greptimedb-admin \
---wait \
---wait-for-jobs
+shell: bash
+run: |
+helm repo add greptime https://greptimeteam.github.io/helm-charts/
+helm repo update
+helm upgrade \
+--install \
+--create-namespace \
+greptimedb-operator greptime/greptimedb-operator \
+-n greptimedb-admin \
+--wait \
+--wait-for-jobs
 - name: Install GreptimeDB cluster
 shell: bash
 run: |
 helm upgrade \
 --install my-greptimedb \
---set meta.backendStorage.etcd.endpoints=${{ inputs.etcd-endpoints }} \
---set meta.enableRegionFailover=${{ inputs.enable-region-failover }} \
+--set meta.etcdEndpoints=${{ inputs.etcd-endpoints }} \
 --set image.registry=${{ inputs.image-registry }} \
 --set image.repository=${{ inputs.image-repository }} \
 --set image.tag=${{ inputs.image-tag }} \
 --set base.podTemplate.main.resources.requests.cpu=50m \
 --set base.podTemplate.main.resources.requests.memory=256Mi \
---set base.podTemplate.main.resources.limits.cpu=2000m \
---set base.podTemplate.main.resources.limits.memory=3Gi \
+--set base.podTemplate.main.resources.limits.cpu=1000m \
+--set base.podTemplate.main.resources.limits.memory=2Gi \
 --set frontend.replicas=${{ inputs.frontend-replicas }} \
 --set datanode.replicas=${{ inputs.datanode-replicas }} \
 --set meta.replicas=${{ inputs.meta-replicas }} \
 greptime/greptimedb-cluster \
 --create-namespace \
 -n my-greptimedb \
---values ./.github/actions/setup-greptimedb-cluster/${{ inputs.values-filename }} \
 --wait \
 --wait-for-jobs
 - name: Wait for GreptimeDB
@@ -1,13 +0,0 @@
-meta:
-configData: |-
-[runtime]
-global_rt_size = 4
-datanode:
-configData: |-
-[runtime]
-global_rt_size = 4
-compact_rt_size = 2
-frontend:
-configData: |-
-[runtime]
-global_rt_size = 4
@@ -1,33 +0,0 @@
-meta:
-configData: |-
-[runtime]
-global_rt_size = 4

-[datanode]
-[datanode.client]
-timeout = "120s"
-datanode:
-configData: |-
-[runtime]
-global_rt_size = 4
-compact_rt_size = 2

-[storage]
-cache_path = "/data/greptimedb/s3cache"
-cache_capacity = "256MB"
-frontend:
-configData: |-
-[runtime]
-global_rt_size = 4

-[meta_client]
-ddl_timeout = "120s"
-objectStorage:
-s3:
-bucket: default
-region: us-west-2
-root: test-root
-endpoint: http://minio.minio.svc.cluster.local
-credentials:
-accessKeyId: rootuser
-secretAccessKey: rootpass123
@@ -1,29 +0,0 @@
-meta:
-configData: |-
-[runtime]
-global_rt_size = 4

-[datanode]
-[datanode.client]
-timeout = "120s"
-datanode:
-configData: |-
-[runtime]
-global_rt_size = 4
-compact_rt_size = 2
-frontend:
-configData: |-
-[runtime]
-global_rt_size = 4

-[meta_client]
-ddl_timeout = "120s"
-objectStorage:
-s3:
-bucket: default
-region: us-west-2
-root: test-root
-endpoint: http://minio.minio.svc.cluster.local
-credentials:
-accessKeyId: rootuser
-secretAccessKey: rootpass123
@@ -1,51 +0,0 @@
-logging:
-level: "info"
-format: "json"
-filters:
-- log_store=debug
-meta:
-configData: |-
-[runtime]
-global_rt_size = 4

-[wal]
-provider = "kafka"
-broker_endpoints = ["kafka.kafka-cluster.svc.cluster.local:9092"]
-num_topics = 3
-auto_prune_interval = "30s"
-trigger_flush_threshold = 100

-[datanode]
-[datanode.client]
-timeout = "120s"
-datanode:
-configData: |-
-[runtime]
-global_rt_size = 4
-compact_rt_size = 2

-[wal]
-provider = "kafka"
-broker_endpoints = ["kafka.kafka-cluster.svc.cluster.local:9092"]
-overwrite_entry_start_id = true
-frontend:
-configData: |-
-[runtime]
-global_rt_size = 4

-[meta_client]
-ddl_timeout = "120s"
-objectStorage:
-s3:
-bucket: default
-region: us-west-2
-root: test-root
-endpoint: http://minio.minio.svc.cluster.local
-credentials:
-accessKeyId: rootuser
-secretAccessKey: rootpass123
-remoteWal:
-enabled: true
-kafka:
-brokerEndpoints:
-- "kafka.kafka-cluster.svc.cluster.local:9092"
.github/actions/setup-kafka-cluster/action.yml (30 changed lines)
@@ -1,30 +0,0 @@
-name: Setup Kafka cluster
-description: Deploy Kafka cluster on Kubernetes
-inputs:
-controller-replicas:
-default: 3
-description: "Kafka controller replicas"
-namespace:
-default: "kafka-cluster"

-runs:
-using: composite
-steps:
-- name: Install Kafka cluster
-shell: bash
-run: |
-helm upgrade \
---install kafka oci://registry-1.docker.io/bitnamicharts/kafka \
---set controller.replicaCount=${{ inputs.controller-replicas }} \
---set controller.resources.requests.cpu=50m \
---set controller.resources.requests.memory=128Mi \
---set controller.resources.limits.cpu=2000m \
---set controller.resources.limits.memory=2Gi \
---set listeners.controller.protocol=PLAINTEXT \
---set listeners.client.protocol=PLAINTEXT \
---create-namespace \
---set image.registry=docker.io \
---set image.repository=greptime/kafka \
---set image.tag=3.9.0-debian-12-r1 \
---version 31.0.0 \
--n ${{ inputs.namespace }}
.github/actions/setup-minio/action.yml (24 changed lines)
@@ -1,24 +0,0 @@
-name: Setup Minio cluster
-description: Deploy Minio cluster on Kubernetes
-inputs:
-replicas:
-default: 1
-description: "replicas"

-runs:
-using: composite
-steps:
-- name: Install Etcd cluster
-shell: bash
-run: |
-helm repo add minio https://charts.min.io/
-helm upgrade --install minio \
---set resources.requests.memory=128Mi \
---set replicas=${{ inputs.replicas }} \
---set mode=standalone \
---set rootUser=rootuser,rootPassword=rootpass123 \
---set buckets[0].name=default \
---set service.port=80,service.targetPort=9000 \
-minio/minio \
---create-namespace \
--n minio
@@ -1,32 +0,0 @@
-name: Setup PostgreSQL
-description: Deploy PostgreSQL on Kubernetes
-inputs:
-postgres-replicas:
-default: 1
-description: "Number of PostgreSQL replicas"
-namespace:
-default: "postgres-namespace"
-description: "The PostgreSQL namespace"
-storage-size:
-default: "1Gi"
-description: "Storage size for PostgreSQL"

-runs:
-using: composite
-steps:
-- name: Install PostgreSQL
-shell: bash
-run: |
-helm upgrade \
---install postgresql oci://registry-1.docker.io/bitnamicharts/postgresql \
---set replicaCount=${{ inputs.postgres-replicas }} \
---set global.security.allowInsecureImages=true \
---set image.registry=docker.io \
---set image.repository=greptime/postgresql \
---set image.tag=17.5.0-debian-12-r3 \
---version 16.7.4 \
---set persistence.size=${{ inputs.storage-size }} \
---set postgresql.username=greptimedb \
---set postgresql.password=admin \
---create-namespace \
--n ${{ inputs.namespace }}
.github/actions/start-runner/action.yml (4 changed lines)
@@ -38,7 +38,7 @@ runs:
 steps:
 - name: Configure AWS credentials
 if: startsWith(inputs.runner, 'ec2')
-uses: aws-actions/configure-aws-credentials@v4
+uses: aws-actions/configure-aws-credentials@v2
 with:
 aws-access-key-id: ${{ inputs.aws-access-key-id }}
 aws-secret-access-key: ${{ inputs.aws-secret-access-key }}
@@ -56,7 +56,7 @@ runs:

 - name: Start EC2 runner
 if: startsWith(inputs.runner, 'ec2')
-uses: machulav/ec2-github-runner@v2.3.8
+uses: machulav/ec2-github-runner@v2
 id: start-linux-arm64-ec2-runner
 with:
 mode: start
.github/actions/stop-runner/action.yml (4 changed lines)
@@ -25,7 +25,7 @@ runs:
 steps:
 - name: Configure AWS credentials
 if: ${{ inputs.label && inputs.ec2-instance-id }}
-uses: aws-actions/configure-aws-credentials@v4
+uses: aws-actions/configure-aws-credentials@v2
 with:
 aws-access-key-id: ${{ inputs.aws-access-key-id }}
 aws-secret-access-key: ${{ inputs.aws-secret-access-key }}
@@ -33,7 +33,7 @@ runs:

 - name: Stop EC2 runner
 if: ${{ inputs.label && inputs.ec2-instance-id }}
-uses: machulav/ec2-github-runner@v2.3.8
+uses: machulav/ec2-github-runner@v2
 with:
 mode: stop
 label: ${{ inputs.label }}
.github/actions/upload-artifacts/action.yml (20 changed lines)
@@ -4,8 +4,8 @@ inputs:
 artifacts-dir:
 description: Directory to store artifacts
 required: true
-target-files:
-description: The multiple target files to upload, separated by comma
+target-file:
+description: The path of the target artifact
 required: false
 version:
 description: Version of the artifact
@@ -18,21 +18,17 @@ runs:
 using: composite
 steps:
 - name: Create artifacts directory
-if: ${{ inputs.target-files != '' }}
+if: ${{ inputs.target-file != '' }}
 working-directory: ${{ inputs.working-dir }}
 shell: bash
 run: |
-set -e
-mkdir -p ${{ inputs.artifacts-dir }}
-IFS=',' read -ra FILES <<< "${{ inputs.target-files }}"
-for file in "${FILES[@]}"; do
-cp "$file" ${{ inputs.artifacts-dir }}/
-done
+mkdir -p ${{ inputs.artifacts-dir }} && \
+cp ${{ inputs.target-file }} ${{ inputs.artifacts-dir }}

 # The compressed artifacts will use the following layout:
-# greptime-linux-amd64-v0.3.0sha256sum
-# greptime-linux-amd64-v0.3.0.tar.gz
-# greptime-linux-amd64-v0.3.0
+# greptime-linux-amd64-pyo3-v0.3.0sha256sum
+# greptime-linux-amd64-pyo3-v0.3.0.tar.gz
+# greptime-linux-amd64-pyo3-v0.3.0
 # └── greptime
 - name: Compress artifacts and calculate checksum
 working-directory: ${{ inputs.working-dir }}
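For context, the old side of the copy step above splits the comma-separated target-files input with IFS and copies each entry into the artifacts directory; a minimal standalone sketch of that shell pattern (the file names and directory here are illustrative, not taken from the diff):

# Split a comma-separated list into an array and copy every entry.
TARGET_FILES="./target/release/greptime,./target/release/greptime.pdb"
ARTIFACTS_DIR="greptime-artifacts"
mkdir -p "$ARTIFACTS_DIR"
IFS=',' read -ra FILES <<< "$TARGET_FILES"
for file in "${FILES[@]}"; do
  cp "$file" "$ARTIFACTS_DIR"/
done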
.github/cargo-blacklist.txt (3 changed lines)
@@ -1,3 +0,0 @@
-native-tls
-openssl
-aws-lc-sys
.github/labeler.yaml (15 changed lines)
@@ -1,15 +0,0 @@
-ci:
-- changed-files:
-- any-glob-to-any-file: .github/**

-docker:
-- changed-files:
-- any-glob-to-any-file: docker/**

-documentation:
-- changed-files:
-- any-glob-to-any-file: docs/**

-dashboard:
-- changed-files:
-- any-glob-to-any-file: grafana/**
.github/pull_request_template.md (10 changed lines)
@@ -4,8 +4,7 @@ I hereby agree to the terms of the [GreptimeDB CLA](https://github.com/GreptimeT

 ## What's changed and what's your intention?

-<!--
-__!!! DO NOT LEAVE THIS BLOCK EMPTY !!!__
+__!!! DO NOT LEAVE THIS BLOCK EMPTY !!!__

 Please explain IN DETAIL what the changes are in this PR and why they are needed:

@@ -13,14 +12,9 @@ Please explain IN DETAIL what the changes are in this PR and why they are needed
 - How does this PR work? Need a brief introduction for the changed logic (optional)
 - Describe clearly one logical change and avoid lazy messages (optional)
 - Describe any limitations of the current code (optional)
-- Describe if this PR will break **API or data compatibility** (optional)
--->

-## PR Checklist
-Please convert it to a draft if some of the following conditions are not met.
+## Checklist

 - [ ] I have written the necessary rustdoc comments.
 - [ ] I have added the necessary unit tests and integration tests.
 - [ ] This PR requires documentation updates.
-- [ ] API changes are backward compatible.
-- [ ] Schema or data changes are backward compatible.
.github/scripts/check-install-script.sh (14 changed lines)
@@ -1,14 +0,0 @@
-#!/bin/sh

-set -e

-# Get the latest version of github.com/GreptimeTeam/greptimedb
-VERSION=$(curl -s https://api.github.com/repos/GreptimeTeam/greptimedb/releases/latest | jq -r '.tag_name')

-echo "Downloading the latest version: $VERSION"

-# Download the install script
-curl -fsSL https://raw.githubusercontent.com/greptimeteam/greptimedb/main/scripts/install.sh | sh -s $VERSION

-# Execute the `greptime` command
-./greptime --version
.github/scripts/check-version.sh (42 changed lines)
@@ -1,42 +0,0 @@
-#!/bin/bash

-# Get current version
-CURRENT_VERSION=$1
-if [ -z "$CURRENT_VERSION" ]; then
-echo "Error: Failed to get current version"
-exit 1
-fi

-# Get the latest version from GitHub Releases
-API_RESPONSE=$(curl -s "https://api.github.com/repos/GreptimeTeam/greptimedb/releases/latest")

-if [ -z "$API_RESPONSE" ] || [ "$(echo "$API_RESPONSE" | jq -r '.message')" = "Not Found" ]; then
-echo "Error: Failed to fetch latest version from GitHub"
-exit 1
-fi

-# Get the latest version
-LATEST_VERSION=$(echo "$API_RESPONSE" | jq -r '.tag_name')

-if [ -z "$LATEST_VERSION" ] || [ "$LATEST_VERSION" = "null" ]; then
-echo "Error: No valid version found in GitHub releases"
-exit 1
-fi

-# Cleaned up version number format (removed possible 'v' prefix and -nightly suffix)
-CLEAN_CURRENT=$(echo "$CURRENT_VERSION" | sed 's/^v//' | sed 's/-nightly-.*//')
-CLEAN_LATEST=$(echo "$LATEST_VERSION" | sed 's/^v//' | sed 's/-nightly-.*//')

-echo "Current version: $CLEAN_CURRENT"
-echo "Latest release version: $CLEAN_LATEST"

-# Use sort -V to compare versions
-HIGHER_VERSION=$(printf "%s\n%s" "$CLEAN_CURRENT" "$CLEAN_LATEST" | sort -V | tail -n1)

-if [ "$HIGHER_VERSION" = "$CLEAN_CURRENT" ]; then
-echo "Current version ($CLEAN_CURRENT) is NEWER than or EQUAL to latest ($CLEAN_LATEST)"
-echo "is-current-version-latest=true" >> $GITHUB_OUTPUT
-else
-echo "Current version ($CLEAN_CURRENT) is OLDER than latest ($CLEAN_LATEST)"
-echo "is-current-version-latest=false" >> $GITHUB_OUTPUT
-fi
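For context, the removed check-version.sh above decides which of two cleaned version strings is higher by piping them through sort -V; a minimal standalone sketch of that comparison (the sample version numbers are illustrative):

CLEAN_CURRENT="0.9.2"
CLEAN_LATEST="0.9.10"
# sort -V orders version strings component by component, so the last line of the
# sorted output is the higher version; here HIGHER_VERSION becomes 0.9.10.
HIGHER_VERSION=$(printf "%s\n%s" "$CLEAN_CURRENT" "$CLEAN_LATEST" | sort -V | tail -n1)
if [ "$HIGHER_VERSION" = "$CLEAN_CURRENT" ]; then
  echo "current is newer than or equal to latest"
else
  echo "current is older than latest"
fi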
.github/scripts/create-version.sh (29 changed lines)
@@ -8,25 +8,24 @@ set -e
 # - If it's a nightly build, the version is 'nightly-YYYYMMDD-$(git rev-parse --short HEAD)', like 'nightly-20230712-e5b243c'.
 # create_version ${GIHUB_EVENT_NAME} ${NEXT_RELEASE_VERSION} ${NIGHTLY_RELEASE_PREFIX}
 function create_version() {
-# Read from environment variables.
+# Read from envrionment variables.
 if [ -z "$GITHUB_EVENT_NAME" ]; then
-echo "GITHUB_EVENT_NAME is empty" >&2
+echo "GITHUB_EVENT_NAME is empty"
 exit 1
 fi

 if [ -z "$NEXT_RELEASE_VERSION" ]; then
-echo "NEXT_RELEASE_VERSION is empty, use version from Cargo.toml" >&2
-# NOTE: Need a `v` prefix for the version string.
-export NEXT_RELEASE_VERSION=v$(grep '^version = ' Cargo.toml | cut -d '"' -f 2 | head -n 1)
+echo "NEXT_RELEASE_VERSION is empty"
+exit 1
 fi

 if [ -z "$NIGHTLY_RELEASE_PREFIX" ]; then
-echo "NIGHTLY_RELEASE_PREFIX is empty" >&2
+echo "NIGHTLY_RELEASE_PREFIX is empty"
 exit 1
 fi

 # Reuse $NEXT_RELEASE_VERSION to identify whether it's a nightly build.
-# It will be like 'nightly-20230808-7d0d8dc6'.
+# It will be like 'nigtly-20230808-7d0d8dc6'.
 if [ "$NEXT_RELEASE_VERSION" = nightly ]; then
 echo "$NIGHTLY_RELEASE_PREFIX-$(date "+%Y%m%d")-$(git rev-parse --short HEAD)"
 exit 0
@@ -36,7 +35,7 @@ function create_version() {
 # It will be like 'dev-2023080819-f0e7216c'.
 if [ "$NEXT_RELEASE_VERSION" = dev ]; then
 if [ -z "$COMMIT_SHA" ]; then
-echo "COMMIT_SHA is empty in dev build" >&2
+echo "COMMIT_SHA is empty in dev build"
 exit 1
 fi
 echo "dev-$(date "+%Y%m%d-%s")-$(echo "$COMMIT_SHA" | cut -c1-8)"
@@ -46,7 +45,7 @@ function create_version() {
 # Note: Only output 'version=xxx' to stdout when everything is ok, so that it can be used in GitHub Actions Outputs.
 if [ "$GITHUB_EVENT_NAME" = push ]; then
 if [ -z "$GITHUB_REF_NAME" ]; then
-echo "GITHUB_REF_NAME is empty in push event" >&2
+echo "GITHUB_REF_NAME is empty in push event"
 exit 1
 fi
 echo "$GITHUB_REF_NAME"
@@ -55,15 +54,15 @@ function create_version() {
 elif [ "$GITHUB_EVENT_NAME" = schedule ]; then
 echo "$NEXT_RELEASE_VERSION-$NIGHTLY_RELEASE_PREFIX-$(date "+%Y%m%d")"
 else
-echo "Unsupported GITHUB_EVENT_NAME: $GITHUB_EVENT_NAME" >&2
+echo "Unsupported GITHUB_EVENT_NAME: $GITHUB_EVENT_NAME"
 exit 1
 fi
 }

 # You can run as following examples:
-# GITHUB_EVENT_NAME=push NEXT_RELEASE_VERSION=v0.4.0 NIGHTLY_RELEASE_PREFIX=nightly GITHUB_REF_NAME=v0.3.0 ./create-version.sh
-# GITHUB_EVENT_NAME=workflow_dispatch NEXT_RELEASE_VERSION=v0.4.0 NIGHTLY_RELEASE_PREFIX=nightly ./create-version.sh
-# GITHUB_EVENT_NAME=schedule NEXT_RELEASE_VERSION=v0.4.0 NIGHTLY_RELEASE_PREFIX=nightly ./create-version.sh
-# GITHUB_EVENT_NAME=schedule NEXT_RELEASE_VERSION=nightly NIGHTLY_RELEASE_PREFIX=nightly ./create-version.sh
-# GITHUB_EVENT_NAME=workflow_dispatch COMMIT_SHA=f0e7216c4bb6acce9b29a21ec2d683be2e3f984a NEXT_RELEASE_VERSION=dev NIGHTLY_RELEASE_PREFIX=nightly ./create-version.sh
+# GITHUB_EVENT_NAME=push NEXT_RELEASE_VERSION=v0.4.0 NIGHTLY_RELEASE_PREFIX=nigtly GITHUB_REF_NAME=v0.3.0 ./create-version.sh
+# GITHUB_EVENT_NAME=workflow_dispatch NEXT_RELEASE_VERSION=v0.4.0 NIGHTLY_RELEASE_PREFIX=nigtly ./create-version.sh
+# GITHUB_EVENT_NAME=schedule NEXT_RELEASE_VERSION=v0.4.0 NIGHTLY_RELEASE_PREFIX=nigtly ./create-version.sh
+# GITHUB_EVENT_NAME=schedule NEXT_RELEASE_VERSION=nightly NIGHTLY_RELEASE_PREFIX=nigtly ./create-version.sh
+# GITHUB_EVENT_NAME=workflow_dispatch COMMIT_SHA=f0e7216c4bb6acce9b29a21ec2d683be2e3f984a NEXT_RELEASE_VERSION=dev NIGHTLY_RELEASE_PREFIX=nigtly ./create-version.sh
 create_version
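For context, the old side above falls back to reading the release version out of Cargo.toml when NEXT_RELEASE_VERSION is unset, instead of failing; a minimal standalone sketch of that extraction (the sample Cargo.toml line is illustrative):

# Given a manifest line such as: version = "0.9.2"
# grep selects the version line, cut takes the quoted value, and head keeps the
# first match, so NEXT_RELEASE_VERSION becomes v0.9.2 for this example.
NEXT_RELEASE_VERSION=v$(grep '^version = ' Cargo.toml | cut -d '"' -f 2 | head -n 1)
echo "$NEXT_RELEASE_VERSION"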
.github/scripts/deploy-greptimedb.sh (52 changed lines, vendored)

@@ -3,18 +3,14 @@
 set -e
 set -o pipefail
 
-KUBERNETES_VERSION="${KUBERNETES_VERSION:-v1.32.0}"
+KUBERNETES_VERSION="${KUBERNETES_VERSION:-v1.24.0}"
 ENABLE_STANDALONE_MODE="${ENABLE_STANDALONE_MODE:-true}"
 DEFAULT_INSTALL_NAMESPACE=${DEFAULT_INSTALL_NAMESPACE:-default}
 GREPTIMEDB_IMAGE_TAG=${GREPTIMEDB_IMAGE_TAG:-latest}
-GREPTIMEDB_OPERATOR_IMAGE_TAG=${GREPTIMEDB_OPERATOR_IMAGE_TAG:-v0.5.1}
-GREPTIMEDB_INITIALIZER_IMAGE_TAG="${GREPTIMEDB_OPERATOR_IMAGE_TAG}"
-GREPTIME_CHART="https://greptimeteam.github.io/helm-charts/"
 ETCD_CHART="oci://registry-1.docker.io/bitnamicharts/etcd"
-ETCD_CHART_VERSION="${ETCD_CHART_VERSION:-12.0.8}"
+GREPTIME_CHART="https://greptimeteam.github.io/helm-charts/"
-ETCD_IMAGE_TAG="${ETCD_IMAGE_TAG:-3.6.1-debian-12-r3}"
 
-# Create a cluster with 1 control-plane node and 5 workers.
+# Ceate a cluster with 1 control-plane node and 5 workers.
 function create_kind_cluster() {
   cat <<EOF | kind create cluster --name "${CLUSTER}" --image kindest/node:"$KUBERNETES_VERSION" --config=-
 kind: Cluster
@@ -39,16 +35,10 @@ function add_greptime_chart() {
 function deploy_etcd_cluster() {
   local namespace="$1"
 
-  helm upgrade --install etcd "$ETCD_CHART" \
+  helm install etcd "$ETCD_CHART" \
-    --version "$ETCD_CHART_VERSION" \
-    --create-namespace \
     --set replicaCount=3 \
     --set auth.rbac.create=false \
     --set auth.rbac.token.enabled=false \
-    --set global.security.allowInsecureImages=true \
-    --set image.registry=docker.io \
-    --set image.repository=greptime/etcd \
-    --set image.tag="$ETCD_IMAGE_TAG" \
     -n "$namespace"
 
   # Wait for etcd cluster to be ready.
@@ -58,9 +48,8 @@ function deploy_etcd_cluster() {
 # Deploy greptimedb-operator.
 function deploy_greptimedb_operator() {
   # Use the latest chart and image.
-  helm upgrade --install greptimedb-operator greptime/greptimedb-operator \
+  helm install greptimedb-operator greptime/greptimedb-operator \
-    --create-namespace \
+    --set image.tag=latest \
-    --set image.tag="$GREPTIMEDB_OPERATOR_IMAGE_TAG" \
     -n "$DEFAULT_INSTALL_NAMESPACE"
 
   # Wait for greptimedb-operator to be ready.
@@ -77,12 +66,9 @@ function deploy_greptimedb_cluster() {
 
   deploy_etcd_cluster "$install_namespace"
 
-  helm upgrade --install "$cluster_name" greptime/greptimedb-cluster \
+  helm install "$cluster_name" greptime/greptimedb-cluster \
-    --create-namespace \
     --set image.tag="$GREPTIMEDB_IMAGE_TAG" \
-    --set initializer.tag="$GREPTIMEDB_INITIALIZER_IMAGE_TAG" \
+    --set meta.etcdEndpoints="etcd.$install_namespace:2379" \
-    --set meta.backendStorage.etcd.endpoints="etcd.$install_namespace:2379" \
-    --set meta.backendStorage.etcd.storeKeyPrefix="$cluster_name" \
     -n "$install_namespace"
 
   # Wait for greptimedb cluster to be ready.
@@ -115,18 +101,15 @@ function deploy_greptimedb_cluster_with_s3_storage() {
 
   deploy_etcd_cluster "$install_namespace"
 
-  helm upgrade --install "$cluster_name" greptime/greptimedb-cluster -n "$install_namespace" \
+  helm install "$cluster_name" greptime/greptimedb-cluster -n "$install_namespace" \
-    --create-namespace \
     --set image.tag="$GREPTIMEDB_IMAGE_TAG" \
-    --set initializer.tag="$GREPTIMEDB_INITIALIZER_IMAGE_TAG" \
+    --set meta.etcdEndpoints="etcd.$install_namespace:2379" \
-    --set meta.backendStorage.etcd.endpoints="etcd.$install_namespace:2379" \
+    --set storage.s3.bucket="$AWS_CI_TEST_BUCKET" \
-    --set meta.backendStorage.etcd.storeKeyPrefix="$cluster_name" \
+    --set storage.s3.region="$AWS_REGION" \
-    --set objectStorage.s3.bucket="$AWS_CI_TEST_BUCKET" \
+    --set storage.s3.root="$DATA_ROOT" \
-    --set objectStorage.s3.region="$AWS_REGION" \
+    --set storage.credentials.secretName=s3-credentials \
-    --set objectStorage.s3.root="$DATA_ROOT" \
+    --set storage.credentials.accessKeyId="$AWS_ACCESS_KEY_ID" \
-    --set objectStorage.credentials.secretName=s3-credentials \
+    --set storage.credentials.secretAccessKey="$AWS_SECRET_ACCESS_KEY"
-    --set objectStorage.credentials.accessKeyId="$AWS_ACCESS_KEY_ID" \
-    --set objectStorage.credentials.secretAccessKey="$AWS_SECRET_ACCESS_KEY"
 
   # Wait for greptimedb cluster to be ready.
   while true; do
@@ -151,8 +134,7 @@ function deploy_greptimedb_cluster_with_s3_storage() {
 # Deploy standalone greptimedb.
 # It will expose cluster service ports as '34000', '34001', '34002', '34003' to local access.
 function deploy_standalone_greptimedb() {
-  helm upgrade --install greptimedb-standalone greptime/greptimedb-standalone \
+  helm install greptimedb-standalone greptime/greptimedb-standalone \
-    --create-namespace \
     --set image.tag="$GREPTIMEDB_IMAGE_TAG" \
     -n "$DEFAULT_INSTALL_NAMESPACE"
 
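A hedged invocation sketch for trying this script against a local kind cluster; it only sets variables visible in the hunks above, and the cluster name and the presence of kind/helm/kubectl are assumptions:

# Assumes kind, helm, and kubectl are installed; CLUSTER is the name used by create_kind_cluster.
CLUSTER=greptimedb-ci \
KUBERNETES_VERSION=v1.24.0 \
GREPTIMEDB_IMAGE_TAG=latest \
ENABLE_STANDALONE_MODE=false \
bash .github/scripts/deploy-greptimedb.sh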
.github/scripts/package-lock.json (507 changed lines, generated, vendored)

@@ -1,507 +0,0 @@
(The generated npm lockfile for the GitHub scripts is removed in its entirety: 507 lines pinning "@octokit/rest" ^21.0.0, "axios" ^1.7.0, and their transitive dependencies.)
.github/scripts/package.json (10 changed lines, vendored)

@@ -1,10 +0,0 @@
-{
-  "name": "greptimedb-github-scripts",
-  "version": "1.0.0",
-  "type": "module",
-  "description": "GitHub automation scripts for GreptimeDB",
-  "dependencies": {
-    "@octokit/rest": "^21.0.0",
-    "axios": "^1.7.0"
-  }
-}
.github/scripts/pr-review-reminder.js (152 changed lines, vendored)

@@ -1,152 +0,0 @@
-// Daily PR Review Reminder Script
-// Fetches open PRs from GreptimeDB repository and sends Slack notifications
-// to PR owners and assigned reviewers to keep review process moving.
-
-(async () => {
-  const { Octokit } = await import("@octokit/rest");
-  const { default: axios } = await import('axios');
-
-  // Configuration
-  const GITHUB_TOKEN = process.env.GITHUB_TOKEN;
-  const SLACK_WEBHOOK_URL = process.env.SLACK_PR_REVIEW_WEBHOOK_URL;
-  const REPO_OWNER = "GreptimeTeam";
-  const REPO_NAME = "greptimedb";
-  const GITHUB_TO_SLACK = JSON.parse(process.env.GITHUBID_SLACKID_MAPPING || '{}');
-
-  // Debug: Print environment variable status
-  console.log("=== Environment Variables Debug ===");
-  console.log(`GITHUB_TOKEN: ${GITHUB_TOKEN ? 'Set ✓' : 'NOT SET ✗'}`);
-  console.log(`SLACK_PR_REVIEW_WEBHOOK_URL: ${SLACK_WEBHOOK_URL ? 'Set ✓' : 'NOT SET ✗'}`);
-  console.log(`GITHUBID_SLACKID_MAPPING: ${process.env.GITHUBID_SLACKID_MAPPING ? `Set ✓ (${Object.keys(GITHUB_TO_SLACK).length} mappings)` : 'NOT SET ✗'}`);
-  console.log("===================================\n");
-
-  const octokit = new Octokit({
-    auth: GITHUB_TOKEN
-  });
-
-  // Fetch all open PRs from the repository
-  async function fetchOpenPRs() {
-    try {
-      const prs = await octokit.pulls.list({
-        owner: REPO_OWNER,
-        repo: REPO_NAME,
-        state: "open",
-        per_page: 100,
-        sort: "created",
-        direction: "asc"
-      });
-      return prs.data.filter((pr) => !pr.draft);
-    } catch (error) {
-      console.error("Error fetching PRs:", error);
-      return [];
-    }
-  }
-
-  // Convert GitHub username to Slack mention or fallback to GitHub username
-  function toSlackMention(githubUser) {
-    const slackUserId = GITHUB_TO_SLACK[githubUser];
-    return slackUserId ? `<@${slackUserId}>` : `@${githubUser}`;
-  }
-
-  // Calculate days since PR was opened
-  function getDaysOpen(createdAt) {
-    const created = new Date(createdAt);
-    const now = new Date();
-    const diffMs = now - created;
-    const days = Math.floor(diffMs / (1000 * 60 * 60 * 24));
-    return days;
-  }
-
-  // Build Slack notification message from PR list
-  function buildSlackMessage(prs) {
-    if (prs.length === 0) {
-      return "*🎉 Great job! No pending PRs for review.*";
-    }
-
-    // Separate PRs by age threshold (14 days)
-    const criticalPRs = [];
-    const recentPRs = [];
-
-    prs.forEach(pr => {
-      const daysOpen = getDaysOpen(pr.created_at);
-      if (daysOpen >= 14) {
-        criticalPRs.push(pr);
-      } else {
-        recentPRs.push(pr);
-      }
-    });
-
-    const lines = [
-      `*🔍 Daily PR Review Reminder 🔍*`,
-      `Found *${criticalPRs.length}* critical PR(s) (14+ days old)\n`
-    ];
-
-    // Show critical PRs (14+ days) in detail
-    if (criticalPRs.length > 0) {
-      criticalPRs.forEach((pr, index) => {
-        const owner = toSlackMention(pr.user.login);
-        const reviewers = pr.requested_reviewers || [];
-        const reviewerMentions = reviewers.map(r => toSlackMention(r.login)).join(", ");
-        const daysOpen = getDaysOpen(pr.created_at);
-
-        const prInfo = `${index + 1}. <${pr.html_url}|#${pr.number}: ${pr.title}>`;
-        const ageInfo = `   🔴 Opened *${daysOpen}* day(s) ago`;
-        const ownerInfo = `   👤 Owner: ${owner}`;
-        const reviewerInfo = reviewers.length > 0
-          ? `   👁️ Reviewers: ${reviewerMentions}`
-          : `   👁️ Reviewers: _Not assigned yet_`;
-
-        lines.push(prInfo);
-        lines.push(ageInfo);
-        lines.push(ownerInfo);
-        lines.push(reviewerInfo);
-        lines.push(""); // Empty line between PRs
-      });
-    }
-
-    lines.push("_Let's keep the code review process moving! 🚀_");
-
-    return lines.join("\n");
-  }
-
-  // Send notification to Slack webhook
-  async function sendSlackNotification(message) {
-    if (!SLACK_WEBHOOK_URL) {
-      console.log("⚠️ SLACK_PR_REVIEW_WEBHOOK_URL not configured. Message preview:");
-      console.log("=".repeat(60));
-      console.log(message);
-      console.log("=".repeat(60));
-      return;
-    }
-
-    try {
-      const response = await axios.post(SLACK_WEBHOOK_URL, {
-        text: message
-      });
-
-      if (response.status !== 200) {
-        throw new Error(`Slack API returned status ${response.status}`);
-      }
-      console.log("Slack notification sent successfully.");
-    } catch (error) {
-      console.error("Error sending Slack notification:", error);
-      throw error;
-    }
-  }
-
-  // Main execution flow
-  async function run() {
-    console.log(`Fetching open PRs from ${REPO_OWNER}/${REPO_NAME}...`);
-    const prs = await fetchOpenPRs();
-    console.log(`Found ${prs.length} open PR(s).`);
-
-    const message = buildSlackMessage(prs);
-    console.log("Sending Slack notification...");
-    await sendSlackNotification(message);
-  }
-
-  run().catch(error => {
-    console.error("Script execution failed:", error);
-    process.exit(1);
-  });
-})();
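A hedged sketch of running the reminder script locally; with SLACK_PR_REVIEW_WEBHOOK_URL unset the script only prints a message preview (per its fallback branch above), and the mapping value here is a made-up example:

cd .github/scripts
npm install   # installs @octokit/rest and axios declared in the package.json above
GITHUB_TOKEN=<your GitHub token> \
GITHUBID_SLACKID_MAPPING='{"example-user":"U0000000"}' \
node pr-review-reminder.js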
.github/scripts/pull-test-deps-images.sh (34 changed lines, vendored)

@@ -1,34 +0,0 @@
-#!/bin/bash
-
-# This script is used to pull the test dependency images that are stored in public ECR one by one to avoid rate limiting.
-
-set -e
-
-MAX_RETRIES=3
-
-IMAGES=(
-  "greptime/zookeeper:3.7"
-  "greptime/kafka:3.9.0-debian-12-r1"
-  "greptime/etcd:3.6.1-debian-12-r3"
-  "greptime/minio:2024"
-  "greptime/mysql:5.7"
-)
-
-for image in "${IMAGES[@]}"; do
-  for ((attempt=1; attempt<=MAX_RETRIES; attempt++)); do
-    if docker pull "$image"; then
-      # Successfully pulled the image.
-      break
-    else
-      # Use some simple exponential backoff to avoid rate limiting.
-      if [ $attempt -lt $MAX_RETRIES ]; then
-        sleep_seconds=$((attempt * 5))
-        echo "Attempt $attempt failed for $image, waiting $sleep_seconds seconds"
-        sleep $sleep_seconds # 5s, 10s delays
-      else
-        echo "Failed to pull $image after $MAX_RETRIES attempts"
-        exit 1
-      fi
-    fi
-  done
-done
.github/scripts/update-dev-builder-version.sh (37 changed lines, vendored)

@@ -1,37 +0,0 @@
-#!/bin/bash
-
-DEV_BUILDER_IMAGE_TAG=$1
-
-update_dev_builder_version() {
-  if [ -z "$DEV_BUILDER_IMAGE_TAG" ]; then
-    echo "Error: Should specify the dev-builder image tag"
-    exit 1
-  fi
-
-  # Configure Git configs.
-  git config --global user.email greptimedb-ci@greptime.com
-  git config --global user.name greptimedb-ci
-
-  # Checkout a new branch.
-  BRANCH_NAME="ci/update-dev-builder-$(date +%Y%m%d%H%M%S)"
-  git checkout -b $BRANCH_NAME
-
-  # Update the dev-builder image tag in the Makefile.
-  sed -i "s/DEV_BUILDER_IMAGE_TAG ?=.*/DEV_BUILDER_IMAGE_TAG ?= ${DEV_BUILDER_IMAGE_TAG}/g" Makefile
-
-  # Commit the changes.
-  git add Makefile
-  git commit -s -m "ci: update dev-builder image tag"
-  git push origin $BRANCH_NAME
-
-  # Create a Pull Request.
-  gh pr create \
-    --title "ci: update dev-builder image tag" \
-    --body "This PR updates the dev-builder image tag" \
-    --base main \
-    --head $BRANCH_NAME \
-    --reviewer zyy17 \
-    --reviewer daviderli614
-}
-
-update_dev_builder_version
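A hedged usage sketch; the tag value below is hypothetical, and the Makefile effect simply follows from the sed call in the script above:

./.github/scripts/update-dev-builder-version.sh 2024-01-01-abc1234
# The sed invocation would rewrite the Makefile line to:
# DEV_BUILDER_IMAGE_TAG ?= 2024-01-01-abc1234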
.github/scripts/update-helm-charts-version.sh (49 changed lines, vendored)

@@ -1,49 +0,0 @@
-#!/bin/bash
-
-set -e
-
-VERSION=${VERSION}
-GITHUB_TOKEN=${GITHUB_TOKEN}
-
-update_helm_charts_version() {
-  # Configure Git configs.
-  git config --global user.email update-helm-charts-version@greptime.com
-  git config --global user.name update-helm-charts-version
-
-  # Clone helm-charts repository.
-  git clone "https://x-access-token:${GITHUB_TOKEN}@github.com/GreptimeTeam/helm-charts.git"
-  cd helm-charts
-
-  # Set default remote for gh CLI
-  gh repo set-default GreptimeTeam/helm-charts
-
-  # Checkout a new branch.
-  BRANCH_NAME="chore/greptimedb-${VERSION}"
-  git checkout -b $BRANCH_NAME
-
-  # Update version.
-  make update-version CHART=greptimedb-cluster VERSION=${VERSION}
-  make update-version CHART=greptimedb-standalone VERSION=${VERSION}
-
-  # Update docs.
-  make docs
-
-  # Commit the changes.
-  git add .
-  git commit -s -m "chore: Update GreptimeDB version to ${VERSION}"
-  git push origin $BRANCH_NAME
-
-  # Create a Pull Request.
-  gh pr create \
-    --title "chore: Update GreptimeDB version to ${VERSION}" \
-    --body "This PR updates the GreptimeDB version." \
-    --base main \
-    --head $BRANCH_NAME \
-    --reviewer sunng87 \
-    --reviewer daviderli614 \
-    --reviewer killme2008 \
-    --reviewer evenyag \
-    --reviewer fengjiachun
-}
-
-update_helm_charts_version
@@ -1,45 +0,0 @@
-#!/bin/bash
-
-set -e
-
-VERSION=${VERSION}
-GITHUB_TOKEN=${GITHUB_TOKEN}
-
-update_homebrew_greptime_version() {
-  # Configure Git configs.
-  git config --global user.email update-greptime-version@greptime.com
-  git config --global user.name update-greptime-version
-
-  # Clone helm-charts repository.
-  git clone "https://x-access-token:${GITHUB_TOKEN}@github.com/GreptimeTeam/homebrew-greptime.git"
-  cd homebrew-greptime
-
-  # Set default remote for gh CLI
-  gh repo set-default GreptimeTeam/homebrew-greptime
-
-  # Checkout a new branch.
-  BRANCH_NAME="chore/greptimedb-${VERSION}"
-  git checkout -b $BRANCH_NAME
-
-  # Update version.
-  make update-greptime-version VERSION=${VERSION}
-
-  # Commit the changes.
-  git add .
-  git commit -s -m "chore: Update GreptimeDB version to ${VERSION}"
-  git push origin $BRANCH_NAME
-
-  # Create a Pull Request.
-  gh pr create \
-    --title "chore: Update GreptimeDB version to ${VERSION}" \
-    --body "This PR updates the GreptimeDB version." \
-    --base main \
-    --head $BRANCH_NAME \
-    --reviewer sunng87 \
-    --reviewer daviderli614 \
-    --reviewer killme2008 \
-    --reviewer evenyag \
-    --reviewer fengjiachun
-}
-
-update_homebrew_greptime_version
.github/scripts/upload-artifacts-to-s3.sh (16 changed lines, vendored)

@@ -27,13 +27,13 @@ function upload_artifacts() {
   # ├── latest-version.txt
   # ├── latest-nightly-version.txt
   # ├── v0.1.0
-  # │   ├── greptime-darwin-amd64-v0.1.0.sha256sum
+  # │   ├── greptime-darwin-amd64-pyo3-v0.1.0.sha256sum
-  # │   └── greptime-darwin-amd64-v0.1.0.tar.gz
+  # │   └── greptime-darwin-amd64-pyo3-v0.1.0.tar.gz
   # └── v0.2.0
-  #     ├── greptime-darwin-amd64-v0.2.0.sha256sum
+  #     ├── greptime-darwin-amd64-pyo3-v0.2.0.sha256sum
-  #     └── greptime-darwin-amd64-v0.2.0.tar.gz
+  #     └── greptime-darwin-amd64-pyo3-v0.2.0.tar.gz
   find "$ARTIFACTS_DIR" -type f \( -name "*.tar.gz" -o -name "*.sha256sum" \) | while IFS= read -r file; do
-    s5cmd cp \
+    aws s3 cp \
       "$file" "s3://$AWS_S3_BUCKET/$RELEASE_DIRS/$VERSION/$(basename "$file")"
   done
 }
@@ -41,11 +41,11 @@ function upload_artifacts() {
 # Updates the latest version information in AWS S3 if UPDATE_VERSION_INFO is true.
 function update_version_info() {
   if [ "$UPDATE_VERSION_INFO" == "true" ]; then
-    # If it's the official release(like v1.0.0, v1.0.1, v1.0.2, etc.), update latest-version.txt.
+    # If it's the officail release(like v1.0.0, v1.0.1, v1.0.2, etc.), update latest-version.txt.
     if [[ "$VERSION" =~ ^v[0-9]+\.[0-9]+\.[0-9]+$ ]]; then
       echo "Updating latest-version.txt"
       echo "$VERSION" > latest-version.txt
-      s5cmd cp \
+      aws s3 cp \
        latest-version.txt "s3://$AWS_S3_BUCKET/$RELEASE_DIRS/latest-version.txt"
     fi
 
@@ -53,7 +53,7 @@ function update_version_info() {
     if [[ "$VERSION" == *"nightly"* ]]; then
       echo "Updating latest-nightly-version.txt"
       echo "$VERSION" > latest-nightly-version.txt
-      s5cmd cp \
+      aws s3 cp \
        latest-nightly-version.txt "s3://$AWS_S3_BUCKET/$RELEASE_DIRS/latest-nightly-version.txt"
     fi
   fi
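For comparison, the two upload commands swapped in these hunks copy a single artifact the same way; the file name below is a placeholder, while the bucket variables are the ones the script already uses:

s5cmd cp greptime-linux-amd64-v0.1.0.tar.gz "s3://$AWS_S3_BUCKET/$RELEASE_DIRS/v0.1.0/greptime-linux-amd64-v0.1.0.tar.gz"
aws s3 cp greptime-linux-amd64-v0.1.0.tar.gz "s3://$AWS_S3_BUCKET/$RELEASE_DIRS/v0.1.0/greptime-linux-amd64-v0.1.0.tar.gz"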
.github/workflows/apidoc.yml (11 changed lines, vendored)

@@ -12,17 +12,20 @@ on:
 
 name: Build API docs
 
+env:
+  RUST_TOOLCHAIN: nightly-2024-04-20
+
 jobs:
   apidoc:
-    runs-on: ubuntu-latest
+    runs-on: ubuntu-20.04
     steps:
       - uses: actions/checkout@v4
-        with:
-          persist-credentials: false
       - uses: arduino/setup-protoc@v3
         with:
          repo-token: ${{ secrets.GITHUB_TOKEN }}
-      - uses: actions-rust-lang/setup-rust-toolchain@v1
+      - uses: dtolnay/rust-toolchain@master
+        with:
+          toolchain: ${{ env.RUST_TOOLCHAIN }}
       - run: cargo doc --workspace --no-deps --document-private-items
       - run: |
          cat <<EOF > target/doc/index.html
.github/workflows/dependency-check.yml (35 changed lines, vendored)

@@ -1,35 +0,0 @@
-name: Check Dependencies
-
-on:
-  pull_request:
-    branches:
-      - main
-
-jobs:
-  check-dependencies:
-    runs-on: ubuntu-latest
-
-    steps:
-      - name: Checkout code
-        uses: actions/checkout@v4
-        with:
-          persist-credentials: false
-
-      - name: Set up Rust
-        uses: actions-rust-lang/setup-rust-toolchain@v1
-
-      - name: Run cargo tree
-        run: cargo tree --prefix none > dependencies.txt
-
-      - name: Extract dependency names
-        run: awk '{print $1}' dependencies.txt > dependency_names.txt
-
-      - name: Check for blacklisted crates
-        run: |
-          while read -r dep; do
-            if grep -qFx "$dep" dependency_names.txt; then
-              echo "Blacklisted crate '$dep' found in dependencies."
-              exit 1
-            fi
-          done < .github/cargo-blacklist.txt
-          echo "No blacklisted crates found."
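The removed check can be reproduced locally with essentially the same commands the workflow ran; this sketch assumes a .github/cargo-blacklist.txt file exists in the working tree:

cargo tree --prefix none > dependencies.txt
awk '{print $1}' dependencies.txt > dependency_names.txt
while read -r dep; do
  if grep -qFx "$dep" dependency_names.txt; then
    echo "Blacklisted crate '$dep' found in dependencies."
    exit 1
  fi
done < .github/cargo-blacklist.txt
echo "No blacklisted crates found."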
.github/workflows/dev-build.yml (73 changed lines, vendored)

@@ -4,11 +4,10 @@ name: GreptimeDB Development Build
 on:
   workflow_dispatch: # Allows you to run this workflow manually.
     inputs:
-      large-page-size:
+      repository:
-        description: Build GreptimeDB with large page size (65536).
+        description: The public repository to build
-        type: boolean
         required: false
-        default: false
+        default: GreptimeTeam/greptimedb
       commit: # Note: We only pull the source code and use the current workflow to build the artifacts.
         description: The commit to build
         required: true
@@ -17,11 +16,11 @@ on:
         description: The runner uses to build linux-amd64 artifacts
         default: ec2-c6i.4xlarge-amd64
         options:
-          - ubuntu-22.04
+          - ubuntu-20.04
-          - ubuntu-22.04-8-cores
+          - ubuntu-20.04-8-cores
-          - ubuntu-22.04-16-cores
+          - ubuntu-20.04-16-cores
-          - ubuntu-22.04-32-cores
+          - ubuntu-20.04-32-cores
-          - ubuntu-22.04-64-cores
+          - ubuntu-20.04-64-cores
          - ec2-c6i.xlarge-amd64 # 4C8G
          - ec2-c6i.2xlarge-amd64 # 8C16G
          - ec2-c6i.4xlarge-amd64 # 16C32G
@@ -56,11 +55,6 @@ on:
         description: Build and push images to DockerHub and ACR
         required: false
         default: true
-      upload_artifacts_to_s3:
-        type: boolean
-        description: Whether upload artifacts to s3
-        required: false
-        default: false
       cargo_profile:
         type: choice
         description: The cargo profile to use in building GreptimeDB.
@@ -82,14 +76,20 @@ env:
 
   NIGHTLY_RELEASE_PREFIX: nightly
 
+  # Use the different image name to avoid conflict with the release images.
+  IMAGE_NAME: greptimedb-dev
+
   # The source code will check out in the following path: '${WORKING_DIR}/dev/greptime'.
   CHECKOUT_GREPTIMEDB_PATH: dev/greptimedb
 
+permissions:
+  issues: write
+
 jobs:
   allocate-runners:
     name: Allocate runners
     if: ${{ github.repository == 'GreptimeTeam/greptimedb' }}
-    runs-on: ubuntu-latest
+    runs-on: ubuntu-20.04
     outputs:
       linux-amd64-runner: ${{ steps.start-linux-amd64-runner.outputs.label }}
       linux-arm64-runner: ${{ steps.start-linux-arm64-runner.outputs.label }}
@@ -107,7 +107,6 @@ jobs:
         uses: actions/checkout@v4
         with:
           fetch-depth: 0
-          persist-credentials: false
 
       - name: Create version
         id: create-version
@@ -162,7 +161,6 @@ jobs:
         uses: actions/checkout@v4
         with:
           fetch-depth: 0
-          persist-credentials: false
 
       - name: Checkout greptimedb
         uses: actions/checkout@v4
@@ -170,7 +168,6 @@ jobs:
           repository: ${{ inputs.repository }}
           ref: ${{ inputs.commit }}
           path: ${{ env.CHECKOUT_GREPTIMEDB_PATH }}
-          persist-credentials: true
 
       - uses: ./.github/actions/build-linux-artifacts
         with:
@@ -180,9 +177,6 @@ jobs:
           disable-run-tests: ${{ env.DISABLE_RUN_TESTS }}
           dev-mode: true # Only build the standard greptime binary.
           working-dir: ${{ env.CHECKOUT_GREPTIMEDB_PATH }}
-          image-registry: ${{ vars.ECR_IMAGE_REGISTRY }}
-          image-namespace: ${{ vars.ECR_IMAGE_NAMESPACE }}
-          large-page-size: ${{ inputs.large-page-size }}
 
   build-linux-arm64-artifacts:
     name: Build linux-arm64 artifacts
@@ -196,7 +190,6 @@ jobs:
         uses: actions/checkout@v4
         with:
           fetch-depth: 0
-          persist-credentials: false
 
       - name: Checkout greptimedb
         uses: actions/checkout@v4
@@ -204,7 +197,6 @@ jobs:
           repository: ${{ inputs.repository }}
           ref: ${{ inputs.commit }}
           path: ${{ env.CHECKOUT_GREPTIMEDB_PATH }}
-          persist-credentials: true
 
       - uses: ./.github/actions/build-linux-artifacts
         with:
@@ -214,9 +206,6 @@ jobs:
           disable-run-tests: ${{ env.DISABLE_RUN_TESTS }}
           dev-mode: true # Only build the standard greptime binary.
           working-dir: ${{ env.CHECKOUT_GREPTIMEDB_PATH }}
-          image-registry: ${{ vars.ECR_IMAGE_REGISTRY }}
-          image-namespace: ${{ vars.ECR_IMAGE_NAMESPACE }}
-          large-page-size: ${{ inputs.large-page-size }}
 
   release-images-to-dockerhub:
     name: Build and push images to DockerHub
@@ -226,34 +215,26 @@ jobs:
       build-linux-amd64-artifacts,
       build-linux-arm64-artifacts,
     ]
-    runs-on: ubuntu-latest
+    runs-on: ubuntu-20.04
    outputs:
      build-result: ${{ steps.set-build-result.outputs.build-result }}
    steps:
      - uses: actions/checkout@v4
        with:
          fetch-depth: 0
-          persist-credentials: false
 
      - name: Build and push images to dockerhub
        uses: ./.github/actions/build-images
        with:
          image-registry: docker.io
          image-namespace: ${{ vars.IMAGE_NAMESPACE }}
-          image-name: ${{ vars.DEV_BUILD_IMAGE_NAME }}
+          image-name: ${{ env.IMAGE_NAME }}
          image-registry-username: ${{ secrets.DOCKERHUB_USERNAME }}
          image-registry-password: ${{ secrets.DOCKERHUB_TOKEN }}
          version: ${{ needs.allocate-runners.outputs.version }}
          push-latest-tag: false # Don't push the latest tag to registry.
          dev-mode: true # Only build the standard images.
 
-      - name: Echo Docker image tag to step summary
-        run: |
-          echo "## Docker Image Tag" >> $GITHUB_STEP_SUMMARY
-          echo "Image Tag: \`${{ needs.allocate-runners.outputs.version }}\`" >> $GITHUB_STEP_SUMMARY
-          echo "Full Image Name: \`docker.io/${{ vars.IMAGE_NAMESPACE }}/${{ vars.DEV_BUILD_IMAGE_NAME }}:${{ needs.allocate-runners.outputs.version }}\`" >> $GITHUB_STEP_SUMMARY
-          echo "Pull Command: \`docker pull docker.io/${{ vars.IMAGE_NAMESPACE }}/${{ vars.DEV_BUILD_IMAGE_NAME }}:${{ needs.allocate-runners.outputs.version }}\`" >> $GITHUB_STEP_SUMMARY
-
      - name: Set build result
        id: set-build-result
        run: |
@@ -266,20 +247,19 @@ jobs:
       allocate-runners,
       release-images-to-dockerhub,
     ]
-    runs-on: ubuntu-latest
+    runs-on: ubuntu-20.04
    continue-on-error: true
    steps:
      - uses: actions/checkout@v4
        with:
          fetch-depth: 0
-          persist-credentials: false
 
      - name: Release artifacts to CN region
        uses: ./.github/actions/release-cn-artifacts
        with:
          src-image-registry: docker.io
          src-image-namespace: ${{ vars.IMAGE_NAMESPACE }}
-          src-image-name: ${{ vars.DEV_BUILD_IMAGE_NAME }}
+          src-image-name: ${{ env.IMAGE_NAME }}
          dst-image-registry-username: ${{ secrets.ALICLOUD_USERNAME }}
          dst-image-registry-password: ${{ secrets.ALICLOUD_PASSWORD }}
          dst-image-registry: ${{ vars.ACR_IMAGE_REGISTRY }}
@@ -289,7 +269,6 @@ jobs:
          aws-cn-access-key-id: ${{ secrets.AWS_CN_ACCESS_KEY_ID }}
          aws-cn-secret-access-key: ${{ secrets.AWS_CN_SECRET_ACCESS_KEY }}
          aws-cn-region: ${{ vars.AWS_RELEASE_BUCKET_REGION }}
-          upload-to-s3: ${{ inputs.upload_artifacts_to_s3 }}
          dev-mode: true # Only build the standard images(exclude centos images).
          push-latest-tag: false # Don't push the latest tag to registry.
          update-version-info: false # Don't update the version info in S3.
@@ -298,7 +277,7 @@ jobs:
    name: Stop linux-amd64 runner
    # Only run this job when the runner is allocated.
    if: ${{ always() }}
-    runs-on: ubuntu-latest
+    runs-on: ubuntu-20.04
    needs: [
      allocate-runners,
      build-linux-amd64-artifacts,
@@ -308,7 +287,6 @@ jobs:
        uses: actions/checkout@v4
        with:
          fetch-depth: 0
-          persist-credentials: false
 
      - name: Stop EC2 runner
        uses: ./.github/actions/stop-runner
@@ -324,7 +302,7 @@ jobs:
    name: Stop linux-arm64 runner
    # Only run this job when the runner is allocated.
    if: ${{ always() }}
-    runs-on: ubuntu-latest
+    runs-on: ubuntu-20.04
    needs: [
      allocate-runners,
      build-linux-arm64-artifacts,
@@ -334,7 +312,6 @@ jobs:
        uses: actions/checkout@v4
        with:
          fetch-depth: 0
-          persist-credentials: false
 
      - name: Stop EC2 runner
        uses: ./.github/actions/stop-runner
@@ -352,17 +329,11 @@ jobs:
    needs: [
      release-images-to-dockerhub
    ]
-    runs-on: ubuntu-latest
+    runs-on: ubuntu-20.04
-    permissions:
-      issues: write
-
    env:
      SLACK_WEBHOOK_URL: ${{ secrets.SLACK_WEBHOOK_URL_DEVELOP_CHANNEL }}
    steps:
      - uses: actions/checkout@v4
-        with:
-          fetch-depth: 0
-          persist-credentials: false
      - uses: ./.github/actions/setup-cyborg
      - name: Report CI status
        id: report-ci-status
643
.github/workflows/develop.yml
vendored
@@ -1,6 +1,4 @@
|
|||||||
on:
|
on:
|
||||||
schedule:
|
|
||||||
- cron: "0 15 * * 1-5"
|
|
||||||
merge_group:
|
merge_group:
|
||||||
pull_request:
|
pull_request:
|
||||||
types: [ opened, synchronize, reopened, ready_for_review ]
|
types: [ opened, synchronize, reopened, ready_for_review ]
|
||||||
@@ -12,7 +10,17 @@ on:
|
|||||||
- 'docker/**'
|
- 'docker/**'
|
||||||
- '.gitignore'
|
- '.gitignore'
|
||||||
- 'grafana/**'
|
- 'grafana/**'
|
||||||
- 'Makefile'
|
push:
|
||||||
|
branches:
|
||||||
|
- main
|
||||||
|
paths-ignore:
|
||||||
|
- 'docs/**'
|
||||||
|
- 'config/**'
|
||||||
|
- '**.md'
|
||||||
|
- '.dockerignore'
|
||||||
|
- 'docker/**'
|
||||||
|
- '.gitignore'
|
||||||
|
- 'grafana/**'
|
||||||
workflow_dispatch:
|
workflow_dispatch:
|
||||||
|
|
||||||
name: CI
|
name: CI
|
||||||
@@ -21,15 +29,15 @@ concurrency:
|
|||||||
group: ${{ github.workflow }}-${{ github.head_ref || github.run_id }}
|
group: ${{ github.workflow }}-${{ github.head_ref || github.run_id }}
|
||||||
cancel-in-progress: true
|
cancel-in-progress: true
|
||||||
|
|
||||||
|
env:
|
||||||
|
RUST_TOOLCHAIN: nightly-2024-04-20
|
||||||
|
|
||||||
jobs:
|
jobs:
|
||||||
check-typos-and-docs:
|
check-typos-and-docs:
|
||||||
if: ${{ github.repository == 'GreptimeTeam/greptimedb' }}
|
|
||||||
name: Check typos and docs
|
name: Check typos and docs
|
||||||
runs-on: ubuntu-latest
|
runs-on: ubuntu-20.04
|
||||||
steps:
|
steps:
|
||||||
- uses: actions/checkout@v4
|
- uses: actions/checkout@v4
|
||||||
with:
|
|
||||||
persist-credentials: false
|
|
||||||
- uses: crate-ci/typos@master
|
- uses: crate-ci/typos@master
|
||||||
- name: Check the config docs
|
- name: Check the config docs
|
||||||
run: |
|
run: |
|
||||||
@@ -38,86 +46,81 @@ jobs:
|
|||||||
|| (echo "'config/config.md' is not up-to-date, please run 'make config-docs'." && exit 1)
|
|| (echo "'config/config.md' is not up-to-date, please run 'make config-docs'." && exit 1)
|
||||||
|
|
||||||
license-header-check:
|
license-header-check:
|
||||||
if: ${{ github.repository == 'GreptimeTeam/greptimedb' }}
|
runs-on: ubuntu-20.04
|
||||||
runs-on: ubuntu-latest
|
|
||||||
name: Check License Header
|
name: Check License Header
|
||||||
steps:
|
steps:
|
||||||
- uses: actions/checkout@v4
|
- uses: actions/checkout@v4
|
||||||
with:
|
|
||||||
persist-credentials: false
|
|
||||||
- uses: korandoru/hawkeye@v5
|
- uses: korandoru/hawkeye@v5
|
||||||
|
|
||||||
check:
|
check:
|
||||||
if: ${{ github.repository == 'GreptimeTeam/greptimedb' }}
|
|
||||||
name: Check
|
name: Check
|
||||||
runs-on: ${{ matrix.os }}
|
runs-on: ${{ matrix.os }}
|
||||||
strategy:
|
strategy:
|
||||||
matrix:
|
matrix:
|
||||||
os: [ ubuntu-latest ]
|
os: [ windows-2022, ubuntu-20.04 ]
|
||||||
timeout-minutes: 60
|
timeout-minutes: 60
|
||||||
steps:
|
steps:
|
||||||
- uses: actions/checkout@v4
|
- uses: actions/checkout@v4
|
||||||
with:
|
|
||||||
persist-credentials: false
|
|
||||||
- uses: arduino/setup-protoc@v3
|
- uses: arduino/setup-protoc@v3
|
||||||
with:
|
with:
|
||||||
repo-token: ${{ secrets.GITHUB_TOKEN }}
|
repo-token: ${{ secrets.GITHUB_TOKEN }}
|
||||||
- uses: actions-rust-lang/setup-rust-toolchain@v1
|
- uses: dtolnay/rust-toolchain@master
|
||||||
|
with:
|
||||||
|
toolchain: ${{ env.RUST_TOOLCHAIN }}
|
||||||
- name: Rust Cache
|
- name: Rust Cache
|
||||||
uses: Swatinem/rust-cache@v2
|
uses: Swatinem/rust-cache@v2
|
||||||
with:
|
with:
|
||||||
# Shares across multiple jobs
|
# Shares across multiple jobs
|
||||||
# Shares with `Clippy` job
|
# Shares with `Clippy` job
|
||||||
shared-key: "check-lint"
|
shared-key: "check-lint"
|
||||||
cache-all-crates: "true"
|
|
||||||
save-if: ${{ github.ref == 'refs/heads/main' }}
|
|
||||||
- name: Run cargo check
|
- name: Run cargo check
|
||||||
run: cargo check --locked --workspace --all-targets
|
run: cargo check --locked --workspace --all-targets
|
||||||
|
|
||||||
toml:
|
toml:
|
||||||
if: ${{ github.repository == 'GreptimeTeam/greptimedb' }}
|
|
||||||
name: Toml Check
|
name: Toml Check
|
||||||
runs-on: ubuntu-latest
|
runs-on: ubuntu-20.04
|
||||||
timeout-minutes: 60
|
timeout-minutes: 60
|
||||||
steps:
|
steps:
|
||||||
- uses: actions/checkout@v4
|
- uses: actions/checkout@v4
|
||||||
|
- uses: dtolnay/rust-toolchain@master
|
||||||
with:
|
with:
|
||||||
persist-credentials: false
|
toolchain: stable
|
||||||
- uses: actions-rust-lang/setup-rust-toolchain@v1
|
- name: Rust Cache
|
||||||
|
uses: Swatinem/rust-cache@v2
|
||||||
|
with:
|
||||||
|
# Shares across multiple jobs
|
||||||
|
shared-key: "check-toml"
|
||||||
- name: Install taplo
|
- name: Install taplo
|
||||||
run: cargo +stable install taplo-cli --version ^0.9 --locked --force
|
run: cargo +stable install taplo-cli --version ^0.9 --locked
|
||||||
- name: Run taplo
|
- name: Run taplo
|
||||||
run: taplo format --check
|
run: taplo format --check
|
||||||
|
|
||||||
build:
|
build:
|
||||||
if: ${{ github.repository == 'GreptimeTeam/greptimedb' }}
|
|
||||||
name: Build GreptimeDB binaries
|
name: Build GreptimeDB binaries
|
||||||
runs-on: ${{ matrix.os }}
|
runs-on: ${{ matrix.os }}
|
||||||
strategy:
|
strategy:
|
||||||
matrix:
|
matrix:
|
||||||
os: [ ubuntu-latest ]
|
os: [ ubuntu-20.04 ]
|
||||||
timeout-minutes: 60
|
timeout-minutes: 60
|
||||||
steps:
|
steps:
|
||||||
- uses: actions/checkout@v4
|
- uses: actions/checkout@v4
|
||||||
with:
|
|
||||||
persist-credentials: false
|
|
||||||
- uses: arduino/setup-protoc@v3
|
- uses: arduino/setup-protoc@v3
|
||||||
with:
|
with:
|
||||||
repo-token: ${{ secrets.GITHUB_TOKEN }}
|
repo-token: ${{ secrets.GITHUB_TOKEN }}
|
||||||
- uses: actions-rust-lang/setup-rust-toolchain@v1
|
- uses: dtolnay/rust-toolchain@master
|
||||||
|
with:
|
||||||
|
toolchain: ${{ env.RUST_TOOLCHAIN }}
|
||||||
- uses: Swatinem/rust-cache@v2
|
- uses: Swatinem/rust-cache@v2
|
||||||
with:
|
with:
|
||||||
# Shares across multiple jobs
|
# Shares across multiple jobs
|
||||||
shared-key: "build-binaries"
|
shared-key: "build-binaries"
|
||||||
cache-all-crates: "true"
|
|
||||||
save-if: ${{ github.ref == 'refs/heads/main' }}
|
|
||||||
- name: Install cargo-gc-bin
|
- name: Install cargo-gc-bin
|
||||||
shell: bash
|
shell: bash
|
||||||
run: cargo install cargo-gc-bin --force
|
run: cargo install cargo-gc-bin
|
||||||
- name: Build greptime binaries
|
- name: Build greptime binaries
|
||||||
shell: bash
|
shell: bash
|
||||||
# `cargo gc` will invoke `cargo build` with specified args
|
# `cargo gc` will invoke `cargo build` with specified args
|
||||||
run: cargo gc -- --bin greptime --bin sqlness-runner --features "pg_kvbackend,mysql_kvbackend"
|
run: cargo gc -- --bin greptime --bin sqlness-runner
|
||||||
- name: Pack greptime binaries
|
- name: Pack greptime binaries
|
||||||
shell: bash
|
shell: bash
|
||||||
run: |
|
run: |
|
||||||
@@ -133,50 +136,38 @@ jobs:
|
|||||||
version: current
|
version: current
|
||||||
|
|
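Note: the greptime and sqlness-runner binaries packed above come out of `cargo gc`, which, per the step comment, simply invokes `cargo build` with the given arguments (the tool's name suggests it also prunes unneeded build artifacts, which matters on disk-constrained runners). A rough local sketch of the same build step, under those assumptions:

    cargo install cargo-gc-bin --force
    cargo gc -- --bin greptime --bin sqlness-runner --features "pg_kvbackend,mysql_kvbackend"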
||||||
fuzztest:
|
fuzztest:
|
||||||
if: ${{ github.repository == 'GreptimeTeam/greptimedb' }}
|
|
||||||
name: Fuzz Test
|
name: Fuzz Test
|
||||||
needs: build
|
needs: build
|
||||||
runs-on: ubuntu-latest
|
runs-on: ubuntu-latest
|
||||||
timeout-minutes: 60
|
|
||||||
strategy:
|
strategy:
|
||||||
fail-fast: false
|
|
||||||
matrix:
|
matrix:
|
||||||
target: [ "fuzz_create_table", "fuzz_alter_table", "fuzz_create_database", "fuzz_create_logical_table", "fuzz_alter_logical_table", "fuzz_insert", "fuzz_insert_logical_table" ]
|
target: [ "fuzz_create_table", "fuzz_alter_table", "fuzz_create_database", "fuzz_create_logical_table", "fuzz_alter_logical_table", "fuzz_insert", "fuzz_insert_logical_table" ]
|
||||||
steps:
|
steps:
|
||||||
- name: Remove unused software
|
|
||||||
run: |
|
|
||||||
echo "Disk space before:"
|
|
||||||
df -h
|
|
||||||
[[ -d /usr/share/dotnet ]] && sudo rm -rf /usr/share/dotnet
|
|
||||||
[[ -d /usr/local/lib/android ]] && sudo rm -rf /usr/local/lib/android
|
|
||||||
[[ -d /opt/ghc ]] && sudo rm -rf /opt/ghc
|
|
||||||
[[ -d /opt/hostedtoolcache/CodeQL ]] && sudo rm -rf /opt/hostedtoolcache/CodeQL
|
|
||||||
sudo docker image prune --all --force
|
|
||||||
sudo docker builder prune -a
|
|
||||||
echo "Disk space after:"
|
|
||||||
df -h
|
|
||||||
- uses: actions/checkout@v4
|
- uses: actions/checkout@v4
|
||||||
with:
|
|
||||||
persist-credentials: false
|
|
||||||
- uses: arduino/setup-protoc@v3
|
- uses: arduino/setup-protoc@v3
|
||||||
with:
|
with:
|
||||||
repo-token: ${{ secrets.GITHUB_TOKEN }}
|
repo-token: ${{ secrets.GITHUB_TOKEN }}
|
||||||
- uses: actions-rust-lang/setup-rust-toolchain@v1
|
- uses: dtolnay/rust-toolchain@master
|
||||||
|
with:
|
||||||
|
toolchain: ${{ env.RUST_TOOLCHAIN }}
|
||||||
|
- name: Rust Cache
|
||||||
|
uses: Swatinem/rust-cache@v2
|
||||||
|
with:
|
||||||
|
# Shares across multiple jobs
|
||||||
|
shared-key: "fuzz-test-targets"
|
||||||
- name: Set Rust Fuzz
|
- name: Set Rust Fuzz
|
||||||
shell: bash
|
shell: bash
|
||||||
run: |
|
run: |
|
||||||
sudo apt-get install -y libfuzzer-14-dev
|
sudo apt-get install -y libfuzzer-14-dev
|
||||||
rustup install nightly
|
rustup install nightly
|
||||||
cargo +nightly install cargo-fuzz cargo-gc-bin --force
|
cargo +nightly install cargo-fuzz
|
||||||
- name: Download pre-built binaries
|
- name: Download pre-built binaries
|
||||||
uses: actions/download-artifact@v4
|
uses: actions/download-artifact@v4
|
||||||
with:
|
with:
|
||||||
name: bins
|
name: bins
|
||||||
path: .
|
path: .
|
||||||
- name: Unzip binaries
|
- name: Unzip binaries
|
||||||
run: |
|
run: tar -xvf ./bins.tar.gz
|
||||||
tar -xvf ./bins.tar.gz
|
|
||||||
rm ./bins.tar.gz
|
|
||||||
- name: Run GreptimeDB
|
- name: Run GreptimeDB
|
||||||
run: |
|
run: |
|
||||||
./bins/greptime standalone start&
|
./bins/greptime standalone start&
|
||||||
@@ -190,55 +181,50 @@ jobs:
|
|||||||
max-total-time: 120
|
max-total-time: 120
|
||||||
|
|
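For reference, the fuzz-test action used above boils down to a cargo-fuzz run against a GreptimeDB instance that is already serving MySQL locally. A minimal sketch of reproducing a single target outside CI; the --fuzz-dir location and the libFuzzer flag are assumptions rather than values taken from the action itself:

    # assumes a standalone GreptimeDB is listening on 127.0.0.1:4002
    export CUSTOM_LIBFUZZER_PATH=/usr/lib/llvm-14/lib/libFuzzer.a
    export GT_MYSQL_ADDR=127.0.0.1:4002
    cargo +nightly fuzz run fuzz_create_table --fuzz-dir tests-fuzz -- -max_total_time=120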
||||||
unstable-fuzztest:
|
unstable-fuzztest:
|
||||||
if: ${{ github.repository == 'GreptimeTeam/greptimedb' }}
|
|
||||||
name: Unstable Fuzz Test
|
name: Unstable Fuzz Test
|
||||||
needs: build-greptime-ci
|
needs: build
|
||||||
runs-on: ubuntu-latest
|
runs-on: ubuntu-latest
|
||||||
timeout-minutes: 60
|
|
||||||
strategy:
|
strategy:
|
||||||
fail-fast: false
|
|
||||||
matrix:
|
matrix:
|
||||||
target: [ "unstable_fuzz_create_table_standalone" ]
|
target: [ "unstable_fuzz_create_table_standalone" ]
|
||||||
steps:
|
steps:
|
||||||
- name: Remove unused software
|
|
||||||
run: |
|
|
||||||
echo "Disk space before:"
|
|
||||||
df -h
|
|
||||||
[[ -d /usr/share/dotnet ]] && sudo rm -rf /usr/share/dotnet
|
|
||||||
[[ -d /usr/local/lib/android ]] && sudo rm -rf /usr/local/lib/android
|
|
||||||
[[ -d /opt/ghc ]] && sudo rm -rf /opt/ghc
|
|
||||||
[[ -d /opt/hostedtoolcache/CodeQL ]] && sudo rm -rf /opt/hostedtoolcache/CodeQL
|
|
||||||
sudo docker image prune --all --force
|
|
||||||
sudo docker builder prune -a
|
|
||||||
echo "Disk space after:"
|
|
||||||
df -h
|
|
||||||
- uses: actions/checkout@v4
|
- uses: actions/checkout@v4
|
||||||
with:
|
|
||||||
persist-credentials: false
|
|
||||||
- uses: arduino/setup-protoc@v3
|
- uses: arduino/setup-protoc@v3
|
||||||
with:
|
with:
|
||||||
repo-token: ${{ secrets.GITHUB_TOKEN }}
|
repo-token: ${{ secrets.GITHUB_TOKEN }}
|
||||||
- uses: actions-rust-lang/setup-rust-toolchain@v1
|
- uses: dtolnay/rust-toolchain@master
|
||||||
|
with:
|
||||||
|
toolchain: ${{ env.RUST_TOOLCHAIN }}
|
||||||
|
- name: Rust Cache
|
||||||
|
uses: Swatinem/rust-cache@v2
|
||||||
|
with:
|
||||||
|
# Shares across multiple jobs
|
||||||
|
shared-key: "fuzz-test-targets"
|
||||||
- name: Set Rust Fuzz
|
- name: Set Rust Fuzz
|
||||||
shell: bash
|
shell: bash
|
||||||
run: |
|
run: |
|
||||||
sudo apt update && sudo apt install -y libfuzzer-14-dev
|
sudo apt update && sudo apt install -y libfuzzer-14-dev
|
||||||
cargo install cargo-fuzz cargo-gc-bin --force
|
cargo install cargo-fuzz
|
||||||
- name: Download pre-built binary
|
- name: Download pre-built binaries
|
||||||
uses: actions/download-artifact@v4
|
uses: actions/download-artifact@v4
|
||||||
with:
|
with:
|
||||||
name: bin
|
name: bins
|
||||||
path: .
|
path: .
|
||||||
- name: Unzip binary
|
- name: Unzip binaries
|
||||||
|
run: tar -xvf ./bins.tar.gz
|
||||||
|
- name: Build Fuzz Test
|
||||||
|
shell: bash
|
||||||
run: |
|
run: |
|
||||||
tar -xvf ./bin.tar.gz
|
cd tests-fuzz &
|
||||||
rm ./bin.tar.gz
|
cargo install cargo-gc-bin &
|
||||||
|
cargo gc &
|
||||||
|
cd ..
|
||||||
- name: Run Fuzz Test
|
- name: Run Fuzz Test
|
||||||
uses: ./.github/actions/fuzz-test
|
uses: ./.github/actions/fuzz-test
|
||||||
env:
|
env:
|
||||||
CUSTOM_LIBFUZZER_PATH: /usr/lib/llvm-14/lib/libFuzzer.a
|
CUSTOM_LIBFUZZER_PATH: /usr/lib/llvm-14/lib/libFuzzer.a
|
||||||
GT_MYSQL_ADDR: 127.0.0.1:4002
|
GT_MYSQL_ADDR: 127.0.0.1:4002
|
||||||
GT_FUZZ_BINARY_PATH: ./bin/greptime
|
GT_FUZZ_BINARY_PATH: ./bins/greptime
|
||||||
GT_FUZZ_INSTANCE_ROOT_DIR: /tmp/unstable-greptime/
|
GT_FUZZ_INSTANCE_ROOT_DIR: /tmp/unstable-greptime/
|
||||||
with:
|
with:
|
||||||
target: ${{ matrix.target }}
|
target: ${{ matrix.target }}
|
||||||
@@ -251,41 +237,33 @@ jobs:
|
|||||||
name: unstable-fuzz-logs
|
name: unstable-fuzz-logs
|
||||||
path: /tmp/unstable-greptime/
|
path: /tmp/unstable-greptime/
|
||||||
retention-days: 3
|
retention-days: 3
|
||||||
- name: Describe pods
|
|
||||||
if: failure()
|
|
||||||
shell: bash
|
|
||||||
run: |
|
|
||||||
kubectl describe pod -n my-greptimedb
|
|
||||||
|
|
||||||
build-greptime-ci:
|
build-greptime-ci:
|
||||||
if: ${{ github.repository == 'GreptimeTeam/greptimedb' }}
|
|
||||||
name: Build GreptimeDB binary (profile-CI)
|
name: Build GreptimeDB binary (profile-CI)
|
||||||
runs-on: ${{ matrix.os }}
|
runs-on: ${{ matrix.os }}
|
||||||
strategy:
|
strategy:
|
||||||
matrix:
|
matrix:
|
||||||
os: [ ubuntu-latest ]
|
os: [ ubuntu-20.04 ]
|
||||||
timeout-minutes: 60
|
timeout-minutes: 60
|
||||||
steps:
|
steps:
|
||||||
- uses: actions/checkout@v4
|
- uses: actions/checkout@v4
|
||||||
with:
|
|
||||||
persist-credentials: false
|
|
||||||
- uses: arduino/setup-protoc@v3
|
- uses: arduino/setup-protoc@v3
|
||||||
with:
|
with:
|
||||||
repo-token: ${{ secrets.GITHUB_TOKEN }}
|
repo-token: ${{ secrets.GITHUB_TOKEN }}
|
||||||
- uses: actions-rust-lang/setup-rust-toolchain@v1
|
- uses: dtolnay/rust-toolchain@master
|
||||||
|
with:
|
||||||
|
toolchain: ${{ env.RUST_TOOLCHAIN }}
|
||||||
- uses: Swatinem/rust-cache@v2
|
- uses: Swatinem/rust-cache@v2
|
||||||
with:
|
with:
|
||||||
# Shares across multiple jobs
|
# Shares across multiple jobs
|
||||||
shared-key: "build-greptime-ci"
|
shared-key: "build-greptime-ci"
|
||||||
cache-all-crates: "true"
|
|
||||||
save-if: ${{ github.ref == 'refs/heads/main' }}
|
|
||||||
- name: Install cargo-gc-bin
|
- name: Install cargo-gc-bin
|
||||||
shell: bash
|
shell: bash
|
||||||
run: cargo install cargo-gc-bin --force
|
run: cargo install cargo-gc-bin
|
||||||
- name: Build greptime binary
|
- name: Build greptime bianry
|
||||||
shell: bash
|
shell: bash
|
||||||
# `cargo gc` will invoke `cargo build` with specified args
|
# `cargo gc` will invoke `cargo build` with specified args
|
||||||
run: cargo gc --profile ci -- --bin greptime --features "pg_kvbackend,mysql_kvbackend"
|
run: cargo build --bin greptime --profile ci
|
||||||
- name: Pack greptime binary
|
- name: Pack greptime binary
|
||||||
shell: bash
|
shell: bash
|
||||||
run: |
|
run: |
|
||||||
@@ -300,57 +278,36 @@ jobs:
|
|||||||
version: current
|
version: current
|
||||||
|
|
||||||
distributed-fuzztest:
|
distributed-fuzztest:
|
||||||
if: ${{ github.repository == 'GreptimeTeam/greptimedb' }}
|
name: Fuzz Test (Distributed, Disk)
|
||||||
name: Fuzz Test (Distributed, ${{ matrix.mode.name }}, ${{ matrix.target }})
|
|
||||||
runs-on: ubuntu-latest
|
runs-on: ubuntu-latest
|
||||||
needs: build-greptime-ci
|
needs: build-greptime-ci
|
||||||
timeout-minutes: 60
|
|
||||||
strategy:
|
strategy:
|
||||||
fail-fast: false
|
|
||||||
matrix:
|
matrix:
|
||||||
target: [ "fuzz_create_table", "fuzz_alter_table", "fuzz_create_database", "fuzz_create_logical_table", "fuzz_alter_logical_table", "fuzz_insert", "fuzz_insert_logical_table" ]
|
target: [ "fuzz_create_table", "fuzz_alter_table", "fuzz_create_database", "fuzz_create_logical_table", "fuzz_alter_logical_table", "fuzz_insert", "fuzz_insert_logical_table" ]
|
||||||
mode:
|
|
||||||
- name: "Remote WAL"
|
|
||||||
minio: true
|
|
||||||
kafka: true
|
|
||||||
values: "with-remote-wal.yaml"
|
|
||||||
steps:
|
steps:
|
||||||
- name: Remove unused software
|
|
||||||
run: |
|
|
||||||
echo "Disk space before:"
|
|
||||||
df -h
|
|
||||||
[[ -d /usr/share/dotnet ]] && sudo rm -rf /usr/share/dotnet
|
|
||||||
[[ -d /usr/local/lib/android ]] && sudo rm -rf /usr/local/lib/android
|
|
||||||
[[ -d /opt/ghc ]] && sudo rm -rf /opt/ghc
|
|
||||||
[[ -d /opt/hostedtoolcache/CodeQL ]] && sudo rm -rf /opt/hostedtoolcache/CodeQL
|
|
||||||
sudo docker image prune --all --force
|
|
||||||
sudo docker builder prune -a
|
|
||||||
echo "Disk space after:"
|
|
||||||
df -h
|
|
||||||
- uses: actions/checkout@v4
|
- uses: actions/checkout@v4
|
||||||
with:
|
|
||||||
persist-credentials: false
|
|
||||||
- name: Setup Kind
|
- name: Setup Kind
|
||||||
uses: ./.github/actions/setup-kind
|
uses: ./.github/actions/setup-kind
|
||||||
- if: matrix.mode.minio
|
- name: Setup Etcd cluser
|
||||||
name: Setup Minio
|
|
||||||
uses: ./.github/actions/setup-minio
|
|
||||||
- if: matrix.mode.kafka
|
|
||||||
name: Setup Kafka cluster
|
|
||||||
uses: ./.github/actions/setup-kafka-cluster
|
|
||||||
- name: Setup Etcd cluster
|
|
||||||
uses: ./.github/actions/setup-etcd-cluster
|
uses: ./.github/actions/setup-etcd-cluster
|
||||||
# Prepares for fuzz tests
|
# Prepares for fuzz tests
|
||||||
- uses: arduino/setup-protoc@v3
|
- uses: arduino/setup-protoc@v3
|
||||||
with:
|
with:
|
||||||
repo-token: ${{ secrets.GITHUB_TOKEN }}
|
repo-token: ${{ secrets.GITHUB_TOKEN }}
|
||||||
- uses: actions-rust-lang/setup-rust-toolchain@v1
|
- uses: dtolnay/rust-toolchain@master
|
||||||
|
with:
|
||||||
|
toolchain: ${{ env.RUST_TOOLCHAIN }}
|
||||||
|
- name: Rust Cache
|
||||||
|
uses: Swatinem/rust-cache@v2
|
||||||
|
with:
|
||||||
|
# Shares across multiple jobs
|
||||||
|
shared-key: "fuzz-test-targets"
|
||||||
- name: Set Rust Fuzz
|
- name: Set Rust Fuzz
|
||||||
shell: bash
|
shell: bash
|
||||||
run: |
|
run: |
|
||||||
sudo apt-get install -y libfuzzer-14-dev
|
sudo apt-get install -y libfuzzer-14-dev
|
||||||
rustup install nightly
|
rustup install nightly
|
||||||
cargo +nightly install cargo-fuzz cargo-gc-bin --force
|
cargo +nightly install cargo-fuzz
|
||||||
# Downloads ci image
|
# Downloads ci image
|
||||||
- name: Download pre-built binary
|
- name: Download pre-built binary
|
||||||
uses: actions/download-artifact@v4
|
uses: actions/download-artifact@v4
|
||||||
@@ -358,9 +315,7 @@ jobs:
|
|||||||
name: bin
|
name: bin
|
||||||
path: .
|
path: .
|
||||||
- name: Unzip binary
|
- name: Unzip binary
|
||||||
run: |
|
run: tar -xvf ./bin.tar.gz
|
||||||
tar -xvf ./bin.tar.gz
|
|
||||||
rm ./bin.tar.gz
|
|
||||||
- name: Build and push GreptimeDB image
|
- name: Build and push GreptimeDB image
|
||||||
uses: ./.github/actions/build-and-push-ci-image
|
uses: ./.github/actions/build-and-push-ci-image
|
||||||
- name: Wait for etcd
|
- name: Wait for etcd
|
||||||
@@ -370,22 +325,6 @@ jobs:
|
|||||||
pod -l app.kubernetes.io/instance=etcd \
|
pod -l app.kubernetes.io/instance=etcd \
|
||||||
--timeout=120s \
|
--timeout=120s \
|
||||||
-n etcd-cluster
|
-n etcd-cluster
|
||||||
- if: matrix.mode.minio
|
|
||||||
name: Wait for minio
|
|
||||||
run: |
|
|
||||||
kubectl wait \
|
|
||||||
--for=condition=Ready \
|
|
||||||
pod -l app=minio \
|
|
||||||
--timeout=120s \
|
|
||||||
-n minio
|
|
||||||
- if: matrix.mode.kafka
|
|
||||||
name: Wait for kafka
|
|
||||||
run: |
|
|
||||||
kubectl wait \
|
|
||||||
--for=condition=Ready \
|
|
||||||
pod -l app.kubernetes.io/instance=kafka \
|
|
||||||
--timeout=120s \
|
|
||||||
-n kafka-cluster
|
|
||||||
- name: Print etcd info
|
- name: Print etcd info
|
||||||
shell: bash
|
shell: bash
|
||||||
run: kubectl get all --show-labels -n etcd-cluster
|
run: kubectl get all --show-labels -n etcd-cluster
|
||||||
@@ -394,7 +333,6 @@ jobs:
|
|||||||
uses: ./.github/actions/setup-greptimedb-cluster
|
uses: ./.github/actions/setup-greptimedb-cluster
|
||||||
with:
|
with:
|
||||||
image-registry: localhost:5001
|
image-registry: localhost:5001
|
||||||
values-filename: ${{ matrix.mode.values }}
|
|
||||||
- name: Port forward (mysql)
|
- name: Port forward (mysql)
|
||||||
run: |
|
run: |
|
||||||
kubectl port-forward service/my-greptimedb-frontend 4002:4002 -n my-greptimedb&
|
kubectl port-forward service/my-greptimedb-frontend 4002:4002 -n my-greptimedb&
|
||||||
@@ -411,11 +349,6 @@ jobs:
|
|||||||
shell: bash
|
shell: bash
|
||||||
run: |
|
run: |
|
||||||
kubectl describe nodes
|
kubectl describe nodes
|
||||||
- name: Describe pod
|
|
||||||
if: failure()
|
|
||||||
shell: bash
|
|
||||||
run: |
|
|
||||||
kubectl describe pod -n my-greptimedb
|
|
||||||
- name: Export kind logs
|
- name: Export kind logs
|
||||||
if: failure()
|
if: failure()
|
||||||
shell: bash
|
shell: bash
|
||||||
@@ -425,208 +358,21 @@ jobs:
|
|||||||
if: failure()
|
if: failure()
|
||||||
uses: actions/upload-artifact@v4
|
uses: actions/upload-artifact@v4
|
||||||
with:
|
with:
|
||||||
name: fuzz-tests-kind-logs-${{ matrix.mode.name }}-${{ matrix.target }}
|
name: fuzz-tests-kind-logs-${{ matrix.target }}
|
||||||
path: /tmp/kind
|
path: /tmp/kind
|
||||||
retention-days: 3
|
retention-days: 3
|
||||||
- name: Delete cluster
|
|
||||||
if: success()
|
|
||||||
shell: bash
|
|
||||||
run: |
|
|
||||||
kind delete cluster
|
|
||||||
docker stop $(docker ps -a -q)
|
|
||||||
docker rm $(docker ps -a -q)
|
|
||||||
docker system prune -f
|
|
||||||
|
|
||||||
distributed-fuzztest-with-chaos:
|
|
||||||
if: ${{ github.repository == 'GreptimeTeam/greptimedb' }}
|
|
||||||
name: Fuzz Test with Chaos (Distributed, ${{ matrix.mode.name }}, ${{ matrix.target }})
|
|
||||||
runs-on: ubuntu-latest
|
|
||||||
needs: build-greptime-ci
|
|
||||||
timeout-minutes: 60
|
|
||||||
strategy:
|
|
||||||
fail-fast: false
|
|
||||||
matrix:
|
|
||||||
target: ["fuzz_migrate_mito_regions", "fuzz_migrate_metric_regions", "fuzz_failover_mito_regions", "fuzz_failover_metric_regions"]
|
|
||||||
mode:
|
|
||||||
- name: "Remote WAL"
|
|
||||||
minio: true
|
|
||||||
kafka: true
|
|
||||||
values: "with-remote-wal.yaml"
|
|
||||||
include:
|
|
||||||
- target: "fuzz_migrate_mito_regions"
|
|
||||||
mode:
|
|
||||||
name: "Local WAL"
|
|
||||||
minio: true
|
|
||||||
kafka: false
|
|
||||||
values: "with-minio.yaml"
|
|
||||||
- target: "fuzz_migrate_metric_regions"
|
|
||||||
mode:
|
|
||||||
name: "Local WAL"
|
|
||||||
minio: true
|
|
||||||
kafka: false
|
|
||||||
values: "with-minio.yaml"
|
|
||||||
steps:
|
|
||||||
- name: Remove unused software
|
|
||||||
run: |
|
|
||||||
echo "Disk space before:"
|
|
||||||
df -h
|
|
||||||
[[ -d /usr/share/dotnet ]] && sudo rm -rf /usr/share/dotnet
|
|
||||||
[[ -d /usr/local/lib/android ]] && sudo rm -rf /usr/local/lib/android
|
|
||||||
[[ -d /opt/ghc ]] && sudo rm -rf /opt/ghc
|
|
||||||
[[ -d /opt/hostedtoolcache/CodeQL ]] && sudo rm -rf /opt/hostedtoolcache/CodeQL
|
|
||||||
sudo docker image prune --all --force
|
|
||||||
sudo docker builder prune -a
|
|
||||||
echo "Disk space after:"
|
|
||||||
df -h
|
|
||||||
- uses: actions/checkout@v4
|
|
||||||
with:
|
|
||||||
persist-credentials: false
|
|
||||||
- name: Setup Kind
|
|
||||||
uses: ./.github/actions/setup-kind
|
|
||||||
- name: Setup Chaos Mesh
|
|
||||||
uses: ./.github/actions/setup-chaos
|
|
||||||
- if: matrix.mode.minio
|
|
||||||
name: Setup Minio
|
|
||||||
uses: ./.github/actions/setup-minio
|
|
||||||
- if: matrix.mode.kafka
|
|
||||||
name: Setup Kafka cluster
|
|
||||||
uses: ./.github/actions/setup-kafka-cluster
|
|
||||||
- name: Setup Etcd cluster
|
|
||||||
uses: ./.github/actions/setup-etcd-cluster
|
|
||||||
# Prepares for fuzz tests
|
|
||||||
- uses: arduino/setup-protoc@v3
|
|
||||||
with:
|
|
||||||
repo-token: ${{ secrets.GITHUB_TOKEN }}
|
|
||||||
- uses: actions-rust-lang/setup-rust-toolchain@v1
|
|
||||||
- name: Set Rust Fuzz
|
|
||||||
shell: bash
|
|
||||||
run: |
|
|
||||||
sudo apt-get install -y libfuzzer-14-dev
|
|
||||||
rustup install nightly
|
|
||||||
cargo +nightly install cargo-fuzz cargo-gc-bin --force
|
|
||||||
# Downloads ci image
|
|
||||||
- name: Download pre-built binary
|
|
||||||
uses: actions/download-artifact@v4
|
|
||||||
with:
|
|
||||||
name: bin
|
|
||||||
path: .
|
|
||||||
- name: Unzip binary
|
|
||||||
run: |
|
|
||||||
tar -xvf ./bin.tar.gz
|
|
||||||
rm ./bin.tar.gz
|
|
||||||
- name: Build and push GreptimeDB image
|
|
||||||
uses: ./.github/actions/build-and-push-ci-image
|
|
||||||
- name: Wait for etcd
|
|
||||||
run: |
|
|
||||||
kubectl wait \
|
|
||||||
--for=condition=Ready \
|
|
||||||
pod -l app.kubernetes.io/instance=etcd \
|
|
||||||
--timeout=120s \
|
|
||||||
-n etcd-cluster
|
|
||||||
- if: matrix.mode.minio
|
|
||||||
name: Wait for minio
|
|
||||||
run: |
|
|
||||||
kubectl wait \
|
|
||||||
--for=condition=Ready \
|
|
||||||
pod -l app=minio \
|
|
||||||
--timeout=120s \
|
|
||||||
-n minio
|
|
||||||
- if: matrix.mode.kafka
|
|
||||||
name: Wait for kafka
|
|
||||||
run: |
|
|
||||||
kubectl wait \
|
|
||||||
--for=condition=Ready \
|
|
||||||
pod -l app.kubernetes.io/instance=kafka \
|
|
||||||
--timeout=120s \
|
|
||||||
-n kafka-cluster
|
|
||||||
- name: Print etcd info
|
|
||||||
shell: bash
|
|
||||||
run: kubectl get all --show-labels -n etcd-cluster
|
|
||||||
# Setup cluster for test
|
|
||||||
- name: Setup GreptimeDB cluster
|
|
||||||
uses: ./.github/actions/setup-greptimedb-cluster
|
|
||||||
with:
|
|
||||||
image-registry: localhost:5001
|
|
||||||
values-filename: ${{ matrix.mode.values }}
|
|
||||||
enable-region-failover: ${{ matrix.mode.kafka }}
|
|
||||||
- name: Port forward (mysql)
|
|
||||||
run: |
|
|
||||||
kubectl port-forward service/my-greptimedb-frontend 4002:4002 -n my-greptimedb&
|
|
||||||
- name: Fuzz Test
|
|
||||||
uses: ./.github/actions/fuzz-test
|
|
||||||
env:
|
|
||||||
CUSTOM_LIBFUZZER_PATH: /usr/lib/llvm-14/lib/libFuzzer.a
|
|
||||||
GT_MYSQL_ADDR: 127.0.0.1:4002
|
|
||||||
with:
|
|
||||||
target: ${{ matrix.target }}
|
|
||||||
max-total-time: 120
|
|
||||||
- name: Describe Nodes
|
|
||||||
if: failure()
|
|
||||||
shell: bash
|
|
||||||
run: |
|
|
||||||
kubectl describe nodes
|
|
||||||
- name: Describe pods
|
|
||||||
if: failure()
|
|
||||||
shell: bash
|
|
||||||
run: |
|
|
||||||
kubectl describe pod -n my-greptimedb
|
|
||||||
- name: Export kind logs
|
|
||||||
if: failure()
|
|
||||||
shell: bash
|
|
||||||
run: |
|
|
||||||
kind export logs /tmp/kind
|
|
||||||
- name: Upload logs
|
|
||||||
if: failure()
|
|
||||||
uses: actions/upload-artifact@v4
|
|
||||||
with:
|
|
||||||
name: fuzz-tests-kind-logs-${{ matrix.mode.name }}-${{ matrix.target }}
|
|
||||||
path: /tmp/kind
|
|
||||||
retention-days: 3
|
|
||||||
- name: Delete cluster
|
|
||||||
if: success()
|
|
||||||
shell: bash
|
|
||||||
run: |
|
|
||||||
kind delete cluster
|
|
||||||
docker stop $(docker ps -a -q)
|
|
||||||
docker rm $(docker ps -a -q)
|
|
||||||
docker system prune -f
|
|
||||||
|
|
||||||
sqlness:
|
sqlness:
|
||||||
if: ${{ github.repository == 'GreptimeTeam/greptimedb' }}
|
name: Sqlness Test
|
||||||
name: Sqlness Test (${{ matrix.mode.name }})
|
|
||||||
needs: build
|
needs: build
|
||||||
runs-on: ${{ matrix.os }}
|
runs-on: ${{ matrix.os }}
|
||||||
strategy:
|
strategy:
|
||||||
fail-fast: false
|
|
||||||
matrix:
|
matrix:
|
||||||
os: [ ubuntu-latest ]
|
os: [ ubuntu-20.04 ]
|
||||||
mode:
|
|
||||||
- name: "Basic"
|
|
||||||
opts: ""
|
|
||||||
kafka: false
|
|
||||||
- name: "Remote WAL"
|
|
||||||
opts: "-w kafka -k 127.0.0.1:9092"
|
|
||||||
kafka: true
|
|
||||||
- name: "PostgreSQL KvBackend"
|
|
||||||
opts: "--setup-pg"
|
|
||||||
kafka: false
|
|
||||||
- name: "MySQL Kvbackend"
|
|
||||||
opts: "--setup-mysql"
|
|
||||||
kafka: false
|
|
||||||
- name: "Flat format"
|
|
||||||
opts: "--enable-flat-format"
|
|
||||||
kafka: false
|
|
||||||
timeout-minutes: 60
|
timeout-minutes: 60
|
||||||
steps:
|
steps:
|
||||||
- uses: actions/checkout@v4
|
- uses: actions/checkout@v4
|
||||||
with:
|
|
||||||
persist-credentials: false
|
|
||||||
|
|
||||||
- if: matrix.mode.kafka
|
|
||||||
name: Setup kafka server
|
|
||||||
working-directory: tests-integration/fixtures
|
|
||||||
run: ../../.github/scripts/pull-test-deps-images.sh && docker compose up -d --wait kafka
|
|
||||||
|
|
||||||
- name: Download pre-built binaries
|
- name: Download pre-built binaries
|
||||||
uses: actions/download-artifact@v4
|
uses: actions/download-artifact@v4
|
||||||
with:
|
with:
|
||||||
@@ -635,47 +381,78 @@ jobs:
|
|||||||
- name: Unzip binaries
|
- name: Unzip binaries
|
||||||
run: tar -xvf ./bins.tar.gz
|
run: tar -xvf ./bins.tar.gz
|
||||||
- name: Run sqlness
|
- name: Run sqlness
|
||||||
run: RUST_BACKTRACE=1 ./bins/sqlness-runner bare ${{ matrix.mode.opts }} -c ./tests/cases --bins-dir ./bins --preserve-state
|
run: RUST_BACKTRACE=1 ./bins/sqlness-runner -c ./tests/cases --bins-dir ./bins --preserve-state
|
||||||
- name: Upload sqlness logs
|
- name: Upload sqlness logs
|
||||||
if: failure()
|
if: always()
|
||||||
uses: actions/upload-artifact@v4
|
uses: actions/upload-artifact@v4
|
||||||
with:
|
with:
|
||||||
name: sqlness-logs-${{ matrix.mode.name }}
|
name: sqlness-logs
|
||||||
|
path: /tmp/sqlness*
|
||||||
|
retention-days: 3
|
||||||
|
|
||||||
|
sqlness-kafka-wal:
|
||||||
|
name: Sqlness Test with Kafka Wal
|
||||||
|
needs: build
|
||||||
|
runs-on: ${{ matrix.os }}
|
||||||
|
strategy:
|
||||||
|
matrix:
|
||||||
|
os: [ ubuntu-20.04 ]
|
||||||
|
timeout-minutes: 60
|
||||||
|
steps:
|
||||||
|
- uses: actions/checkout@v4
|
||||||
|
- name: Download pre-built binaries
|
||||||
|
uses: actions/download-artifact@v4
|
||||||
|
with:
|
||||||
|
name: bins
|
||||||
|
path: .
|
||||||
|
- name: Unzip binaries
|
||||||
|
run: tar -xvf ./bins.tar.gz
|
||||||
|
- name: Setup kafka server
|
||||||
|
working-directory: tests-integration/fixtures/kafka
|
||||||
|
run: docker compose -f docker-compose-standalone.yml up -d --wait
|
||||||
|
- name: Run sqlness
|
||||||
|
run: RUST_BACKTRACE=1 ./bins/sqlness-runner -w kafka -k 127.0.0.1:9092 -c ./tests/cases --bins-dir ./bins --preserve-state
|
||||||
|
- name: Upload sqlness logs
|
||||||
|
if: always()
|
||||||
|
uses: actions/upload-artifact@v4
|
||||||
|
with:
|
||||||
|
name: sqlness-logs-with-kafka-wal
|
||||||
path: /tmp/sqlness*
|
path: /tmp/sqlness*
|
||||||
retention-days: 3
|
retention-days: 3
|
||||||
|
|
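All of the sqlness modes above share a single runner invocation and differ only in the option string carried by the matrix. A local sketch, assuming the packed binaries were extracted to ./bins and, for the Remote WAL row, that a Kafka broker is reachable on 127.0.0.1:9092:

    # Basic mode
    RUST_BACKTRACE=1 ./bins/sqlness-runner bare -c ./tests/cases --bins-dir ./bins --preserve-state
    # Remote WAL mode
    RUST_BACKTRACE=1 ./bins/sqlness-runner bare -w kafka -k 127.0.0.1:9092 -c ./tests/cases --bins-dir ./bins --preserve-state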
||||||
fmt:
|
fmt:
|
||||||
if: ${{ github.repository == 'GreptimeTeam/greptimedb' }}
|
|
||||||
name: Rustfmt
|
name: Rustfmt
|
||||||
runs-on: ubuntu-latest
|
runs-on: ubuntu-20.04
|
||||||
timeout-minutes: 60
|
timeout-minutes: 60
|
||||||
steps:
|
steps:
|
||||||
- uses: actions/checkout@v4
|
- uses: actions/checkout@v4
|
||||||
with:
|
|
||||||
persist-credentials: false
|
|
||||||
- uses: arduino/setup-protoc@v3
|
- uses: arduino/setup-protoc@v3
|
||||||
with:
|
with:
|
||||||
repo-token: ${{ secrets.GITHUB_TOKEN }}
|
repo-token: ${{ secrets.GITHUB_TOKEN }}
|
||||||
- uses: actions-rust-lang/setup-rust-toolchain@v1
|
- uses: dtolnay/rust-toolchain@master
|
||||||
with:
|
with:
|
||||||
|
toolchain: ${{ env.RUST_TOOLCHAIN }}
|
||||||
components: rustfmt
|
components: rustfmt
|
||||||
- name: Check format
|
- name: Rust Cache
|
||||||
run: make fmt-check
|
uses: Swatinem/rust-cache@v2
|
||||||
|
with:
|
||||||
|
# Shares across multiple jobs
|
||||||
|
shared-key: "check-rust-fmt"
|
||||||
|
- name: Run cargo fmt
|
||||||
|
run: cargo fmt --all -- --check
|
||||||
|
|
||||||
clippy:
|
clippy:
|
||||||
if: ${{ github.repository == 'GreptimeTeam/greptimedb' }}
|
|
||||||
name: Clippy
|
name: Clippy
|
||||||
runs-on: ubuntu-latest
|
runs-on: ubuntu-20.04
|
||||||
timeout-minutes: 60
|
timeout-minutes: 60
|
||||||
steps:
|
steps:
|
||||||
- uses: actions/checkout@v4
|
- uses: actions/checkout@v4
|
||||||
with:
|
|
||||||
persist-credentials: false
|
|
||||||
- uses: arduino/setup-protoc@v3
|
- uses: arduino/setup-protoc@v3
|
||||||
with:
|
with:
|
||||||
repo-token: ${{ secrets.GITHUB_TOKEN }}
|
repo-token: ${{ secrets.GITHUB_TOKEN }}
|
||||||
- uses: actions-rust-lang/setup-rust-toolchain@v1
|
- uses: dtolnay/rust-toolchain@master
|
||||||
with:
|
with:
|
||||||
|
toolchain: ${{ env.RUST_TOOLCHAIN }}
|
||||||
components: clippy
|
components: clippy
|
||||||
- name: Rust Cache
|
- name: Rust Cache
|
||||||
uses: Swatinem/rust-cache@v2
|
uses: Swatinem/rust-cache@v2
|
||||||
@@ -683,158 +460,63 @@ jobs:
|
|||||||
# Shares across multiple jobs
|
# Shares across multiple jobs
|
||||||
# Shares with `Check` job
|
# Shares with `Check` job
|
||||||
shared-key: "check-lint"
|
shared-key: "check-lint"
|
||||||
cache-all-crates: "true"
|
|
||||||
save-if: ${{ github.ref == 'refs/heads/main' }}
|
|
||||||
- name: Run cargo clippy
|
- name: Run cargo clippy
|
||||||
run: make clippy
|
run: make clippy
|
||||||
|
|
||||||
check-udeps:
|
|
||||||
if: ${{ github.repository == 'GreptimeTeam/greptimedb' }}
|
|
||||||
name: Check Unused Dependencies
|
|
||||||
runs-on: ubuntu-latest
|
|
||||||
timeout-minutes: 60
|
|
||||||
steps:
|
|
||||||
- uses: actions/checkout@v4
|
|
||||||
with:
|
|
||||||
persist-credentials: false
|
|
||||||
- uses: arduino/setup-protoc@v3
|
|
||||||
with:
|
|
||||||
repo-token: ${{ secrets.GITHUB_TOKEN }}
|
|
||||||
- uses: actions-rust-lang/setup-rust-toolchain@v1
|
|
||||||
- name: Rust Cache
|
|
||||||
uses: Swatinem/rust-cache@v2
|
|
||||||
with:
|
|
||||||
shared-key: "check-udeps"
|
|
||||||
cache-all-crates: "true"
|
|
||||||
save-if: ${{ github.ref == 'refs/heads/main' }}
|
|
||||||
- name: Install cargo-udeps
|
|
||||||
run: cargo install cargo-udeps --locked
|
|
||||||
- name: Check unused dependencies
|
|
||||||
run: make check-udeps
|
|
||||||
|
|
||||||
conflict-check:
|
|
||||||
if: ${{ github.repository == 'GreptimeTeam/greptimedb' }}
|
|
||||||
name: Check for conflict
|
|
||||||
runs-on: ubuntu-latest
|
|
||||||
steps:
|
|
||||||
- uses: actions/checkout@v4
|
|
||||||
with:
|
|
||||||
persist-credentials: false
|
|
||||||
- name: Merge Conflict Finder
|
|
||||||
uses: olivernybroe/action-conflict-finder@v4.0
|
|
||||||
|
|
||||||
test:
|
|
||||||
if: ${{ github.repository == 'GreptimeTeam/greptimedb' && github.event_name != 'merge_group' }}
|
|
||||||
runs-on: ubuntu-22.04-arm
|
|
||||||
timeout-minutes: 60
|
|
||||||
needs: [conflict-check, clippy, fmt, check-udeps]
|
|
||||||
steps:
|
|
||||||
- uses: actions/checkout@v4
|
|
||||||
with:
|
|
||||||
persist-credentials: false
|
|
||||||
- uses: arduino/setup-protoc@v3
|
|
||||||
with:
|
|
||||||
repo-token: ${{ secrets.GITHUB_TOKEN }}
|
|
||||||
- uses: rui314/setup-mold@v1
|
|
||||||
- name: Install toolchain
|
|
||||||
uses: actions-rust-lang/setup-rust-toolchain@v1
|
|
||||||
with:
|
|
||||||
cache: false
|
|
||||||
- name: Rust Cache
|
|
||||||
uses: Swatinem/rust-cache@v2
|
|
||||||
with:
|
|
||||||
# Shares across multiple jobs
|
|
||||||
shared-key: "coverage-test"
|
|
||||||
cache-all-crates: "true"
|
|
||||||
save-if: ${{ github.ref == 'refs/heads/main' }}
|
|
||||||
- name: Install latest nextest release
|
|
||||||
uses: taiki-e/install-action@nextest
|
|
||||||
|
|
||||||
- name: Setup external services
|
|
||||||
working-directory: tests-integration/fixtures
|
|
||||||
run: ../../.github/scripts/pull-test-deps-images.sh && docker compose up -d --wait
|
|
||||||
|
|
||||||
- name: Run nextest cases
|
|
||||||
run: cargo nextest run --workspace -F dashboard -F pg_kvbackend -F mysql_kvbackend
|
|
||||||
env:
|
|
||||||
CARGO_BUILD_RUSTFLAGS: "-C link-arg=-fuse-ld=mold"
|
|
||||||
RUST_BACKTRACE: 1
|
|
||||||
RUST_MIN_STACK: 8388608 # 8MB
|
|
||||||
CARGO_INCREMENTAL: 0
|
|
||||||
GT_S3_BUCKET: ${{ vars.AWS_CI_TEST_BUCKET }}
|
|
||||||
GT_S3_ACCESS_KEY_ID: ${{ secrets.AWS_CI_TEST_ACCESS_KEY_ID }}
|
|
||||||
GT_S3_ACCESS_KEY: ${{ secrets.AWS_CI_TEST_SECRET_ACCESS_KEY }}
|
|
||||||
GT_S3_REGION: ${{ vars.AWS_CI_TEST_BUCKET_REGION }}
|
|
||||||
GT_MINIO_BUCKET: greptime
|
|
||||||
GT_MINIO_ACCESS_KEY_ID: superpower_ci_user
|
|
||||||
GT_MINIO_ACCESS_KEY: superpower_password
|
|
||||||
GT_MINIO_REGION: us-west-2
|
|
||||||
GT_MINIO_ENDPOINT_URL: http://127.0.0.1:9000
|
|
||||||
GT_ETCD_TLS_ENDPOINTS: https://127.0.0.1:2378
|
|
||||||
GT_ETCD_ENDPOINTS: http://127.0.0.1:2379
|
|
||||||
GT_POSTGRES_ENDPOINTS: postgres://greptimedb:admin@127.0.0.1:5432/postgres
|
|
||||||
GT_POSTGRES15_ENDPOINTS: postgres://test_user:test_password@127.0.0.1:5433/postgres
|
|
||||||
GT_POSTGRES15_SCHEMA: test_schema
|
|
||||||
GT_MYSQL_ENDPOINTS: mysql://greptimedb:admin@127.0.0.1:3306/mysql
|
|
||||||
GT_KAFKA_ENDPOINTS: 127.0.0.1:9092
|
|
||||||
GT_KAFKA_SASL_ENDPOINTS: 127.0.0.1:9093
|
|
||||||
UNITTEST_LOG_DIR: "__unittest_logs"
|
|
||||||
|
|
||||||
coverage:
|
coverage:
|
||||||
if: ${{ github.repository == 'GreptimeTeam/greptimedb' && github.event_name == 'merge_group' }}
|
if: github.event.pull_request.draft == false
|
||||||
runs-on: ubuntu-22.04-8-cores
|
runs-on: ubuntu-20.04-8-cores
|
||||||
timeout-minutes: 60
|
timeout-minutes: 60
|
||||||
steps:
|
steps:
|
||||||
- uses: actions/checkout@v4
|
- uses: actions/checkout@v4
|
||||||
with:
|
|
||||||
persist-credentials: false
|
|
||||||
- uses: arduino/setup-protoc@v3
|
- uses: arduino/setup-protoc@v3
|
||||||
with:
|
with:
|
||||||
repo-token: ${{ secrets.GITHUB_TOKEN }}
|
repo-token: ${{ secrets.GITHUB_TOKEN }}
|
||||||
- uses: rui314/setup-mold@v1
|
- uses: KyleMayes/install-llvm-action@v1
|
||||||
- name: Install toolchain
|
|
||||||
uses: actions-rust-lang/setup-rust-toolchain@v1
|
|
||||||
with:
|
with:
|
||||||
components: llvm-tools
|
version: "14.0"
|
||||||
cache: false
|
- name: Install toolchain
|
||||||
|
uses: dtolnay/rust-toolchain@master
|
||||||
|
with:
|
||||||
|
toolchain: ${{ env.RUST_TOOLCHAIN }}
|
||||||
|
components: llvm-tools-preview
|
||||||
- name: Rust Cache
|
- name: Rust Cache
|
||||||
uses: Swatinem/rust-cache@v2
|
uses: Swatinem/rust-cache@v2
|
||||||
with:
|
with:
|
||||||
# Shares across multiple jobs
|
# Shares across multiple jobs
|
||||||
shared-key: "coverage-test"
|
shared-key: "coverage-test"
|
||||||
save-if: ${{ github.ref == 'refs/heads/main' }}
|
- name: Docker Cache
|
||||||
|
uses: ScribeMD/docker-cache@0.3.7
|
||||||
|
with:
|
||||||
|
key: docker-${{ runner.os }}-coverage
|
||||||
- name: Install latest nextest release
|
- name: Install latest nextest release
|
||||||
uses: taiki-e/install-action@nextest
|
uses: taiki-e/install-action@nextest
|
||||||
- name: Install cargo-llvm-cov
|
- name: Install cargo-llvm-cov
|
||||||
uses: taiki-e/install-action@cargo-llvm-cov
|
uses: taiki-e/install-action@cargo-llvm-cov
|
||||||
|
- name: Install Python
|
||||||
- name: Setup external services
|
uses: actions/setup-python@v5
|
||||||
working-directory: tests-integration/fixtures
|
with:
|
||||||
run: ../../.github/scripts/pull-test-deps-images.sh && docker compose up -d --wait
|
python-version: '3.10'
|
||||||
|
- name: Install PyArrow Package
|
||||||
|
run: pip install pyarrow
|
||||||
|
- name: Setup etcd server
|
||||||
|
working-directory: tests-integration/fixtures/etcd
|
||||||
|
run: docker compose -f docker-compose-standalone.yml up -d --wait
|
||||||
|
- name: Setup kafka server
|
||||||
|
working-directory: tests-integration/fixtures/kafka
|
||||||
|
run: docker compose -f docker-compose-standalone.yml up -d --wait
|
||||||
- name: Run nextest cases
|
- name: Run nextest cases
|
||||||
run: cargo llvm-cov nextest --workspace --lcov --output-path lcov.info -F dashboard -F pg_kvbackend -F mysql_kvbackend
|
run: cargo llvm-cov nextest --workspace --lcov --output-path lcov.info -F pyo3_backend -F dashboard
|
||||||
env:
|
env:
|
||||||
CARGO_BUILD_RUSTFLAGS: "-C link-arg=-fuse-ld=mold"
|
CARGO_BUILD_RUSTFLAGS: "-C link-arg=-fuse-ld=lld"
|
||||||
RUST_BACKTRACE: 1
|
RUST_BACKTRACE: 1
|
||||||
CARGO_INCREMENTAL: 0
|
CARGO_INCREMENTAL: 0
|
||||||
GT_S3_BUCKET: ${{ vars.AWS_CI_TEST_BUCKET }}
|
GT_S3_BUCKET: ${{ vars.AWS_CI_TEST_BUCKET }}
|
||||||
GT_S3_ACCESS_KEY_ID: ${{ secrets.AWS_CI_TEST_ACCESS_KEY_ID }}
|
GT_S3_ACCESS_KEY_ID: ${{ secrets.AWS_CI_TEST_ACCESS_KEY_ID }}
|
||||||
GT_S3_ACCESS_KEY: ${{ secrets.AWS_CI_TEST_SECRET_ACCESS_KEY }}
|
GT_S3_ACCESS_KEY: ${{ secrets.AWS_CI_TEST_SECRET_ACCESS_KEY }}
|
||||||
GT_S3_REGION: ${{ vars.AWS_CI_TEST_BUCKET_REGION }}
|
GT_S3_REGION: ${{ vars.AWS_CI_TEST_BUCKET_REGION }}
|
||||||
GT_MINIO_BUCKET: greptime
|
|
||||||
GT_MINIO_ACCESS_KEY_ID: superpower_ci_user
|
|
||||||
GT_MINIO_ACCESS_KEY: superpower_password
|
|
||||||
GT_MINIO_REGION: us-west-2
|
|
||||||
GT_MINIO_ENDPOINT_URL: http://127.0.0.1:9000
|
|
||||||
GT_ETCD_TLS_ENDPOINTS: https://127.0.0.1:2378
|
|
||||||
GT_ETCD_ENDPOINTS: http://127.0.0.1:2379
|
GT_ETCD_ENDPOINTS: http://127.0.0.1:2379
|
||||||
GT_POSTGRES_ENDPOINTS: postgres://greptimedb:admin@127.0.0.1:5432/postgres
|
|
||||||
GT_POSTGRES15_ENDPOINTS: postgres://test_user:test_password@127.0.0.1:5433/postgres
|
|
||||||
GT_POSTGRES15_SCHEMA: test_schema
|
|
||||||
GT_MYSQL_ENDPOINTS: mysql://greptimedb:admin@127.0.0.1:3306/mysql
|
|
||||||
GT_KAFKA_ENDPOINTS: 127.0.0.1:9092
|
GT_KAFKA_ENDPOINTS: 127.0.0.1:9092
|
||||||
GT_KAFKA_SASL_ENDPOINTS: 127.0.0.1:9093
|
|
||||||
UNITTEST_LOG_DIR: "__unittest_logs"
|
UNITTEST_LOG_DIR: "__unittest_logs"
|
||||||
- name: Codecov upload
|
- name: Codecov upload
|
||||||
uses: codecov/codecov-action@v4
|
uses: codecov/codecov-action@v4
|
||||||
@@ -846,10 +528,9 @@ jobs:
|
|||||||
verbose: true
|
verbose: true
|
||||||
|
|
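The heart of the coverage job is one cargo-llvm-cov command; the rest is environment setup. A minimal local sketch, assuming the docker-compose fixtures under tests-integration/fixtures are already running and both helper tools are installed from crates.io:

    cargo install cargo-nextest cargo-llvm-cov --locked
    cargo llvm-cov nextest --workspace --lcov --output-path lcov.info -F dashboard -F pg_kvbackend -F mysql_kvbackend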
||||||
# compat:
|
# compat:
|
||||||
# if: ${{ github.repository == 'GreptimeTeam/greptimedb' }}
|
|
||||||
# name: Compatibility Test
|
# name: Compatibility Test
|
||||||
# needs: build
|
# needs: build
|
||||||
# runs-on: ubuntu-22.04
|
# runs-on: ubuntu-20.04
|
||||||
# timeout-minutes: 60
|
# timeout-minutes: 60
|
||||||
# steps:
|
# steps:
|
||||||
# - uses: actions/checkout@v4
|
# - uses: actions/checkout@v4
|
||||||
|
|||||||
13
.github/workflows/docbot.yml
vendored
@@ -3,21 +3,16 @@ on:
|
|||||||
pull_request_target:
|
pull_request_target:
|
||||||
types: [opened, edited]
|
types: [opened, edited]
|
||||||
|
|
||||||
concurrency:
|
permissions:
|
||||||
group: ${{ github.workflow }}-${{ github.head_ref || github.run_id }}
|
pull-requests: write
|
||||||
cancel-in-progress: true
|
contents: read
|
||||||
|
|
||||||
jobs:
|
jobs:
|
||||||
docbot:
|
docbot:
|
||||||
runs-on: ubuntu-latest
|
runs-on: ubuntu-20.04
|
||||||
permissions:
|
|
||||||
pull-requests: write
|
|
||||||
contents: read
|
|
||||||
timeout-minutes: 10
|
timeout-minutes: 10
|
||||||
steps:
|
steps:
|
||||||
- uses: actions/checkout@v4
|
- uses: actions/checkout@v4
|
||||||
with:
|
|
||||||
persist-credentials: false
|
|
||||||
- uses: ./.github/actions/setup-cyborg
|
- uses: ./.github/actions/setup-cyborg
|
||||||
- name: Maybe Follow Up Docs Issue
|
- name: Maybe Follow Up Docs Issue
|
||||||
working-directory: cyborg
|
working-directory: cyborg
|
||||||
|
|||||||
46
.github/workflows/docs.yml
vendored
@@ -10,7 +10,6 @@ on:
|
|||||||
- 'docker/**'
|
- 'docker/**'
|
||||||
- '.gitignore'
|
- '.gitignore'
|
||||||
- 'grafana/**'
|
- 'grafana/**'
|
||||||
- 'Makefile'
|
|
||||||
push:
|
push:
|
||||||
branches:
|
branches:
|
||||||
- main
|
- main
|
||||||
@@ -22,7 +21,6 @@ on:
|
|||||||
- 'docker/**'
|
- 'docker/**'
|
||||||
- '.gitignore'
|
- '.gitignore'
|
||||||
- 'grafana/**'
|
- 'grafana/**'
|
||||||
- 'Makefile'
|
|
||||||
workflow_dispatch:
|
workflow_dispatch:
|
||||||
|
|
||||||
name: CI
|
name: CI
|
||||||
@@ -33,65 +31,55 @@ name: CI
|
|||||||
jobs:
|
jobs:
|
||||||
typos:
|
typos:
|
||||||
name: Spell Check with Typos
|
name: Spell Check with Typos
|
||||||
runs-on: ubuntu-latest
|
runs-on: ubuntu-20.04
|
||||||
steps:
|
steps:
|
||||||
- uses: actions/checkout@v4
|
- uses: actions/checkout@v4
|
||||||
with:
|
|
||||||
persist-credentials: false
|
|
||||||
- uses: crate-ci/typos@master
|
- uses: crate-ci/typos@master
|
||||||
|
|
||||||
license-header-check:
|
license-header-check:
|
||||||
runs-on: ubuntu-latest
|
runs-on: ubuntu-20.04
|
||||||
name: Check License Header
|
name: Check License Header
|
||||||
steps:
|
steps:
|
||||||
- uses: actions/checkout@v4
|
- uses: actions/checkout@v4
|
||||||
with:
|
|
||||||
persist-credentials: false
|
|
||||||
- uses: korandoru/hawkeye@v5
|
- uses: korandoru/hawkeye@v5
|
||||||
|
|
||||||
check:
|
check:
|
||||||
name: Check
|
name: Check
|
||||||
runs-on: ubuntu-latest
|
runs-on: ubuntu-20.04
|
||||||
steps:
|
steps:
|
||||||
- run: 'echo "No action required"'
|
- run: 'echo "No action required"'
|
||||||
|
|
||||||
fmt:
|
fmt:
|
||||||
name: Rustfmt
|
name: Rustfmt
|
||||||
runs-on: ubuntu-latest
|
runs-on: ubuntu-20.04
|
||||||
steps:
|
steps:
|
||||||
- run: 'echo "No action required"'
|
- run: 'echo "No action required"'
|
||||||
|
|
||||||
clippy:
|
clippy:
|
||||||
name: Clippy
|
name: Clippy
|
||||||
runs-on: ubuntu-latest
|
runs-on: ubuntu-20.04
|
||||||
steps:
|
|
||||||
- run: 'echo "No action required"'
|
|
||||||
|
|
||||||
check-udeps:
|
|
||||||
name: Unused Dependencies
|
|
||||||
runs-on: ubuntu-latest
|
|
||||||
steps:
|
steps:
|
||||||
- run: 'echo "No action required"'
|
- run: 'echo "No action required"'
|
||||||
|
|
||||||
coverage:
|
coverage:
|
||||||
runs-on: ubuntu-latest
|
runs-on: ubuntu-20.04
|
||||||
steps:
|
|
||||||
- run: 'echo "No action required"'
|
|
||||||
|
|
||||||
test:
|
|
||||||
runs-on: ubuntu-latest
|
|
||||||
steps:
|
steps:
|
||||||
- run: 'echo "No action required"'
|
- run: 'echo "No action required"'
|
||||||
|
|
||||||
sqlness:
|
sqlness:
|
||||||
name: Sqlness Test (${{ matrix.mode.name }})
|
name: Sqlness Test
|
||||||
runs-on: ${{ matrix.os }}
|
runs-on: ${{ matrix.os }}
|
||||||
strategy:
|
strategy:
|
||||||
matrix:
|
matrix:
|
||||||
os: [ ubuntu-latest ]
|
os: [ ubuntu-20.04 ]
|
||||||
mode:
|
steps:
|
||||||
- name: "Basic"
|
- run: 'echo "No action required"'
|
||||||
- name: "Remote WAL"
|
|
||||||
- name: "Flat format"
|
sqlness-kafka-wal:
|
||||||
|
name: Sqlness Test with Kafka Wal
|
||||||
|
runs-on: ${{ matrix.os }}
|
||||||
|
strategy:
|
||||||
|
matrix:
|
||||||
|
os: [ ubuntu-20.04 ]
|
||||||
steps:
|
steps:
|
||||||
- run: 'echo "No action required"'
|
- run: 'echo "No action required"'
|
||||||
|
|||||||
26
.github/workflows/grafana.yml
vendored
@@ -1,26 +0,0 @@
|
|||||||
name: Check Grafana Panels
|
|
||||||
|
|
||||||
on:
|
|
||||||
pull_request:
|
|
||||||
branches:
|
|
||||||
- main
|
|
||||||
paths:
|
|
||||||
- 'grafana/**' # Trigger only when files under the grafana/ directory change
|
|
||||||
|
|
||||||
jobs:
|
|
||||||
check-panels:
|
|
||||||
runs-on: ubuntu-latest
|
|
||||||
|
|
||||||
steps:
|
|
||||||
# Check out the repository
|
|
||||||
- name: Checkout repository
|
|
||||||
uses: actions/checkout@v4
|
|
||||||
|
|
||||||
# Install jq (required for the script)
|
|
||||||
- name: Install jq
|
|
||||||
run: sudo apt-get install -y jq
|
|
||||||
|
|
||||||
# Make the check.sh script executable
|
|
||||||
- name: Check grafana dashboards
|
|
||||||
run: |
|
|
||||||
make check-dashboards
|
|
||||||
57
.github/workflows/multi-lang-tests.yml
vendored
@@ -1,57 +0,0 @@
|
|||||||
name: Multi-language Integration Tests
|
|
||||||
|
|
||||||
on:
|
|
||||||
push:
|
|
||||||
branches:
|
|
||||||
- main
|
|
||||||
workflow_dispatch:
|
|
||||||
|
|
||||||
concurrency:
|
|
||||||
group: ${{ github.workflow }}-${{ github.head_ref || github.run_id }}
|
|
||||||
cancel-in-progress: true
|
|
||||||
|
|
||||||
jobs:
|
|
||||||
build-greptimedb:
|
|
||||||
if: ${{ github.repository == 'GreptimeTeam/greptimedb' }}
|
|
||||||
name: Build GreptimeDB binary
|
|
||||||
runs-on: ubuntu-latest
|
|
||||||
timeout-minutes: 60
|
|
||||||
steps:
|
|
||||||
- uses: actions/checkout@v4
|
|
||||||
with:
|
|
||||||
persist-credentials: false
|
|
||||||
- uses: arduino/setup-protoc@v3
|
|
||||||
with:
|
|
||||||
repo-token: ${{ secrets.GITHUB_TOKEN }}
|
|
||||||
- uses: actions-rust-lang/setup-rust-toolchain@v1
|
|
||||||
- uses: Swatinem/rust-cache@v2
|
|
||||||
with:
|
|
||||||
shared-key: "multi-lang-build"
|
|
||||||
cache-all-crates: "true"
|
|
||||||
save-if: ${{ github.ref == 'refs/heads/main' }}
|
|
||||||
- name: Install cargo-gc-bin
|
|
||||||
shell: bash
|
|
||||||
run: cargo install cargo-gc-bin --force
|
|
||||||
- name: Build greptime binary
|
|
||||||
shell: bash
|
|
||||||
run: cargo gc -- --bin greptime --features "pg_kvbackend,mysql_kvbackend"
|
|
||||||
- name: Pack greptime binary
|
|
||||||
shell: bash
|
|
||||||
run: |
|
|
||||||
mkdir bin && \
|
|
||||||
mv ./target/debug/greptime bin
|
|
||||||
- name: Print greptime binary info
|
|
||||||
run: ls -lh bin
|
|
||||||
- name: Upload greptime binary
|
|
||||||
uses: actions/upload-artifact@v4
|
|
||||||
with:
|
|
||||||
name: greptime-bin
|
|
||||||
path: bin/
|
|
||||||
retention-days: 1
|
|
||||||
|
|
||||||
run-multi-lang-tests:
|
|
||||||
name: Run Multi-language SDK Tests
|
|
||||||
needs: build-greptimedb
|
|
||||||
uses: ./.github/workflows/run-multi-lang-tests.yml
|
|
||||||
with:
|
|
||||||
artifact-name: greptime-bin
|
|
||||||
78
.github/workflows/nightly-build.yml
vendored
@@ -12,13 +12,13 @@ on:
|
|||||||
linux_amd64_runner:
|
linux_amd64_runner:
|
||||||
type: choice
|
type: choice
|
||||||
description: The runner used to build linux-amd64 artifacts
|
description: The runner used to build linux-amd64 artifacts
|
||||||
default: ec2-c6i.4xlarge-amd64
|
default: ec2-c6i.2xlarge-amd64
|
||||||
options:
|
options:
|
||||||
- ubuntu-22.04
|
- ubuntu-20.04
|
||||||
- ubuntu-22.04-8-cores
|
- ubuntu-20.04-8-cores
|
||||||
- ubuntu-22.04-16-cores
|
- ubuntu-20.04-16-cores
|
||||||
- ubuntu-22.04-32-cores
|
- ubuntu-20.04-32-cores
|
||||||
- ubuntu-22.04-64-cores
|
- ubuntu-20.04-64-cores
|
||||||
- ec2-c6i.xlarge-amd64 # 4C8G
|
- ec2-c6i.xlarge-amd64 # 4C8G
|
||||||
- ec2-c6i.2xlarge-amd64 # 8C16G
|
- ec2-c6i.2xlarge-amd64 # 8C16G
|
||||||
- ec2-c6i.4xlarge-amd64 # 16C32G
|
- ec2-c6i.4xlarge-amd64 # 16C32G
|
||||||
@@ -27,7 +27,7 @@ on:
|
|||||||
linux_arm64_runner:
|
linux_arm64_runner:
|
||||||
type: choice
|
type: choice
|
||||||
description: The runner used to build linux-arm64 artifacts
|
description: The runner used to build linux-arm64 artifacts
|
||||||
default: ec2-c6g.4xlarge-arm64
|
default: ec2-c6g.2xlarge-arm64
|
||||||
options:
|
options:
|
||||||
- ec2-c6g.xlarge-arm64 # 4C8G
|
- ec2-c6g.xlarge-arm64 # 4C8G
|
||||||
- ec2-c6g.2xlarge-arm64 # 8C16G
|
- ec2-c6g.2xlarge-arm64 # 8C16G
|
||||||
@@ -66,11 +66,18 @@ env:
|
|||||||
|
|
||||||
NIGHTLY_RELEASE_PREFIX: nightly
|
NIGHTLY_RELEASE_PREFIX: nightly
|
||||||
|
|
||||||
|
# Use the different image name to avoid conflict with the release images.
|
||||||
|
# The DockerHub image will be greptime/greptimedb-nightly.
|
||||||
|
IMAGE_NAME: greptimedb-nightly
|
||||||
|
|
||||||
|
permissions:
|
||||||
|
issues: write
|
||||||
|
|
||||||
jobs:
|
jobs:
|
||||||
allocate-runners:
|
allocate-runners:
|
||||||
name: Allocate runners
|
name: Allocate runners
|
||||||
if: ${{ github.repository == 'GreptimeTeam/greptimedb' }}
|
if: ${{ github.repository == 'GreptimeTeam/greptimedb' }}
|
||||||
runs-on: ubuntu-latest
|
runs-on: ubuntu-20.04
|
||||||
outputs:
|
outputs:
|
||||||
linux-amd64-runner: ${{ steps.start-linux-amd64-runner.outputs.label }}
|
linux-amd64-runner: ${{ steps.start-linux-amd64-runner.outputs.label }}
|
||||||
linux-arm64-runner: ${{ steps.start-linux-arm64-runner.outputs.label }}
|
linux-arm64-runner: ${{ steps.start-linux-arm64-runner.outputs.label }}
|
||||||
@@ -88,7 +95,6 @@ jobs:
|
|||||||
uses: actions/checkout@v4
|
uses: actions/checkout@v4
|
||||||
with:
|
with:
|
||||||
fetch-depth: 0
|
fetch-depth: 0
|
||||||
persist-credentials: false
|
|
||||||
|
|
||||||
- name: Create version
|
- name: Create version
|
||||||
id: create-version
|
id: create-version
|
||||||
@@ -141,7 +147,6 @@ jobs:
|
|||||||
- uses: actions/checkout@v4
|
- uses: actions/checkout@v4
|
||||||
with:
|
with:
|
||||||
fetch-depth: 0
|
fetch-depth: 0
|
||||||
persist-credentials: false
|
|
||||||
|
|
||||||
- uses: ./.github/actions/build-linux-artifacts
|
- uses: ./.github/actions/build-linux-artifacts
|
||||||
with:
|
with:
|
||||||
@@ -149,8 +154,6 @@ jobs:
|
|||||||
cargo-profile: ${{ env.CARGO_PROFILE }}
|
cargo-profile: ${{ env.CARGO_PROFILE }}
|
||||||
version: ${{ needs.allocate-runners.outputs.version }}
|
version: ${{ needs.allocate-runners.outputs.version }}
|
||||||
disable-run-tests: ${{ env.DISABLE_RUN_TESTS }}
|
disable-run-tests: ${{ env.DISABLE_RUN_TESTS }}
|
||||||
image-registry: ${{ vars.ECR_IMAGE_REGISTRY }}
|
|
||||||
image-namespace: ${{ vars.ECR_IMAGE_NAMESPACE }}
|
|
||||||
|
|
||||||
build-linux-arm64-artifacts:
|
build-linux-arm64-artifacts:
|
||||||
name: Build linux-arm64 artifacts
|
name: Build linux-arm64 artifacts
|
||||||
@@ -163,7 +166,6 @@ jobs:
|
|||||||
- uses: actions/checkout@v4
|
- uses: actions/checkout@v4
|
||||||
with:
|
with:
|
||||||
fetch-depth: 0
|
fetch-depth: 0
|
||||||
persist-credentials: false
|
|
||||||
|
|
||||||
- uses: ./.github/actions/build-linux-artifacts
|
- uses: ./.github/actions/build-linux-artifacts
|
||||||
with:
|
with:
|
||||||
@@ -171,20 +173,6 @@ jobs:
|
|||||||
cargo-profile: ${{ env.CARGO_PROFILE }}
|
cargo-profile: ${{ env.CARGO_PROFILE }}
|
||||||
version: ${{ needs.allocate-runners.outputs.version }}
|
version: ${{ needs.allocate-runners.outputs.version }}
|
||||||
disable-run-tests: ${{ env.DISABLE_RUN_TESTS }}
|
disable-run-tests: ${{ env.DISABLE_RUN_TESTS }}
|
||||||
image-registry: ${{ vars.ECR_IMAGE_REGISTRY }}
|
|
||||||
image-namespace: ${{ vars.ECR_IMAGE_NAMESPACE }}
|
|
||||||
|
|
||||||
run-multi-lang-tests:
|
|
||||||
name: Run Multi-language SDK Tests
|
|
||||||
if: ${{ inputs.build_linux_amd64_artifacts || github.event_name == 'schedule' }}
|
|
||||||
needs: [
|
|
||||||
allocate-runners,
|
|
||||||
build-linux-amd64-artifacts,
|
|
||||||
]
|
|
||||||
uses: ./.github/workflows/run-multi-lang-tests.yml
|
|
||||||
with:
|
|
||||||
artifact-name: greptime-linux-amd64-${{ needs.allocate-runners.outputs.version }}
|
|
||||||
artifact-is-tarball: true
|
|
||||||
|
|
||||||
release-images-to-dockerhub:
|
release-images-to-dockerhub:
|
||||||
name: Build and push images to DockerHub
|
name: Build and push images to DockerHub
|
||||||
@@ -194,25 +182,24 @@ jobs:
|
|||||||
build-linux-amd64-artifacts,
|
build-linux-amd64-artifacts,
|
||||||
build-linux-arm64-artifacts,
|
build-linux-arm64-artifacts,
|
||||||
]
|
]
|
||||||
runs-on: ubuntu-latest
|
runs-on: ubuntu-20.04
|
||||||
outputs:
|
outputs:
|
||||||
nightly-build-result: ${{ steps.set-nightly-build-result.outputs.nightly-build-result }}
|
nightly-build-result: ${{ steps.set-nightly-build-result.outputs.nightly-build-result }}
|
||||||
steps:
|
steps:
|
||||||
- uses: actions/checkout@v4
|
- uses: actions/checkout@v4
|
||||||
with:
|
with:
|
||||||
fetch-depth: 0
|
fetch-depth: 0
|
||||||
persist-credentials: false
|
|
||||||
|
|
||||||
- name: Build and push images to dockerhub
|
- name: Build and push images to dockerhub
|
||||||
uses: ./.github/actions/build-images
|
uses: ./.github/actions/build-images
|
||||||
with:
|
with:
|
||||||
image-registry: docker.io
|
image-registry: docker.io
|
||||||
image-namespace: ${{ vars.IMAGE_NAMESPACE }}
|
image-namespace: ${{ vars.IMAGE_NAMESPACE }}
|
||||||
image-name: ${{ vars.NIGHTLY_BUILD_IMAGE_NAME }}
|
image-name: ${{ env.IMAGE_NAME }}
|
||||||
image-registry-username: ${{ secrets.DOCKERHUB_USERNAME }}
|
image-registry-username: ${{ secrets.DOCKERHUB_USERNAME }}
|
||||||
image-registry-password: ${{ secrets.DOCKERHUB_TOKEN }}
|
image-registry-password: ${{ secrets.DOCKERHUB_TOKEN }}
|
||||||
version: ${{ needs.allocate-runners.outputs.version }}
|
version: ${{ needs.allocate-runners.outputs.version }}
|
||||||
push-latest-tag: false
|
push-latest-tag: false # Don't push the latest tag to registry.
|
||||||
|
|
||||||
- name: Set nightly build result
|
- name: Set nightly build result
|
||||||
id: set-nightly-build-result
|
id: set-nightly-build-result
|
||||||
@@ -226,7 +213,7 @@ jobs:
|
|||||||
allocate-runners,
|
allocate-runners,
|
||||||
release-images-to-dockerhub,
|
release-images-to-dockerhub,
|
||||||
]
|
]
|
||||||
runs-on: ubuntu-latest
|
runs-on: ubuntu-20.04
|
||||||
# When we push to ACR, it's easy to fail due to some unknown network issues.
|
# When we push to ACR, it's easy to fail due to some unknown network issues.
|
||||||
# However, we don't want to fail the whole workflow because of this.
|
# However, we don't want to fail the whole workflow because of this.
|
||||||
# The ACR have daily sync with DockerHub, so don't worry about the image not being updated.
|
# The ACR have daily sync with DockerHub, so don't worry about the image not being updated.
|
||||||
@@ -235,14 +222,13 @@ jobs:
|
|||||||
- uses: actions/checkout@v4
|
- uses: actions/checkout@v4
|
||||||
with:
|
with:
|
||||||
fetch-depth: 0
|
fetch-depth: 0
|
||||||
persist-credentials: false
|
|
||||||
|
|
||||||
- name: Release artifacts to CN region
|
- name: Release artifacts to CN region
|
||||||
uses: ./.github/actions/release-cn-artifacts
|
uses: ./.github/actions/release-cn-artifacts
|
||||||
with:
|
with:
|
||||||
src-image-registry: docker.io
|
src-image-registry: docker.io
|
||||||
src-image-namespace: ${{ vars.IMAGE_NAMESPACE }}
|
src-image-namespace: ${{ vars.IMAGE_NAMESPACE }}
|
||||||
src-image-name: ${{ vars.NIGHTLY_BUILD_IMAGE_NAME }}
|
src-image-name: ${{ env.IMAGE_NAME }}
|
||||||
dst-image-registry-username: ${{ secrets.ALICLOUD_USERNAME }}
|
dst-image-registry-username: ${{ secrets.ALICLOUD_USERNAME }}
|
||||||
dst-image-registry-password: ${{ secrets.ALICLOUD_PASSWORD }}
|
dst-image-registry-password: ${{ secrets.ALICLOUD_PASSWORD }}
|
||||||
dst-image-registry: ${{ vars.ACR_IMAGE_REGISTRY }}
|
dst-image-registry: ${{ vars.ACR_IMAGE_REGISTRY }}
|
||||||
@@ -252,16 +238,15 @@ jobs:
|
|||||||
aws-cn-access-key-id: ${{ secrets.AWS_CN_ACCESS_KEY_ID }}
|
aws-cn-access-key-id: ${{ secrets.AWS_CN_ACCESS_KEY_ID }}
|
||||||
aws-cn-secret-access-key: ${{ secrets.AWS_CN_SECRET_ACCESS_KEY }}
|
aws-cn-secret-access-key: ${{ secrets.AWS_CN_SECRET_ACCESS_KEY }}
|
||||||
aws-cn-region: ${{ vars.AWS_RELEASE_BUCKET_REGION }}
|
aws-cn-region: ${{ vars.AWS_RELEASE_BUCKET_REGION }}
|
||||||
upload-to-s3: false
|
|
||||||
dev-mode: false
|
dev-mode: false
|
||||||
update-version-info: false # Don't update version info in S3.
|
update-version-info: false # Don't update version info in S3.
|
||||||
push-latest-tag: false
|
push-latest-tag: false # Don't push the latest tag to registry.
|
||||||
|
|
||||||
stop-linux-amd64-runner: # It's always run as the last job in the workflow to make sure that the runner is released.
|
stop-linux-amd64-runner: # It's always run as the last job in the workflow to make sure that the runner is released.
|
||||||
name: Stop linux-amd64 runner
|
name: Stop linux-amd64 runner
|
||||||
# Only run this job when the runner is allocated.
|
# Only run this job when the runner is allocated.
|
||||||
if: ${{ always() }}
|
if: ${{ always() }}
|
||||||
runs-on: ubuntu-latest
|
runs-on: ubuntu-20.04
|
||||||
needs: [
|
needs: [
|
||||||
allocate-runners,
|
allocate-runners,
|
||||||
build-linux-amd64-artifacts,
|
build-linux-amd64-artifacts,
|
||||||
@@ -271,7 +256,6 @@ jobs:
|
|||||||
uses: actions/checkout@v4
|
uses: actions/checkout@v4
|
||||||
with:
|
with:
|
||||||
fetch-depth: 0
|
fetch-depth: 0
|
||||||
persist-credentials: false
|
|
||||||
|
|
||||||
- name: Stop EC2 runner
|
- name: Stop EC2 runner
|
||||||
uses: ./.github/actions/stop-runner
|
uses: ./.github/actions/stop-runner
|
||||||
@@ -287,7 +271,7 @@ jobs:
|
|||||||
name: Stop linux-arm64 runner
|
name: Stop linux-arm64 runner
|
||||||
# Only run this job when the runner is allocated.
|
# Only run this job when the runner is allocated.
|
||||||
if: ${{ always() }}
|
if: ${{ always() }}
|
||||||
runs-on: ubuntu-latest
|
runs-on: ubuntu-20.04
|
||||||
needs: [
|
needs: [
|
||||||
allocate-runners,
|
allocate-runners,
|
||||||
build-linux-arm64-artifacts,
|
build-linux-arm64-artifacts,
|
||||||
@@ -297,7 +281,6 @@ jobs:
|
|||||||
uses: actions/checkout@v4
|
uses: actions/checkout@v4
|
||||||
with:
|
with:
|
||||||
fetch-depth: 0
|
fetch-depth: 0
|
||||||
persist-credentials: false
|
|
||||||
|
|
||||||
- name: Stop EC2 runner
|
- name: Stop EC2 runner
|
||||||
uses: ./.github/actions/stop-runner
|
uses: ./.github/actions/stop-runner
|
||||||
@@ -313,18 +296,13 @@ jobs:
|
|||||||
if: ${{ github.repository == 'GreptimeTeam/greptimedb' && always() }} # Not requiring successful dependent jobs, always run.
|
if: ${{ github.repository == 'GreptimeTeam/greptimedb' && always() }} # Not requiring successful dependent jobs, always run.
|
||||||
name: Send notification to Greptime team
|
name: Send notification to Greptime team
|
||||||
needs: [
|
needs: [
|
||||||
release-images-to-dockerhub,
|
release-images-to-dockerhub
|
||||||
run-multi-lang-tests,
|
|
||||||
]
|
]
|
||||||
runs-on: ubuntu-latest
|
runs-on: ubuntu-20.04
|
||||||
permissions:
|
|
||||||
issues: write
|
|
||||||
env:
|
env:
|
||||||
SLACK_WEBHOOK_URL: ${{ secrets.SLACK_WEBHOOK_URL_DEVELOP_CHANNEL }}
|
SLACK_WEBHOOK_URL: ${{ secrets.SLACK_WEBHOOK_URL_DEVELOP_CHANNEL }}
|
||||||
steps:
|
steps:
|
||||||
- uses: actions/checkout@v4
|
- uses: actions/checkout@v4
|
||||||
with:
|
|
||||||
persist-credentials: false
|
|
||||||
- uses: ./.github/actions/setup-cyborg
|
- uses: ./.github/actions/setup-cyborg
|
||||||
- name: Report CI status
|
- name: Report CI status
|
||||||
id: report-ci-status
|
id: report-ci-status
|
||||||
@@ -332,17 +310,17 @@ jobs:
|
|||||||
run: pnpm tsx bin/report-ci-failure.ts
|
run: pnpm tsx bin/report-ci-failure.ts
|
||||||
env:
|
env:
|
||||||
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
|
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
|
||||||
CI_REPORT_STATUS: ${{ needs.release-images-to-dockerhub.outputs.nightly-build-result == 'success' && (needs.run-multi-lang-tests.result == 'success' || needs.run-multi-lang-tests.result == 'skipped') }}
|
CI_REPORT_STATUS: ${{ needs.release-images-to-dockerhub.outputs.nightly-build-result == 'success' }}
|
||||||
- name: Notify nightly build successful result
|
- name: Notify nightly build successful result
|
||||||
uses: slackapi/slack-github-action@v1.23.0
|
uses: slackapi/slack-github-action@v1.23.0
|
||||||
if: ${{ needs.release-images-to-dockerhub.outputs.nightly-build-result == 'success' && (needs.run-multi-lang-tests.result == 'success' || needs.run-multi-lang-tests.result == 'skipped') }}
|
if: ${{ needs.release-images-to-dockerhub.outputs.nightly-build-result == 'success' }}
|
||||||
with:
|
with:
|
||||||
payload: |
|
payload: |
|
||||||
{"text": "GreptimeDB's ${{ env.NEXT_RELEASE_VERSION }} build has completed successfully."}
|
{"text": "GreptimeDB's ${{ env.NEXT_RELEASE_VERSION }} build has completed successfully."}
|
||||||
|
|
||||||
- name: Notify nightly build failed result
|
- name: Notify nightly build failed result
|
||||||
uses: slackapi/slack-github-action@v1.23.0
|
uses: slackapi/slack-github-action@v1.23.0
|
||||||
if: ${{ needs.release-images-to-dockerhub.outputs.nightly-build-result != 'success' || needs.run-multi-lang-tests.result == 'failure' }}
|
if: ${{ needs.release-images-to-dockerhub.outputs.nightly-build-result != 'success' }}
|
||||||
with:
|
with:
|
||||||
payload: |
|
payload: |
|
||||||
{"text": "GreptimeDB's ${{ env.NEXT_RELEASE_VERSION }} build has failed, please check ${{ steps.report-ci-status.outputs.html_url }}."}
|
{"text": "GreptimeDB's ${{ env.NEXT_RELEASE_VERSION }} build has failed, please check ${{ steps.report-ci-status.outputs.html_url }}."}
|
||||||
|
|||||||

.github/workflows/nightly-ci.yml (86 changes, vendored)

@@ -1,6 +1,6 @@
  on:
  schedule:
- - cron: "0 23 * * 1-4"
+ - cron: "0 23 * * 1-5"
  workflow_dispatch:

  name: Nightly CI
@@ -9,21 +9,22 @@ concurrency:
  group: ${{ github.workflow }}-${{ github.head_ref || github.run_id }}
  cancel-in-progress: true

+ env:
+ RUST_TOOLCHAIN: nightly-2024-04-20

+ permissions:
+ issues: write

  jobs:
  sqlness-test:
  name: Run sqlness test
  if: ${{ github.repository == 'GreptimeTeam/greptimedb' }}
- runs-on: ubuntu-latest
+ runs-on: ubuntu-22.04
  steps:
  - name: Checkout
  uses: actions/checkout@v4
  with:
  fetch-depth: 0
- persist-credentials: false

- - name: Check install.sh
- run: ./.github/scripts/check-install-script.sh

  - name: Run sqlness test
  uses: ./.github/actions/sqlness-test
  with:
@@ -32,43 +33,31 @@ jobs:
  aws-region: ${{ vars.AWS_CI_TEST_BUCKET_REGION }}
  aws-access-key-id: ${{ secrets.AWS_CI_TEST_ACCESS_KEY_ID }}
  aws-secret-access-key: ${{ secrets.AWS_CI_TEST_SECRET_ACCESS_KEY }}
- - name: Upload sqlness logs
- if: failure()
- uses: actions/upload-artifact@v4
- with:
- name: sqlness-logs-kind
- path: /tmp/kind/
- retention-days: 3

  sqlness-windows:
  name: Sqlness tests on Windows
  if: ${{ github.repository == 'GreptimeTeam/greptimedb' }}
  runs-on: windows-2022-8-cores
- permissions:
- issues: write
  timeout-minutes: 60
  steps:
  - uses: actions/checkout@v4
- with:
- fetch-depth: 0
- persist-credentials: false
  - uses: ./.github/actions/setup-cyborg
  - uses: arduino/setup-protoc@v3
  with:
  repo-token: ${{ secrets.GITHUB_TOKEN }}
- - uses: actions-rust-lang/setup-rust-toolchain@v1
+ - uses: dtolnay/rust-toolchain@master
+ with:
+ toolchain: ${{ env.RUST_TOOLCHAIN }}
  - name: Rust Cache
  uses: Swatinem/rust-cache@v2
  - name: Run sqlness
- run: make sqlness-test
- env:
- SQLNESS_OPTS: "--preserve-state"
+ run: cargo sqlness
  - name: Upload sqlness logs
- if: failure()
+ if: always()
  uses: actions/upload-artifact@v4
  with:
  name: sqlness-logs
- path: C:\Users\RUNNER~1\AppData\Local\Temp\sqlness*
+ path: /tmp/greptime-*.log
  retention-days: 3

  test-on-windows:
@@ -79,9 +68,6 @@ jobs:
  steps:
  - run: git config --global core.autocrlf false
  - uses: actions/checkout@v4
- with:
- fetch-depth: 0
- persist-credentials: false
  - uses: ./.github/actions/setup-cyborg
  - uses: arduino/setup-protoc@v3
  with:
@@ -90,49 +76,46 @@ jobs:
  with:
  version: "14.0"
  - name: Install Rust toolchain
- uses: actions-rust-lang/setup-rust-toolchain@v1
+ uses: dtolnay/rust-toolchain@master
  with:
+ toolchain: ${{ env.RUST_TOOLCHAIN }}
  components: llvm-tools-preview
  - name: Rust Cache
  uses: Swatinem/rust-cache@v2
  - name: Install Cargo Nextest
  uses: taiki-e/install-action@nextest
+ - name: Install Python
+ uses: actions/setup-python@v5
+ with:
+ python-version: "3.10"
+ - name: Install PyArrow Package
+ run: pip install pyarrow
  - name: Install WSL distribution
  uses: Vampire/setup-wsl@v2
  with:
  distribution: Ubuntu-22.04
  - name: Running tests
- run: cargo nextest run -F dashboard
+ run: cargo nextest run -F pyo3_backend,dashboard
  env:
  CARGO_BUILD_RUSTFLAGS: "-C linker=lld-link"
  RUST_BACKTRACE: 1
  CARGO_INCREMENTAL: 0
+ RUSTUP_WINDOWS_PATH_ADD_BIN: 1 # Workaround for https://github.com/nextest-rs/nextest/issues/1493
  GT_S3_BUCKET: ${{ vars.AWS_CI_TEST_BUCKET }}
  GT_S3_ACCESS_KEY_ID: ${{ secrets.AWS_CI_TEST_ACCESS_KEY_ID }}
  GT_S3_ACCESS_KEY: ${{ secrets.AWS_CI_TEST_SECRET_ACCESS_KEY }}
  GT_S3_REGION: ${{ vars.AWS_CI_TEST_BUCKET_REGION }}
  UNITTEST_LOG_DIR: "__unittest_logs"

- cleanbuild-linux-nix:
- name: Run clean build on Linux
- runs-on: ubuntu-latest
- if: ${{ github.repository == 'GreptimeTeam/greptimedb' }}
- timeout-minutes: 45
- steps:
- - uses: actions/checkout@v4
- with:
- fetch-depth: 0
- persist-credentials: false
- - uses: cachix/install-nix-action@v31
- - run: nix develop --command cargo check --bin greptime
- env:
- CARGO_BUILD_RUSTFLAGS: "-C link-arg=-fuse-ld=mold"

  check-status:
  name: Check status
- needs: [sqlness-test, sqlness-windows, test-on-windows]
+ needs: [
+ sqlness-test,
+ sqlness-windows,
+ test-on-windows,
+ ]
  if: ${{ github.repository == 'GreptimeTeam/greptimedb' }}
- runs-on: ubuntu-latest
+ runs-on: ubuntu-20.04
  outputs:
  check-result: ${{ steps.set-check-result.outputs.check-result }}
  steps:
@@ -144,15 +127,14 @@ jobs:
  notification:
  if: ${{ github.repository == 'GreptimeTeam/greptimedb' && always() }} # Not requiring successful dependent jobs, always run.
  name: Send notification to Greptime team
- needs: [check-status]
- runs-on: ubuntu-latest
+ needs: [
+ check-status
+ ]
+ runs-on: ubuntu-20.04
  env:
  SLACK_WEBHOOK_URL: ${{ secrets.SLACK_WEBHOOK_URL_DEVELOP_CHANNEL }}
  steps:
  - uses: actions/checkout@v4
- with:
- fetch-depth: 0
- persist-credentials: false
  - uses: ./.github/actions/setup-cyborg
  - name: Report CI status
  id: report-ci-status

.github/workflows/pr-labeling.yaml (42 changes, vendored)

@@ -1,42 +0,0 @@
- name: 'PR Labeling'
-
- on:
- pull_request_target:
- types:
- - opened
- - synchronize
- - reopened
-
- permissions:
- contents: read
- pull-requests: write
- issues: write
-
- jobs:
- labeler:
- runs-on: ubuntu-latest
- steps:
- - name: Checkout sources
- uses: actions/checkout@v4
-
- - uses: actions/labeler@v5
- with:
- configuration-path: ".github/labeler.yaml"
- repo-token: "${{ secrets.GITHUB_TOKEN }}"
-
- size-label:
- runs-on: ubuntu-latest
- steps:
- - uses: pascalgn/size-label-action@v0.5.5
- env:
- GITHUB_TOKEN: "${{ secrets.GITHUB_TOKEN }}"
- with:
- sizes: >
- {
- "0": "XS",
- "100": "S",
- "300": "M",
- "1000": "L",
- "1500": "XL",
- "2000": "XXL"
- }

.github/workflows/pr-review-reminder.yml (36 changes, vendored)

@@ -1,36 +0,0 @@
- name: PR Review Reminder
-
- on:
- schedule:
- # Run at 9:00 AM UTC+8 (01:00 AM UTC) on Monday, Wednesday, Friday
- - cron: '0 1 * * 1,3,5'
- workflow_dispatch:
-
- jobs:
- pr-review-reminder:
- name: Send PR Review Reminders
- runs-on: ubuntu-latest
- permissions:
- contents: read
- pull-requests: read
- if: ${{ github.repository == 'GreptimeTeam/greptimedb' }}
- steps:
- - name: Checkout repository
- uses: actions/checkout@v4
-
- - name: Setup Node.js
- uses: actions/setup-node@v4
- with:
- node-version: '20'
-
- - name: Install dependencies
- working-directory: .github/scripts
- run: npm ci
-
- - name: Run PR review reminder
- working-directory: .github/scripts
- run: node pr-review-reminder.js
- env:
- GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
- SLACK_PR_REVIEW_WEBHOOK_URL: ${{ vars.SLACK_PR_REVIEW_WEBHOOK_URL }}
- GITHUBID_SLACKID_MAPPING: ${{ vars.GITHUBID_SLACKID_MAPPING }}

.github/workflows/release-dev-builder-images.yaml (205 changes, vendored)

@@ -1,14 +1,12 @@
  name: Release dev-builder images

  on:
- push:
- branches:
- - main
- paths:
- - rust-toolchain.toml
- - 'docker/dev-builder/**'
  workflow_dispatch: # Allows you to run this workflow manually.
  inputs:
+ version:
+ description: Version of the dev-builder
+ required: false
+ default: latest
  release_dev_builder_ubuntu_image:
  type: boolean
  description: Release dev-builder-ubuntu image
@@ -24,209 +22,64 @@ on:
  description: Release dev-builder-android image
  required: false
  default: false
- update_dev_builder_image_tag:
- type: boolean
- description: Update the DEV_BUILDER_IMAGE_TAG in Makefile and create a PR
- required: false
- default: false

  jobs:
  release-dev-builder-images:
  name: Release dev builder images
- # The jobs are triggered by the following events:
- # 1. Manually triggered workflow_dispatch event
- # 2. Push event when the PR that modifies the `rust-toolchain.toml` or `docker/dev-builder/**` is merged to main
- if: ${{ github.event_name == 'push' || inputs.release_dev_builder_ubuntu_image || inputs.release_dev_builder_centos_image || inputs.release_dev_builder_android_image }}
- runs-on: ubuntu-latest
- outputs:
- version: ${{ steps.set-version.outputs.version }}
+ if: ${{ inputs.release_dev_builder_ubuntu_image || inputs.release_dev_builder_centos_image || inputs.release_dev_builder_android_image }} # Only manually trigger this job.
+ runs-on: ubuntu-20.04-16-cores
  steps:
  - name: Checkout
  uses: actions/checkout@v4
  with:
  fetch-depth: 0
- persist-credentials: false

- - name: Configure build image version
- id: set-version
- shell: bash
- run: |
- commitShortSHA=`echo ${{ github.sha }} | cut -c1-8`
- buildTime=`date +%Y%m%d%H%M%S`
- BUILD_VERSION="$commitShortSHA-$buildTime"
- RUST_TOOLCHAIN_VERSION=$(cat rust-toolchain.toml | grep -Eo '[0-9]{4}-[0-9]{2}-[0-9]{2}')
- IMAGE_VERSION="${RUST_TOOLCHAIN_VERSION}-${BUILD_VERSION}"
- echo "VERSION=${IMAGE_VERSION}" >> $GITHUB_ENV
- echo "version=$IMAGE_VERSION" >> $GITHUB_OUTPUT

  - name: Build and push dev builder images
  uses: ./.github/actions/build-dev-builder-images
  with:
- version: ${{ env.VERSION }}
+ version: ${{ inputs.version }}
  dockerhub-image-registry-username: ${{ secrets.DOCKERHUB_USERNAME }}
  dockerhub-image-registry-token: ${{ secrets.DOCKERHUB_TOKEN }}
- build-dev-builder-ubuntu: ${{ inputs.release_dev_builder_ubuntu_image || github.event_name == 'push' }}
- build-dev-builder-centos: ${{ inputs.release_dev_builder_centos_image || github.event_name == 'push' }}
- build-dev-builder-android: ${{ inputs.release_dev_builder_android_image || github.event_name == 'push' }}
+ build-dev-builder-ubuntu: ${{ inputs.release_dev_builder_ubuntu_image }}
+ build-dev-builder-centos: ${{ inputs.release_dev_builder_centos_image }}
+ build-dev-builder-android: ${{ inputs.release_dev_builder_android_image }}

- release-dev-builder-images-ecr:
- name: Release dev builder images to AWS ECR
- runs-on: ubuntu-latest
- needs: [
- release-dev-builder-images
- ]
- steps:
- - name: Configure AWS credentials
- uses: aws-actions/configure-aws-credentials@v4
- with:
- aws-access-key-id: ${{ secrets.AWS_ECR_ACCESS_KEY_ID }}
- aws-secret-access-key: ${{ secrets.AWS_ECR_SECRET_ACCESS_KEY }}
- aws-region: ${{ vars.ECR_REGION }}
-
- - name: Login to Amazon ECR
- id: login-ecr-public
- uses: aws-actions/amazon-ecr-login@v2
- env:
- AWS_REGION: ${{ vars.ECR_REGION }}
- with:
- registry-type: public
-
- - name: Push dev-builder-ubuntu image
- shell: bash
- if: ${{ inputs.release_dev_builder_ubuntu_image || github.event_name == 'push' }}
- env:
- IMAGE_VERSION: ${{ needs.release-dev-builder-images.outputs.version }}
- IMAGE_NAMESPACE: ${{ vars.IMAGE_NAMESPACE }}
- ECR_IMAGE_REGISTRY: ${{ vars.ECR_IMAGE_REGISTRY }}
- ECR_IMAGE_NAMESPACE: ${{ vars.ECR_IMAGE_NAMESPACE }}
- run: |
- docker run -v "${DOCKER_CONFIG:-$HOME/.docker}:/root/.docker:ro" \
- -e "REGISTRY_AUTH_FILE=/root/.docker/config.json" \
- quay.io/skopeo/stable:latest \
- copy -a docker://docker.io/$IMAGE_NAMESPACE/dev-builder-ubuntu:$IMAGE_VERSION \
- docker://$ECR_IMAGE_REGISTRY/$ECR_IMAGE_NAMESPACE/dev-builder-ubuntu:$IMAGE_VERSION
-
- docker run -v "${DOCKER_CONFIG:-$HOME/.docker}:/root/.docker:ro" \
- -e "REGISTRY_AUTH_FILE=/root/.docker/config.json" \
- quay.io/skopeo/stable:latest \
- copy -a docker://docker.io/$IMAGE_NAMESPACE/dev-builder-ubuntu:latest \
- docker://$ECR_IMAGE_REGISTRY/$ECR_IMAGE_NAMESPACE/dev-builder-ubuntu:latest
-
- - name: Push dev-builder-centos image
- shell: bash
- if: ${{ inputs.release_dev_builder_centos_image || github.event_name == 'push' }}
- env:
- IMAGE_VERSION: ${{ needs.release-dev-builder-images.outputs.version }}
- IMAGE_NAMESPACE: ${{ vars.IMAGE_NAMESPACE }}
- ECR_IMAGE_REGISTRY: ${{ vars.ECR_IMAGE_REGISTRY }}
- ECR_IMAGE_NAMESPACE: ${{ vars.ECR_IMAGE_NAMESPACE }}
- run: |
- docker run -v "${DOCKER_CONFIG:-$HOME/.docker}:/root/.docker:ro" \
- -e "REGISTRY_AUTH_FILE=/root/.docker/config.json" \
- quay.io/skopeo/stable:latest \
- copy -a docker://docker.io/$IMAGE_NAMESPACE/dev-builder-centos:$IMAGE_VERSION \
- docker://$ECR_IMAGE_REGISTRY/$ECR_IMAGE_NAMESPACE/dev-builder-centos:$IMAGE_VERSION
-
- docker run -v "${DOCKER_CONFIG:-$HOME/.docker}:/root/.docker:ro" \
- -e "REGISTRY_AUTH_FILE=/root/.docker/config.json" \
- quay.io/skopeo/stable:latest \
- copy -a docker://docker.io/$IMAGE_NAMESPACE/dev-builder-centos:latest \
- docker://$ECR_IMAGE_REGISTRY/$ECR_IMAGE_NAMESPACE/dev-builder-centos:latest
-
- - name: Push dev-builder-android image
- shell: bash
- if: ${{ inputs.release_dev_builder_android_image || github.event_name == 'push' }}
- env:
- IMAGE_VERSION: ${{ needs.release-dev-builder-images.outputs.version }}
- IMAGE_NAMESPACE: ${{ vars.IMAGE_NAMESPACE }}
- ECR_IMAGE_REGISTRY: ${{ vars.ECR_IMAGE_REGISTRY }}
- ECR_IMAGE_NAMESPACE: ${{ vars.ECR_IMAGE_NAMESPACE }}
- run: |
- docker run -v "${DOCKER_CONFIG:-$HOME/.docker}:/root/.docker:ro" \
- -e "REGISTRY_AUTH_FILE=/root/.docker/config.json" \
- quay.io/skopeo/stable:latest \
- copy -a docker://docker.io/$IMAGE_NAMESPACE/dev-builder-android:$IMAGE_VERSION \
- docker://$ECR_IMAGE_REGISTRY/$ECR_IMAGE_NAMESPACE/dev-builder-android:$IMAGE_VERSION
-
- docker run -v "${DOCKER_CONFIG:-$HOME/.docker}:/root/.docker:ro" \
- -e "REGISTRY_AUTH_FILE=/root/.docker/config.json" \
- quay.io/skopeo/stable:latest \
- copy -a docker://docker.io/$IMAGE_NAMESPACE/dev-builder-android:latest \
- docker://$ECR_IMAGE_REGISTRY/$ECR_IMAGE_NAMESPACE/dev-builder-android:latest

  release-dev-builder-images-cn: # Note: Be careful issue: https://github.com/containers/skopeo/issues/1874 and we decide to use the latest stable skopeo container.
  name: Release dev builder images to CN region
- runs-on: ubuntu-latest
+ runs-on: ubuntu-20.04
  needs: [
  release-dev-builder-images
  ]
  steps:
- - name: Login to AliCloud Container Registry
- uses: docker/login-action@v3
- with:
- registry: ${{ vars.ACR_IMAGE_REGISTRY }}
- username: ${{ secrets.ALICLOUD_USERNAME }}
- password: ${{ secrets.ALICLOUD_PASSWORD }}
-
  - name: Push dev-builder-ubuntu image
  shell: bash
- if: ${{ inputs.release_dev_builder_ubuntu_image || github.event_name == 'push' }}
+ if: ${{ inputs.release_dev_builder_ubuntu_image }}
  env:
- IMAGE_VERSION: ${{ needs.release-dev-builder-images.outputs.version }}
- IMAGE_NAMESPACE: ${{ vars.IMAGE_NAMESPACE }}
- ACR_IMAGE_REGISTRY: ${{ vars.ACR_IMAGE_REGISTRY }}
+ DST_REGISTRY_USERNAME: ${{ secrets.ALICLOUD_USERNAME }}
+ DST_REGISTRY_PASSWORD: ${{ secrets.ALICLOUD_PASSWORD }}
  run: |
- docker run -v "${DOCKER_CONFIG:-$HOME/.docker}:/root/.docker:ro" \
- -e "REGISTRY_AUTH_FILE=/root/.docker/config.json" \
- quay.io/skopeo/stable:latest \
- copy -a docker://docker.io/$IMAGE_NAMESPACE/dev-builder-ubuntu:$IMAGE_VERSION \
- docker://$ACR_IMAGE_REGISTRY/$IMAGE_NAMESPACE/dev-builder-ubuntu:$IMAGE_VERSION
+ docker run quay.io/skopeo/stable:latest copy -a docker://docker.io/${{ vars.IMAGE_NAMESPACE }}/dev-builder-ubuntu:${{ inputs.version }} \
+ --dest-creds "$DST_REGISTRY_USERNAME":"$DST_REGISTRY_PASSWORD" \
+ docker://${{ vars.ACR_IMAGE_REGISTRY }}/${{ vars.IMAGE_NAMESPACE }}/dev-builder-ubuntu:${{ inputs.version }}

  - name: Push dev-builder-centos image
  shell: bash
- if: ${{ inputs.release_dev_builder_centos_image || github.event_name == 'push' }}
+ if: ${{ inputs.release_dev_builder_centos_image }}
  env:
- IMAGE_VERSION: ${{ needs.release-dev-builder-images.outputs.version }}
- IMAGE_NAMESPACE: ${{ vars.IMAGE_NAMESPACE }}
- ACR_IMAGE_REGISTRY: ${{ vars.ACR_IMAGE_REGISTRY }}
+ DST_REGISTRY_USERNAME: ${{ secrets.ALICLOUD_USERNAME }}
+ DST_REGISTRY_PASSWORD: ${{ secrets.ALICLOUD_PASSWORD }}
  run: |
- docker run -v "${DOCKER_CONFIG:-$HOME/.docker}:/root/.docker:ro" \
- -e "REGISTRY_AUTH_FILE=/root/.docker/config.json" \
- quay.io/skopeo/stable:latest \
- copy -a docker://docker.io/$IMAGE_NAMESPACE/dev-builder-centos:$IMAGE_VERSION \
- docker://$ACR_IMAGE_REGISTRY/$IMAGE_NAMESPACE/dev-builder-centos:$IMAGE_VERSION
+ docker run quay.io/skopeo/stable:latest copy -a docker://docker.io/${{ vars.IMAGE_NAMESPACE }}/dev-builder-centos:${{ inputs.version }} \
+ --dest-creds "$DST_REGISTRY_USERNAME":"$DST_REGISTRY_PASSWORD" \
+ docker://${{ vars.ACR_IMAGE_REGISTRY }}/${{ vars.IMAGE_NAMESPACE }}/dev-builder-centos:${{ inputs.version }}

  - name: Push dev-builder-android image
  shell: bash
- if: ${{ inputs.release_dev_builder_android_image || github.event_name == 'push' }}
+ if: ${{ inputs.release_dev_builder_android_image }}
  env:
- IMAGE_VERSION: ${{ needs.release-dev-builder-images.outputs.version }}
- IMAGE_NAMESPACE: ${{ vars.IMAGE_NAMESPACE }}
- ACR_IMAGE_REGISTRY: ${{ vars.ACR_IMAGE_REGISTRY }}
+ DST_REGISTRY_USERNAME: ${{ secrets.ALICLOUD_USERNAME }}
+ DST_REGISTRY_PASSWORD: ${{ secrets.ALICLOUD_PASSWORD }}
  run: |
- docker run -v "${DOCKER_CONFIG:-$HOME/.docker}:/root/.docker:ro" \
- -e "REGISTRY_AUTH_FILE=/root/.docker/config.json" \
- quay.io/skopeo/stable:latest \
- copy -a docker://docker.io/$IMAGE_NAMESPACE/dev-builder-android:$IMAGE_VERSION \
- docker://$ACR_IMAGE_REGISTRY/$IMAGE_NAMESPACE/dev-builder-android:$IMAGE_VERSION
+ docker run quay.io/skopeo/stable:latest copy -a docker://docker.io/${{ vars.IMAGE_NAMESPACE }}/dev-builder-android:${{ inputs.version }} \
+ --dest-creds "$DST_REGISTRY_USERNAME":"$DST_REGISTRY_PASSWORD" \
+ docker://${{ vars.ACR_IMAGE_REGISTRY }}/${{ vars.IMAGE_NAMESPACE }}/dev-builder-android:${{ inputs.version }}

- update-dev-builder-image-tag:
- name: Update dev-builder image tag
- runs-on: ubuntu-latest
- permissions:
- contents: write
- pull-requests: write
- if: ${{ github.event_name == 'push' || inputs.update_dev_builder_image_tag }}
- needs: [
- release-dev-builder-images
- ]
- steps:
- - name: Checkout repository
- uses: actions/checkout@v4
-
- - name: Update dev-builder image tag
- shell: bash
- env:
- GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
- run: |
- ./.github/scripts/update-dev-builder-version.sh ${{ needs.release-dev-builder-images.outputs.version }}

.github/workflows/release.yml (182 changes, vendored)

@@ -18,11 +18,11 @@ on:
  description: The runner uses to build linux-amd64 artifacts
  default: ec2-c6i.4xlarge-amd64
  options:
- - ubuntu-22.04
- - ubuntu-22.04-8-cores
- - ubuntu-22.04-16-cores
- - ubuntu-22.04-32-cores
- - ubuntu-22.04-64-cores
+ - ubuntu-20.04
+ - ubuntu-20.04-8-cores
+ - ubuntu-20.04-16-cores
+ - ubuntu-20.04-32-cores
+ - ubuntu-20.04-64-cores
  - ec2-c6i.xlarge-amd64 # 4C8G
  - ec2-c6i.2xlarge-amd64 # 8C16G
  - ec2-c6i.4xlarge-amd64 # 16C32G
@@ -31,9 +31,8 @@ on:
  linux_arm64_runner:
  type: choice
  description: The runner uses to build linux-arm64 artifacts
- default: ec2-c6g.8xlarge-arm64
+ default: ec2-c6g.4xlarge-arm64
  options:
- - ubuntu-2204-32-cores-arm
  - ec2-c6g.xlarge-arm64 # 4C8G
  - ec2-c6g.2xlarge-arm64 # 8C16G
  - ec2-c6g.4xlarge-arm64 # 16C32G
@@ -83,19 +82,27 @@ on:
  # Use env variables to control all the release process.
  env:
  # The arguments of building greptime.
+ RUST_TOOLCHAIN: nightly-2024-04-20
  CARGO_PROFILE: nightly

  # Controls whether to run tests, include unit-test, integration-test and sqlness.
  DISABLE_RUN_TESTS: ${{ inputs.skip_test || vars.DEFAULT_SKIP_TEST }}

- # The scheduled version is '${{ env.NEXT_RELEASE_VERSION }}-nightly-YYYYMMDD', like v0.2.0-nightly-20230313;
+ # The scheduled version is '${{ env.NEXT_RELEASE_VERSION }}-nightly-YYYYMMDD', like v0.2.0-nigthly-20230313;
  NIGHTLY_RELEASE_PREFIX: nightly
+ # Note: The NEXT_RELEASE_VERSION should be modified manually by every formal release.
+ NEXT_RELEASE_VERSION: v0.9.0

+ # Permission reference: https://docs.github.com/en/actions/using-jobs/assigning-permissions-to-jobs
+ permissions:
+ issues: write # Allows the action to create issues for cyborg.
+ contents: write # Allows the action to create a release.

  jobs:
  allocate-runners:
  name: Allocate runners
  if: ${{ github.repository == 'GreptimeTeam/greptimedb' }}
- runs-on: ubuntu-latest
+ runs-on: ubuntu-20.04
  outputs:
  linux-amd64-runner: ${{ steps.start-linux-amd64-runner.outputs.label }}
  linux-arm64-runner: ${{ steps.start-linux-arm64-runner.outputs.label }}
@@ -110,24 +117,15 @@ jobs:

  # The 'version' use as the global tag name of the release workflow.
  version: ${{ steps.create-version.outputs.version }}

- # The 'is-current-version-latest' determines whether to update 'latest' Docker tags and downstream repositories.
- is-current-version-latest: ${{ steps.check-version.outputs.is-current-version-latest }}
  steps:
  - name: Checkout
  uses: actions/checkout@v4
  with:
  fetch-depth: 0
- persist-credentials: false

- - name: Check Rust toolchain version
- shell: bash
- run: |
- ./scripts/check-builder-rust-version.sh
-
  # The create-version will create a global variable named 'version' in the global workflows.
  # - If it's a tag push release, the version is the tag name(${{ github.ref_name }});
- # - If it's a scheduled release, the version is '${{ env.NEXT_RELEASE_VERSION }}-nightly-$buildTime', like v0.2.0-nightly-20230313;
+ # - If it's a scheduled release, the version is '${{ env.NEXT_RELEASE_VERSION }}-nightly-$buildTime', like v0.2.0-nigthly-20230313;
  # - If it's a manual release, the version is '${{ env.NEXT_RELEASE_VERSION }}-<short-git-sha>-YYYYMMDDSS', like v0.2.0-e5b243c-2023071245;
  - name: Create version
  id: create-version
@@ -136,13 +134,9 @@ jobs:
  env:
  GITHUB_EVENT_NAME: ${{ github.event_name }}
  GITHUB_REF_NAME: ${{ github.ref_name }}
+ NEXT_RELEASE_VERSION: ${{ env.NEXT_RELEASE_VERSION }}
  NIGHTLY_RELEASE_PREFIX: ${{ env.NIGHTLY_RELEASE_PREFIX }}

- - name: Check version
- id: check-version
- run: |
- ./.github/scripts/check-version.sh "${{ steps.create-version.outputs.version }}"
-
  - name: Allocate linux-amd64 runner
  if: ${{ inputs.build_linux_amd64_artifacts || github.event_name == 'push' || github.event_name == 'schedule' }}
  uses: ./.github/actions/start-runner
@@ -182,7 +176,6 @@ jobs:
  - uses: actions/checkout@v4
  with:
  fetch-depth: 0
- persist-credentials: false

  - uses: ./.github/actions/build-linux-artifacts
  with:
@@ -190,8 +183,6 @@ jobs:
  cargo-profile: ${{ env.CARGO_PROFILE }}
  version: ${{ needs.allocate-runners.outputs.version }}
  disable-run-tests: ${{ env.DISABLE_RUN_TESTS }}
- image-registry: ${{ vars.ECR_IMAGE_REGISTRY }}
- image-namespace: ${{ vars.ECR_IMAGE_NAMESPACE }}

  build-linux-arm64-artifacts:
  name: Build linux-arm64 artifacts
@@ -204,7 +195,6 @@ jobs:
  - uses: actions/checkout@v4
  with:
  fetch-depth: 0
- persist-credentials: false

  - uses: ./.github/actions/build-linux-artifacts
  with:
@@ -212,20 +202,6 @@ jobs:
  cargo-profile: ${{ env.CARGO_PROFILE }}
  version: ${{ needs.allocate-runners.outputs.version }}
  disable-run-tests: ${{ env.DISABLE_RUN_TESTS }}
- image-registry: ${{ vars.ECR_IMAGE_REGISTRY }}
- image-namespace: ${{ vars.ECR_IMAGE_NAMESPACE }}

- run-multi-lang-tests:
- name: Run Multi-language SDK Tests
- if: ${{ inputs.build_linux_amd64_artifacts || github.event_name == 'push' || github.event_name == 'schedule' }}
- needs: [
- allocate-runners,
- build-linux-amd64-artifacts,
- ]
- uses: ./.github/workflows/run-multi-lang-tests.yml
- with:
- artifact-name: greptime-linux-amd64-${{ needs.allocate-runners.outputs.version }}
- artifact-is-tarball: true

  build-macos-artifacts:
  name: Build macOS artifacts
@@ -237,10 +213,18 @@ jobs:
  arch: aarch64-apple-darwin
  features: servers/dashboard
  artifacts-dir-prefix: greptime-darwin-arm64
+ - os: ${{ needs.allocate-runners.outputs.macos-runner }}
+ arch: aarch64-apple-darwin
+ features: pyo3_backend,servers/dashboard
+ artifacts-dir-prefix: greptime-darwin-arm64-pyo3
  - os: ${{ needs.allocate-runners.outputs.macos-runner }}
  features: servers/dashboard
  arch: x86_64-apple-darwin
  artifacts-dir-prefix: greptime-darwin-amd64
+ - os: ${{ needs.allocate-runners.outputs.macos-runner }}
+ features: pyo3_backend,servers/dashboard
+ arch: x86_64-apple-darwin
+ artifacts-dir-prefix: greptime-darwin-amd64-pyo3
  runs-on: ${{ matrix.os }}
  outputs:
  build-macos-result: ${{ steps.set-build-macos-result.outputs.build-macos-result }}
@@ -252,16 +236,15 @@ jobs:
  - uses: actions/checkout@v4
  with:
  fetch-depth: 0
- persist-credentials: false

  - uses: ./.github/actions/build-macos-artifacts
  with:
  arch: ${{ matrix.arch }}
+ rust-toolchain: ${{ env.RUST_TOOLCHAIN }}
  cargo-profile: ${{ env.CARGO_PROFILE }}
  features: ${{ matrix.features }}
  version: ${{ needs.allocate-runners.outputs.version }}
- # We decide to disable the integration tests on macOS because it's unnecessary and time-consuming.
- disable-run-tests: true
+ disable-run-tests: ${{ env.DISABLE_RUN_TESTS }}
  artifacts-dir: ${{ matrix.artifacts-dir-prefix }}-${{ needs.allocate-runners.outputs.version }}

  - name: Set build macos result
@@ -279,6 +262,10 @@ jobs:
  arch: x86_64-pc-windows-msvc
  features: servers/dashboard
  artifacts-dir-prefix: greptime-windows-amd64
+ - os: ${{ needs.allocate-runners.outputs.windows-runner }}
+ arch: x86_64-pc-windows-msvc
+ features: pyo3_backend,servers/dashboard
+ artifacts-dir-prefix: greptime-windows-amd64-pyo3
  runs-on: ${{ matrix.os }}
  outputs:
  build-windows-result: ${{ steps.set-build-windows-result.outputs.build-windows-result }}
@@ -292,11 +279,11 @@ jobs:
  - uses: actions/checkout@v4
  with:
  fetch-depth: 0
- persist-credentials: false

  - uses: ./.github/actions/build-windows-artifacts
  with:
  arch: ${{ matrix.arch }}
+ rust-toolchain: ${{ env.RUST_TOOLCHAIN }}
  cargo-profile: ${{ env.CARGO_PROFILE }}
  features: ${{ matrix.features }}
  version: ${{ needs.allocate-runners.outputs.version }}
@@ -315,27 +302,23 @@ jobs:
  allocate-runners,
  build-linux-amd64-artifacts,
  build-linux-arm64-artifacts,
- run-multi-lang-tests,
  ]
- runs-on: ubuntu-latest
+ runs-on: ubuntu-2004-16-cores
  outputs:
  build-image-result: ${{ steps.set-build-image-result.outputs.build-image-result }}
  steps:
  - uses: actions/checkout@v4
  with:
  fetch-depth: 0
- persist-credentials: false

  - name: Build and push images to dockerhub
  uses: ./.github/actions/build-images
  with:
  image-registry: docker.io
  image-namespace: ${{ vars.IMAGE_NAMESPACE }}
- image-name: ${{ vars.GREPTIMEDB_IMAGE_NAME }}
  image-registry-username: ${{ secrets.DOCKERHUB_USERNAME }}
  image-registry-password: ${{ secrets.DOCKERHUB_TOKEN }}
  version: ${{ needs.allocate-runners.outputs.version }}
- push-latest-tag: ${{ needs.allocate-runners.outputs.is-current-version-latest == 'true' && github.ref_type == 'tag' && !contains(github.ref_name, 'nightly') && github.event_name != 'schedule' }}

  - name: Set build image result
  id: set-build-image-result
@@ -353,7 +336,7 @@ jobs:
  build-windows-artifacts,
  release-images-to-dockerhub,
  ]
- runs-on: ubuntu-latest-16-cores
+ runs-on: ubuntu-20.04
  # When we push to ACR, it's easy to fail due to some unknown network issues.
  # However, we don't want to fail the whole workflow because of this.
  # The ACR have daily sync with DockerHub, so don't worry about the image not being updated.
@@ -362,14 +345,13 @@ jobs:
  - uses: actions/checkout@v4
  with:
  fetch-depth: 0
- persist-credentials: false

  - name: Release artifacts to CN region
  uses: ./.github/actions/release-cn-artifacts
  with:
  src-image-registry: docker.io
  src-image-namespace: ${{ vars.IMAGE_NAMESPACE }}
- src-image-name: ${{ vars.GREPTIMEDB_IMAGE_NAME }}
+ src-image-name: greptimedb
  dst-image-registry-username: ${{ secrets.ALICLOUD_USERNAME }}
  dst-image-registry-password: ${{ secrets.ALICLOUD_PASSWORD }}
  dst-image-registry: ${{ vars.ACR_IMAGE_REGISTRY }}
@@ -380,9 +362,8 @@ jobs:
  aws-cn-secret-access-key: ${{ secrets.AWS_CN_SECRET_ACCESS_KEY }}
  aws-cn-region: ${{ vars.AWS_RELEASE_BUCKET_REGION }}
  dev-mode: false
- upload-to-s3: true
  update-version-info: true
- push-latest-tag: ${{ needs.allocate-runners.outputs.is-current-version-latest == 'true' && github.ref_type == 'tag' && !contains(github.ref_name, 'nightly') && github.event_name != 'schedule' }}
+ push-latest-tag: true

  publish-github-release:
  name: Create GitHub release and upload artifacts
@@ -394,14 +375,12 @@ jobs:
  build-macos-artifacts,
  build-windows-artifacts,
  release-images-to-dockerhub,
- run-multi-lang-tests,
  ]
- runs-on: ubuntu-latest
+ runs-on: ubuntu-20.04
  steps:
  - uses: actions/checkout@v4
  with:
  fetch-depth: 0
- persist-credentials: false

  - name: Publish GitHub release
  uses: ./.github/actions/publish-github-release
@@ -410,12 +389,12 @@ jobs:

  ### Stop runners ###
  # It's very necessary to split the job of releasing runners into 'stop-linux-amd64-runner' and 'stop-linux-arm64-runner'.
- # Because we can terminate the specified EC2 instance immediately after the job is finished without unnecessary waiting.
+ # Because we can terminate the specified EC2 instance immediately after the job is finished without uncessary waiting.
  stop-linux-amd64-runner: # It's always run as the last job in the workflow to make sure that the runner is released.
  name: Stop linux-amd64 runner
  # Only run this job when the runner is allocated.
  if: ${{ always() }}
- runs-on: ubuntu-latest
+ runs-on: ubuntu-20.04
  needs: [
  allocate-runners,
  build-linux-amd64-artifacts,
@@ -425,7 +404,6 @@ jobs:
  uses: actions/checkout@v4
  with:
  fetch-depth: 0
- persist-credentials: false

  - name: Stop EC2 runner
  uses: ./.github/actions/stop-runner
@@ -441,7 +419,7 @@ jobs:
  name: Stop linux-arm64 runner
  # Only run this job when the runner is allocated.
  if: ${{ always() }}
- runs-on: ubuntu-latest
+ runs-on: ubuntu-20.04
  needs: [
  allocate-runners,
  build-linux-arm64-artifacts,
@@ -451,7 +429,6 @@ jobs:
  uses: actions/checkout@v4
  with:
  fetch-depth: 0
- persist-credentials: false

  - name: Stop EC2 runner
  uses: ./.github/actions/stop-runner
@@ -463,74 +440,6 @@ jobs:
  aws-region: ${{ vars.EC2_RUNNER_REGION }}
  github-token: ${{ secrets.GH_PERSONAL_ACCESS_TOKEN }}

- bump-downstream-repo-versions:
- name: Bump downstream repo versions
- if: ${{ github.event_name == 'push' || github.event_name == 'schedule' }}
- needs: [allocate-runners, publish-github-release]
- runs-on: ubuntu-latest
- # Permission reference: https://docs.github.com/en/actions/using-jobs/assigning-permissions-to-jobs
- permissions:
- issues: write # Allows the action to create issues for cyborg.
- contents: write # Allows the action to create a release.
- steps:
- - uses: actions/checkout@v4
- with:
- fetch-depth: 0
- persist-credentials: false
- - uses: ./.github/actions/setup-cyborg
- - name: Bump downstream repo versions
- working-directory: cyborg
- run: pnpm tsx bin/bump-versions.ts
- env:
- TARGET_REPOS: website,docs,demo
- VERSION: ${{ needs.allocate-runners.outputs.version }}
- GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
- WEBSITE_REPO_TOKEN: ${{ secrets.WEBSITE_REPO_TOKEN }}
- DOCS_REPO_TOKEN: ${{ secrets.DOCS_REPO_TOKEN }}
- DEMO_REPO_TOKEN: ${{ secrets.DEMO_REPO_TOKEN }}
-
- bump-helm-charts-version:
- name: Bump helm charts version
- if: ${{ github.ref_type == 'tag' && !contains(github.ref_name, 'nightly') && github.event_name != 'schedule' && needs.allocate-runners.outputs.is-current-version-latest == 'true' }}
- needs: [allocate-runners, publish-github-release]
- runs-on: ubuntu-latest
- permissions:
- contents: write
- pull-requests: write
- steps:
- - name: Checkout repository
- uses: actions/checkout@v4
- with:
- fetch-depth: 0
-
- - name: Bump helm charts version
- env:
- GITHUB_TOKEN: ${{ secrets.HELM_CHARTS_REPO_TOKEN }}
- VERSION: ${{ needs.allocate-runners.outputs.version }}
- run: |
- ./.github/scripts/update-helm-charts-version.sh
-
- bump-homebrew-greptime-version:
- name: Bump homebrew greptime version
- if: ${{ github.ref_type == 'tag' && !contains(github.ref_name, 'nightly') && github.event_name != 'schedule' && needs.allocate-runners.outputs.is-current-version-latest == 'true' }}
|
|
||||||
needs: [allocate-runners, publish-github-release]
|
|
||||||
runs-on: ubuntu-latest
|
|
||||||
permissions:
|
|
||||||
contents: write
|
|
||||||
pull-requests: write
|
|
||||||
steps:
|
|
||||||
- name: Checkout repository
|
|
||||||
uses: actions/checkout@v4
|
|
||||||
with:
|
|
||||||
fetch-depth: 0
|
|
||||||
|
|
||||||
- name: Bump homebrew greptime version
|
|
||||||
env:
|
|
||||||
GITHUB_TOKEN: ${{ secrets.HOMEBREW_GREPTIME_REPO_TOKEN }}
|
|
||||||
VERSION: ${{ needs.allocate-runners.outputs.version }}
|
|
||||||
run: |
|
|
||||||
./.github/scripts/update-homebrew-greptme-version.sh
|
|
||||||
|
|
||||||
notification:
|
notification:
|
||||||
if: ${{ github.repository == 'GreptimeTeam/greptimedb' && (github.event_name == 'push' || github.event_name == 'schedule') && always() }}
|
if: ${{ github.repository == 'GreptimeTeam/greptimedb' && (github.event_name == 'push' || github.event_name == 'schedule') && always() }}
|
||||||
name: Send notification to Greptime team
|
name: Send notification to Greptime team
|
||||||
@@ -539,18 +448,11 @@ jobs:
|
|||||||
build-macos-artifacts,
|
build-macos-artifacts,
|
||||||
build-windows-artifacts,
|
build-windows-artifacts,
|
||||||
]
|
]
|
||||||
runs-on: ubuntu-latest
|
runs-on: ubuntu-20.04
|
||||||
# Permission reference: https://docs.github.com/en/actions/using-jobs/assigning-permissions-to-jobs
|
|
||||||
permissions:
|
|
||||||
issues: write # Allows the action to create issues for cyborg.
|
|
||||||
contents: write # Allows the action to create a release.
|
|
||||||
env:
|
env:
|
||||||
SLACK_WEBHOOK_URL: ${{ secrets.SLACK_WEBHOOK_URL_DEVELOP_CHANNEL }}
|
SLACK_WEBHOOK_URL: ${{ secrets.SLACK_WEBHOOK_URL_DEVELOP_CHANNEL }}
|
||||||
steps:
|
steps:
|
||||||
- uses: actions/checkout@v4
|
- uses: actions/checkout@v4
|
||||||
with:
|
|
||||||
fetch-depth: 0
|
|
||||||
persist-credentials: false
|
|
||||||
- uses: ./.github/actions/setup-cyborg
|
- uses: ./.github/actions/setup-cyborg
|
||||||
- name: Report CI status
|
- name: Report CI status
|
||||||
id: report-ci-status
|
id: report-ci-status
|
||||||
|
|||||||
194  .github/workflows/run-multi-lang-tests.yml  vendored
@@ -1,194 +0,0 @@
|
|||||||
# Reusable workflow for running multi-language SDK tests against GreptimeDB
|
|
||||||
# Used by: multi-lang-tests.yml, release.yml, nightly-build.yml
|
|
||||||
# Supports both direct binary artifacts and tarball artifacts
|
|
||||||
|
|
||||||
name: Run Multi-language SDK Tests
|
|
||||||
|
|
||||||
on:
|
|
||||||
workflow_call:
|
|
||||||
inputs:
|
|
||||||
artifact-name:
|
|
||||||
required: true
|
|
||||||
type: string
|
|
||||||
description: 'Name of the artifact containing greptime binary'
|
|
||||||
http-port:
|
|
||||||
required: false
|
|
||||||
type: string
|
|
||||||
default: '4000'
|
|
||||||
description: 'HTTP server port'
|
|
||||||
mysql-port:
|
|
||||||
required: false
|
|
||||||
type: string
|
|
||||||
default: '4002'
|
|
||||||
description: 'MySQL server port'
|
|
||||||
postgres-port:
|
|
||||||
required: false
|
|
||||||
type: string
|
|
||||||
default: '4003'
|
|
||||||
description: 'PostgreSQL server port'
|
|
||||||
db-name:
|
|
||||||
required: false
|
|
||||||
type: string
|
|
||||||
default: 'test_db'
|
|
||||||
description: 'Test database name'
|
|
||||||
username:
|
|
||||||
required: false
|
|
||||||
type: string
|
|
||||||
default: 'greptime_user'
|
|
||||||
description: 'Authentication username'
|
|
||||||
password:
|
|
||||||
required: false
|
|
||||||
type: string
|
|
||||||
default: 'greptime_pwd'
|
|
||||||
description: 'Authentication password'
|
|
||||||
timeout-minutes:
|
|
||||||
required: false
|
|
||||||
type: number
|
|
||||||
default: 30
|
|
||||||
description: 'Job timeout in minutes'
|
|
||||||
artifact-is-tarball:
|
|
||||||
required: false
|
|
||||||
type: boolean
|
|
||||||
default: false
|
|
||||||
description: 'Whether the artifact is a tarball (tar.gz) that needs to be extracted'
|
|
||||||
|
|
||||||
jobs:
|
|
||||||
run-tests:
|
|
||||||
name: Run Multi-language SDK Tests
|
|
||||||
runs-on: ubuntu-latest
|
|
||||||
timeout-minutes: ${{ inputs.timeout-minutes }}
|
|
||||||
steps:
|
|
||||||
- name: Checkout greptimedb-tests repository
|
|
||||||
uses: actions/checkout@v4
|
|
||||||
with:
|
|
||||||
repository: GreptimeTeam/greptimedb-tests
|
|
||||||
persist-credentials: false
|
|
||||||
|
|
||||||
- name: Download pre-built greptime binary
|
|
||||||
uses: actions/download-artifact@v4
|
|
||||||
with:
|
|
||||||
name: ${{ inputs.artifact-name }}
|
|
||||||
path: artifact
|
|
||||||
|
|
||||||
- name: Setup greptime binary
|
|
||||||
run: |
|
|
||||||
mkdir -p bin
|
|
||||||
if [ "${{ inputs.artifact-is-tarball }}" = "true" ]; then
|
|
||||||
# Extract tarball and find greptime binary
|
|
||||||
tar -xzf artifact/*.tar.gz -C artifact
|
|
||||||
find artifact -name "greptime" -type f -exec cp {} bin/greptime \;
|
|
||||||
else
|
|
||||||
# Direct binary format
|
|
||||||
if [ -f artifact/greptime ]; then
|
|
||||||
cp artifact/greptime bin/greptime
|
|
||||||
else
|
|
||||||
cp artifact/* bin/greptime
|
|
||||||
fi
|
|
||||||
fi
|
|
||||||
chmod +x ./bin/greptime
|
|
||||||
ls -lh ./bin/greptime
|
|
||||||
./bin/greptime --version
|
|
||||||
|
|
||||||
- name: Setup Java 17
|
|
||||||
uses: actions/setup-java@v4
|
|
||||||
with:
|
|
||||||
distribution: 'temurin'
|
|
||||||
java-version: '17'
|
|
||||||
cache: 'maven'
|
|
||||||
|
|
||||||
- name: Setup Python 3.8
|
|
||||||
uses: actions/setup-python@v5
|
|
||||||
with:
|
|
||||||
python-version: '3.8'
|
|
||||||
|
|
||||||
- name: Setup Go 1.24
|
|
||||||
uses: actions/setup-go@v5
|
|
||||||
with:
|
|
||||||
go-version: '1.24'
|
|
||||||
cache: true
|
|
||||||
cache-dependency-path: go-tests/go.sum
|
|
||||||
|
|
||||||
- name: Set up Node.js
|
|
||||||
uses: actions/setup-node@v4
|
|
||||||
with:
|
|
||||||
node-version: '18'
|
|
||||||
|
|
||||||
- name: Install Python dependencies
|
|
||||||
run: |
|
|
||||||
pip install mysql-connector-python psycopg2-binary
|
|
||||||
python3 -c "import mysql.connector; print(f'mysql-connector-python {mysql.connector.__version__}')"
|
|
||||||
python3 -c "import psycopg2; print(f'psycopg2 {psycopg2.__version__}')"
|
|
||||||
|
|
||||||
- name: Install Go dependencies
|
|
||||||
working-directory: go-tests
|
|
||||||
run: |
|
|
||||||
go mod download
|
|
||||||
go mod verify
|
|
||||||
go version
|
|
||||||
|
|
||||||
- name: Kill existing GreptimeDB processes
|
|
||||||
run: |
|
|
||||||
pkill -f greptime || true
|
|
||||||
sleep 2
|
|
||||||
|
|
||||||
- name: Start GreptimeDB standalone
|
|
||||||
run: |
|
|
||||||
./bin/greptime standalone start \
|
|
||||||
--http-addr 0.0.0.0:${{ inputs.http-port }} \
|
|
||||||
--rpc-addr 0.0.0.0:4001 \
|
|
||||||
--mysql-addr 0.0.0.0:${{ inputs.mysql-port }} \
|
|
||||||
--postgres-addr 0.0.0.0:${{ inputs.postgres-port }} \
|
|
||||||
--user-provider=static_user_provider:cmd:${{ inputs.username }}=${{ inputs.password }} > /tmp/greptimedb.log 2>&1 &
|
|
||||||
|
|
||||||
- name: Wait for GreptimeDB to be ready
|
|
||||||
run: |
|
|
||||||
echo "Waiting for GreptimeDB..."
|
|
||||||
for i in {1..60}; do
|
|
||||||
if curl -sf http://localhost:${{ inputs.http-port }}/health > /dev/null; then
|
|
||||||
echo "✅ GreptimeDB is ready"
|
|
||||||
exit 0
|
|
||||||
fi
|
|
||||||
sleep 2
|
|
||||||
done
|
|
||||||
echo "❌ GreptimeDB failed to start"
|
|
||||||
cat /tmp/greptimedb.log
|
|
||||||
exit 1
|
|
||||||
|
|
||||||
- name: Run multi-language tests
|
|
||||||
env:
|
|
||||||
DB_NAME: ${{ inputs.db-name }}
|
|
||||||
MYSQL_HOST: 127.0.0.1
|
|
||||||
MYSQL_PORT: ${{ inputs.mysql-port }}
|
|
||||||
POSTGRES_HOST: 127.0.0.1
|
|
||||||
POSTGRES_PORT: ${{ inputs.postgres-port }}
|
|
||||||
HTTP_HOST: 127.0.0.1
|
|
||||||
HTTP_PORT: ${{ inputs.http-port }}
|
|
||||||
GREPTIME_USERNAME: ${{ inputs.username }}
|
|
||||||
GREPTIME_PASSWORD: ${{ inputs.password }}
|
|
||||||
run: |
|
|
||||||
chmod +x ./run_tests.sh
|
|
||||||
./run_tests.sh
|
|
||||||
|
|
||||||
- name: Collect logs on failure
|
|
||||||
if: failure()
|
|
||||||
run: |
|
|
||||||
echo "=== GreptimeDB Logs ==="
|
|
||||||
cat /tmp/greptimedb.log || true
|
|
||||||
|
|
||||||
- name: Upload test logs on failure
|
|
||||||
if: failure()
|
|
||||||
uses: actions/upload-artifact@v4
|
|
||||||
with:
|
|
||||||
name: test-logs
|
|
||||||
path: |
|
|
||||||
/tmp/greptimedb.log
|
|
||||||
java-tests/target/surefire-reports/
|
|
||||||
python-tests/.pytest_cache/
|
|
||||||
go-tests/*.log
|
|
||||||
**/test-output/
|
|
||||||
retention-days: 7
|
|
||||||
|
|
||||||
- name: Cleanup
|
|
||||||
if: always()
|
|
||||||
run: |
|
|
||||||
pkill -f greptime || true
|
|
||||||
10  .github/workflows/schedule.yml  vendored
@@ -4,20 +4,18 @@ on:
|
|||||||
- cron: '4 2 * * *'
|
- cron: '4 2 * * *'
|
||||||
workflow_dispatch:
|
workflow_dispatch:
|
||||||
|
|
||||||
|
permissions:
|
||||||
|
contents: read
|
||||||
|
issues: write
|
||||||
|
pull-requests: write
|
||||||
|
|
||||||
jobs:
|
jobs:
|
||||||
maintenance:
|
maintenance:
|
||||||
name: Periodic Maintenance
|
name: Periodic Maintenance
|
||||||
runs-on: ubuntu-latest
|
runs-on: ubuntu-latest
|
||||||
permissions:
|
|
||||||
contents: read
|
|
||||||
issues: write
|
|
||||||
pull-requests: write
|
|
||||||
if: ${{ github.repository == 'GreptimeTeam/greptimedb' }}
|
if: ${{ github.repository == 'GreptimeTeam/greptimedb' }}
|
||||||
steps:
|
steps:
|
||||||
- uses: actions/checkout@v4
|
- uses: actions/checkout@v4
|
||||||
with:
|
|
||||||
persist-credentials: false
|
|
||||||
- uses: ./.github/actions/setup-cyborg
|
- uses: ./.github/actions/setup-cyborg
|
||||||
- name: Do Maintenance
|
- name: Do Maintenance
|
||||||
working-directory: cyborg
|
working-directory: cyborg
|
||||||
|
|||||||
11  .github/workflows/semantic-pull-request.yml  vendored
@@ -7,18 +7,9 @@ on:
|
|||||||
- reopened
|
- reopened
|
||||||
- edited
|
- edited
|
||||||
|
|
||||||
concurrency:
|
|
||||||
group: ${{ github.workflow }}-${{ github.head_ref || github.run_id }}
|
|
||||||
cancel-in-progress: true
|
|
||||||
|
|
||||||
permissions:
|
|
||||||
contents: read
|
|
||||||
pull-requests: write
|
|
||||||
issues: write
|
|
||||||
|
|
||||||
jobs:
|
jobs:
|
||||||
check:
|
check:
|
||||||
runs-on: ubuntu-latest
|
runs-on: ubuntu-20.04
|
||||||
timeout-minutes: 10
|
timeout-minutes: 10
|
||||||
steps:
|
steps:
|
||||||
- uses: actions/checkout@v4
|
- uses: actions/checkout@v4
|
||||||
|
|||||||
17  .gitignore  vendored
@@ -28,7 +28,6 @@ debug/
|
|||||||
# Logs
|
# Logs
|
||||||
**/__unittest_logs
|
**/__unittest_logs
|
||||||
logs/
|
logs/
|
||||||
!grafana/dashboards/logs/
|
|
||||||
|
|
||||||
# cpython's generated python byte code
|
# cpython's generated python byte code
|
||||||
**/__pycache__/
|
**/__pycache__/
|
||||||
@@ -51,19 +50,3 @@ venv/
|
|||||||
# Fuzz tests
|
# Fuzz tests
|
||||||
tests-fuzz/artifacts/
|
tests-fuzz/artifacts/
|
||||||
tests-fuzz/corpus/
|
tests-fuzz/corpus/
|
||||||
|
|
||||||
# cargo-udeps reports
|
|
||||||
udeps-report.json
|
|
||||||
|
|
||||||
# Nix
|
|
||||||
.direnv
|
|
||||||
.envrc
|
|
||||||
|
|
||||||
## default data home
|
|
||||||
greptimedb_data
|
|
||||||
|
|
||||||
# github
|
|
||||||
!/.github
|
|
||||||
|
|
||||||
# Claude code
|
|
||||||
CLAUDE.md
|
|
||||||
|
|||||||
@@ -16,7 +16,6 @@ repos:
|
|||||||
hooks:
|
hooks:
|
||||||
- id: fmt
|
- id: fmt
|
||||||
- id: clippy
|
- id: clippy
|
||||||
args: ["--workspace", "--all-targets", "--all-features", "--", "-D", "warnings"]
|
args: ["--workspace", "--all-targets", "--", "-D", "warnings", "-D", "clippy::print_stdout", "-D", "clippy::print_stderr"]
|
||||||
stages: [pre-push]
|
stages: [push]
|
||||||
- id: cargo-check
|
- id: cargo-check
|
||||||
args: ["--workspace", "--all-targets", "--all-features"]
|
|
||||||
|
|||||||
43  AUTHOR.md
@@ -1,43 +0,0 @@
|
|||||||
# GreptimeDB Authors
|
|
||||||
|
|
||||||
## Individual Committers (in alphabetical order)
|
|
||||||
|
|
||||||
- [apdong2022](https://github.com/apdong2022)
|
|
||||||
- [beryl678](https://github.com/beryl678)
|
|
||||||
- [CookiePieWw](https://github.com/CookiePieWw)
|
|
||||||
- [etolbakov](https://github.com/etolbakov)
|
|
||||||
- [irenjj](https://github.com/irenjj)
|
|
||||||
- [KKould](https://github.com/KKould)
|
|
||||||
- [Lanqing Yang](https://github.com/lyang24)
|
|
||||||
- [nicecui](https://github.com/nicecui)
|
|
||||||
- [NiwakaDev](https://github.com/NiwakaDev)
|
|
||||||
- [paomian](https://github.com/paomian)
|
|
||||||
- [tisonkun](https://github.com/tisonkun)
|
|
||||||
- [Wenjie0329](https://github.com/Wenjie0329)
|
|
||||||
- [zhaoyingnan01](https://github.com/zhaoyingnan01)
|
|
||||||
- [zhongzc](https://github.com/zhongzc)
|
|
||||||
- [ZonaHex](https://github.com/ZonaHex)
|
|
||||||
- [zyy17](https://github.com/zyy17)
|
|
||||||
|
|
||||||
## Team Members (in alphabetical order)
|
|
||||||
|
|
||||||
- [daviderli614](https://github.com/daviderli614)
|
|
||||||
- [discord9](https://github.com/discord9)
|
|
||||||
- [evenyag](https://github.com/evenyag)
|
|
||||||
- [fengjiachun](https://github.com/fengjiachun)
|
|
||||||
- [fengys1996](https://github.com/fengys1996)
|
|
||||||
- [GrepTime](https://github.com/GrepTime)
|
|
||||||
- [holalengyu](https://github.com/holalengyu)
|
|
||||||
- [killme2008](https://github.com/killme2008)
|
|
||||||
- [MichaelScofield](https://github.com/MichaelScofield)
|
|
||||||
- [shuiyisong](https://github.com/shuiyisong)
|
|
||||||
- [sunchanglong](https://github.com/sunchanglong)
|
|
||||||
- [sunng87](https://github.com/sunng87)
|
|
||||||
- [v0y4g3r](https://github.com/v0y4g3r)
|
|
||||||
- [waynexia](https://github.com/waynexia)
|
|
||||||
- [WenyXu](https://github.com/WenyXu)
|
|
||||||
- [xtang](https://github.com/xtang)
|
|
||||||
|
|
||||||
## All Contributors
|
|
||||||
|
|
||||||
To see the full list of contributors, please visit our [Contributors page](https://github.com/GreptimeTeam/greptimedb/graphs/contributors)
|
|
||||||
@@ -2,11 +2,7 @@
|
|||||||
|
|
||||||
Thanks a lot for considering contributing to GreptimeDB. We believe people like you would make GreptimeDB a great product. We intend to build a community where individuals can have open talks, show respect for one another, and speak with true ❤️. Meanwhile, we are to keep transparency and make your effort count here.
|
Thanks a lot for considering contributing to GreptimeDB. We believe people like you would make GreptimeDB a great product. We intend to build a community where individuals can have open talks, show respect for one another, and speak with true ❤️. Meanwhile, we are to keep transparency and make your effort count here.
|
||||||
|
|
||||||
You can find our contributors at https://github.com/GreptimeTeam/greptimedb/graphs/contributors. When you dedicate to GreptimeDB for a few months and keep bringing high-quality contributions (code, docs, advocate, etc.), you will be a candidate of a committer.
|
Please read the guidelines, and they can help you get started. Communicate with respect to developers maintaining and developing the project. In return, they should reciprocate that respect by addressing your issue, reviewing changes, as well as helping finalize and merge your pull requests.
|
||||||
|
|
||||||
A committer will be granted both read & write access to GreptimeDB repos. Check the [AUTHOR.md](AUTHOR.md) file for all current individual committers.
|
|
||||||
|
|
||||||
Please read the guidelines, and they can help you get started. Communicate respectfully with the developers maintaining and developing the project. In return, they should reciprocate that respect by addressing your issue, reviewing changes, as well as helping finalize and merge your pull requests.
|
|
||||||
|
|
||||||
Follow our [README](https://github.com/GreptimeTeam/greptimedb#readme) to get the whole picture of the project. To learn about the design of GreptimeDB, please refer to the [design docs](https://github.com/GrepTimeTeam/docs).
|
Follow our [README](https://github.com/GreptimeTeam/greptimedb#readme) to get the whole picture of the project. To learn about the design of GreptimeDB, please refer to the [design docs](https://github.com/GrepTimeTeam/docs).
|
||||||
|
|
||||||
@@ -14,7 +10,7 @@ Follow our [README](https://github.com/GreptimeTeam/greptimedb#readme) to get th
|
|||||||
|
|
||||||
It can feel intimidating to contribute to a complex project, but it can also be exciting and fun. These general notes will help everyone participate in this communal activity.
|
It can feel intimidating to contribute to a complex project, but it can also be exciting and fun. These general notes will help everyone participate in this communal activity.
|
||||||
|
|
||||||
- Follow the [Code of Conduct](https://github.com/GreptimeTeam/.github/blob/main/.github/CODE_OF_CONDUCT.md)
|
- Follow the [Code of Conduct](https://github.com/GreptimeTeam/greptimedb/blob/main/CODE_OF_CONDUCT.md)
|
||||||
- Small changes make huge differences. We will happily accept a PR making a single character change if it helps move forward. Don't wait to have everything working.
|
- Small changes make huge differences. We will happily accept a PR making a single character change if it helps move forward. Don't wait to have everything working.
|
||||||
- Check the closed issues before opening your issue.
|
- Check the closed issues before opening your issue.
|
||||||
- Try to follow the existing style of the code.
|
- Try to follow the existing style of the code.
|
||||||
@@ -30,7 +26,7 @@ Pull requests are great, but we accept all kinds of other help if you like. Such
|
|||||||
|
|
||||||
## Code of Conduct
|
## Code of Conduct
|
||||||
|
|
||||||
Also, there are things that we are not looking for because they don't match the goals of the product or benefit the community. Please read [Code of Conduct](https://github.com/GreptimeTeam/.github/blob/main/.github/CODE_OF_CONDUCT.md); we hope everyone can keep good manners and become an honored member.
|
Also, there are things that we are not looking for because they don't match the goals of the product or benefit the community. Please read [Code of Conduct](https://github.com/GreptimeTeam/greptimedb/blob/main/CODE_OF_CONDUCT.md); we hope everyone can keep good manners and become an honored member.
|
||||||
|
|
||||||
## License
|
## License
|
||||||
|
|
||||||
@@ -55,18 +51,14 @@ GreptimeDB uses the [Apache 2.0 license](https://github.com/GreptimeTeam/greptim
|
|||||||
- To ensure that community is free and confident in its ability to use your contributions, please sign the Contributor License Agreement (CLA) which will be incorporated in the pull request process.
|
- To ensure that community is free and confident in its ability to use your contributions, please sign the Contributor License Agreement (CLA) which will be incorporated in the pull request process.
|
||||||
- Make sure all files have proper license header (running `docker run --rm -v $(pwd):/github/workspace ghcr.io/korandoru/hawkeye-native:v3 format` from the project root).
|
- Make sure all files have proper license header (running `docker run --rm -v $(pwd):/github/workspace ghcr.io/korandoru/hawkeye-native:v3 format` from the project root).
|
||||||
- Make sure all your codes are formatted and follow the [coding style](https://pingcap.github.io/style-guide/rust/) and [style guide](docs/style-guide.md).
|
- Make sure all your codes are formatted and follow the [coding style](https://pingcap.github.io/style-guide/rust/) and [style guide](docs/style-guide.md).
|
||||||
- Make sure all unit tests are passed using [nextest](https://nexte.st/index.html) `cargo nextest run --workspace --features pg_kvbackend,mysql_kvbackend` or `make test`.
|
- Make sure all unit tests are passed (using `cargo test --workspace` or [nextest](https://nexte.st/index.html) `cargo nextest run`).
|
||||||
- Make sure all clippy warnings are fixed (you can check it locally by running `cargo clippy --workspace --all-targets -- -D warnings` or `make clippy`).
|
- Make sure all clippy warnings are fixed (you can check it locally by running `cargo clippy --workspace --all-targets -- -D warnings`).
|
||||||
- Ensure there are no unused dependencies by running `make check-udeps` (clean them up with `make fix-udeps` if reported).
|
|
||||||
- If you must keep a target-specific dependency (e.g. under `[target.'cfg(...)'.dev-dependencies]`), add a cargo-udeps ignore entry in the same `Cargo.toml`, for example:
|
|
||||||
`[package.metadata.cargo-udeps.ignore]` with `development = ["rexpect"]` (or `dependencies`/`build` as appropriate).
|
|
||||||
- When modifying sample configuration files in `config/`, run `make config-docs` (which requires Docker to be installed) to update the configuration documentation and include it in your commit.
|
|
||||||
|
|
||||||
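A minimal sketch of the cargo-udeps ignore entry mentioned in the checklist above, assuming a hypothetical member crate that keeps a Unix-only dev-dependency (the crate name, target cfg, and version are illustrative only):

    # Hypothetical member Cargo.toml: a target-specific dev-dependency that
    # cargo-udeps would otherwise report as unused on other platforms.
    [target.'cfg(unix)'.dev-dependencies]
    rexpect = "0.5"  # version shown for illustration only

    # Ignore entry in the same Cargo.toml; the key matches the dependency
    # section the crate sits in (a dev-dependency, hence `development`).
    [package.metadata.cargo-udeps.ignore]
    development = ["rexpect"]

The intent is that `make check-udeps` then passes without the dependency being dropped.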
#### `pre-commit` Hooks
|
#### `pre-commit` Hooks
|
||||||
|
|
||||||
You could setup the [`pre-commit`](https://pre-commit.com/#plugins) hooks to run these checks on every commit automatically.
|
You could setup the [`pre-commit`](https://pre-commit.com/#plugins) hooks to run these checks on every commit automatically.
|
||||||
|
|
||||||
1. Install `pre-commit`
|
1. Install `pre-commit`
|
||||||
|
|
||||||
pip install pre-commit
|
pip install pre-commit
|
||||||
|
|
||||||
@@ -74,7 +66,7 @@ You could setup the [`pre-commit`](https://pre-commit.com/#plugins) hooks to run
|
|||||||
|
|
||||||
brew install pre-commit
|
brew install pre-commit
|
||||||
|
|
||||||
2. Install the `pre-commit` hooks
|
2. Install the `pre-commit` hooks
|
||||||
|
|
||||||
$ pre-commit install
|
$ pre-commit install
|
||||||
pre-commit installed at .git/hooks/pre-commit
|
pre-commit installed at .git/hooks/pre-commit
|
||||||
@@ -112,7 +104,7 @@ of what you were trying to do and what went wrong. You can also reach for help i
|
|||||||
The core team will be thrilled if you would like to participate in any way you like. When you are stuck, try to ask for help by filing an issue, with a detailed description of what you were trying to do and what went wrong. If you have any questions or if you would like to get involved in our community, please check out:
|
The core team will be thrilled if you would like to participate in any way you like. When you are stuck, try to ask for help by filing an issue, with a detailed description of what you were trying to do and what went wrong. If you have any questions or if you would like to get involved in our community, please check out:
|
||||||
|
|
||||||
- [GreptimeDB Community Slack](https://greptime.com/slack)
|
- [GreptimeDB Community Slack](https://greptime.com/slack)
|
||||||
- [GreptimeDB GitHub Discussions](https://github.com/GreptimeTeam/greptimedb/discussions)
|
- [GreptimeDB Github Discussions](https://github.com/GreptimeTeam/greptimedb/discussions)
|
||||||
|
|
||||||
Also, see some extra GreptimeDB content:
|
Also, see some extra GreptimeDB content:
|
||||||
|
|
||||||
|
|||||||
11637  Cargo.lock  generated
File diff suppressed because it is too large
225  Cargo.toml
@@ -1,72 +1,62 @@
|
|||||||
[workspace]
|
[workspace]
|
||||||
members = [
|
members = [
|
||||||
|
"benchmarks",
|
||||||
"src/api",
|
"src/api",
|
||||||
"src/auth",
|
"src/auth",
|
||||||
"src/cache",
|
|
||||||
"src/catalog",
|
"src/catalog",
|
||||||
"src/cli",
|
"src/cache",
|
||||||
"src/client",
|
"src/client",
|
||||||
"src/cmd",
|
"src/cmd",
|
||||||
"src/common/base",
|
"src/common/base",
|
||||||
"src/common/catalog",
|
"src/common/catalog",
|
||||||
"src/common/config",
|
"src/common/config",
|
||||||
"src/common/datasource",
|
"src/common/datasource",
|
||||||
"src/common/decimal",
|
|
||||||
"src/common/error",
|
"src/common/error",
|
||||||
"src/common/event-recorder",
|
|
||||||
"src/common/frontend",
|
"src/common/frontend",
|
||||||
"src/common/function",
|
"src/common/function",
|
||||||
|
"src/common/macro",
|
||||||
"src/common/greptimedb-telemetry",
|
"src/common/greptimedb-telemetry",
|
||||||
"src/common/grpc",
|
"src/common/grpc",
|
||||||
"src/common/grpc-expr",
|
"src/common/grpc-expr",
|
||||||
"src/common/macro",
|
|
||||||
"src/common/mem-prof",
|
"src/common/mem-prof",
|
||||||
"src/common/meta",
|
"src/common/meta",
|
||||||
"src/common/options",
|
|
||||||
"src/common/plugins",
|
"src/common/plugins",
|
||||||
"src/common/pprof",
|
|
||||||
"src/common/procedure",
|
"src/common/procedure",
|
||||||
"src/common/procedure-test",
|
"src/common/procedure-test",
|
||||||
"src/common/query",
|
"src/common/query",
|
||||||
"src/common/recordbatch",
|
"src/common/recordbatch",
|
||||||
"src/common/runtime",
|
"src/common/runtime",
|
||||||
"src/common/session",
|
|
||||||
"src/common/sql",
|
|
||||||
"src/common/stat",
|
|
||||||
"src/common/substrait",
|
"src/common/substrait",
|
||||||
"src/common/telemetry",
|
"src/common/telemetry",
|
||||||
"src/common/test-util",
|
"src/common/test-util",
|
||||||
"src/common/time",
|
"src/common/time",
|
||||||
|
"src/common/decimal",
|
||||||
"src/common/version",
|
"src/common/version",
|
||||||
"src/common/wal",
|
"src/common/wal",
|
||||||
"src/common/workload",
|
|
||||||
"src/datanode",
|
"src/datanode",
|
||||||
"src/datatypes",
|
"src/datatypes",
|
||||||
"src/file-engine",
|
"src/file-engine",
|
||||||
"src/flow",
|
"src/flow",
|
||||||
"src/frontend",
|
"src/frontend",
|
||||||
"src/index",
|
|
||||||
"src/log-query",
|
|
||||||
"src/log-store",
|
"src/log-store",
|
||||||
"src/meta-client",
|
"src/meta-client",
|
||||||
"src/meta-srv",
|
"src/meta-srv",
|
||||||
"src/metric-engine",
|
"src/metric-engine",
|
||||||
"src/mito-codec",
|
|
||||||
"src/mito2",
|
"src/mito2",
|
||||||
"src/object-store",
|
"src/object-store",
|
||||||
"src/operator",
|
"src/operator",
|
||||||
"src/partition",
|
"src/partition",
|
||||||
"src/pipeline",
|
|
||||||
"src/plugins",
|
"src/plugins",
|
||||||
"src/promql",
|
"src/promql",
|
||||||
"src/puffin",
|
"src/puffin",
|
||||||
"src/query",
|
"src/query",
|
||||||
"src/standalone",
|
"src/script",
|
||||||
"src/servers",
|
"src/servers",
|
||||||
"src/session",
|
"src/session",
|
||||||
"src/sql",
|
"src/sql",
|
||||||
"src/store-api",
|
"src/store-api",
|
||||||
"src/table",
|
"src/table",
|
||||||
|
"src/index",
|
||||||
"tests-fuzz",
|
"tests-fuzz",
|
||||||
"tests-integration",
|
"tests-integration",
|
||||||
"tests/runner",
|
"tests/runner",
|
||||||
@@ -74,8 +64,8 @@ members = [
|
|||||||
resolver = "2"
|
resolver = "2"
|
||||||
|
|
||||||
[workspace.package]
|
[workspace.package]
|
||||||
version = "1.0.0-beta.2"
|
version = "0.8.1"
|
||||||
edition = "2024"
|
edition = "2021"
|
||||||
license = "Apache-2.0"
|
license = "Apache-2.0"
|
||||||
|
|
||||||
[workspace.lints]
|
[workspace.lints]
|
||||||
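As a side note on the `[workspace.package]` table above and the `[workspace.lints]` table that continues below: member crates pick these values up through Cargo's workspace inheritance rather than repeating them. A minimal sketch of a hypothetical member manifest (the crate name is illustrative):

    # Hypothetical src/<member>/Cargo.toml
    [package]
    name = "example-member"
    version.workspace = true   # inherits the version declared in [workspace.package]
    edition.workspace = true
    license.workspace = true

    # Opt in to the shared clippy/rust lint levels from [workspace.lints].
    [lints]
    workspace = true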
@@ -83,180 +73,122 @@ clippy.print_stdout = "warn"
|
|||||||
clippy.print_stderr = "warn"
|
clippy.print_stderr = "warn"
|
||||||
clippy.dbg_macro = "warn"
|
clippy.dbg_macro = "warn"
|
||||||
clippy.implicit_clone = "warn"
|
clippy.implicit_clone = "warn"
|
||||||
clippy.result_large_err = "allow"
|
clippy.readonly_write_lock = "allow"
|
||||||
clippy.large_enum_variant = "allow"
|
|
||||||
clippy.doc_overindented_list_items = "allow"
|
|
||||||
clippy.uninlined_format_args = "allow"
|
|
||||||
rust.unknown_lints = "deny"
|
rust.unknown_lints = "deny"
|
||||||
rust.unexpected_cfgs = { level = "warn", check-cfg = ['cfg(tokio_unstable)'] }
|
# Remove this after https://github.com/PyO3/pyo3/issues/4094
|
||||||
|
rust.non_local_definitions = "allow"
|
||||||
|
|
||||||
[workspace.dependencies]
|
[workspace.dependencies]
|
||||||
# DO_NOT_REMOVE_THIS: BEGIN_OF_EXTERNAL_DEPENDENCIES
|
|
||||||
# We turn off default-features for some dependencies here so the workspaces which inherit them can
|
# We turn off default-features for some dependencies here so the workspaces which inherit them can
|
||||||
# selectively turn them on if needed, since we can override default-features = true (from false)
|
# selectively turn them on if needed, since we can override default-features = true (from false)
|
||||||
# for the inherited dependency but cannot do the reverse (override from true to false).
|
# for the inherited dependency but cannot do the reverse (override from true to false).
|
||||||
#
|
#
|
||||||
# See for more detaiils: https://github.com/rust-lang/cargo/issues/11329
|
# See for more detaiils: https://github.com/rust-lang/cargo/issues/11329
|
||||||
ahash = { version = "0.8", features = ["compile-time-rng"] }
|
ahash = { version = "0.8", features = ["compile-time-rng"] }
|
||||||
aquamarine = "0.6"
|
aquamarine = "0.3"
|
||||||
arrow = { version = "56.2", features = ["prettyprint"] }
|
arrow = { version = "51.0.0", features = ["prettyprint"] }
|
||||||
arrow-array = { version = "56.2", default-features = false, features = ["chrono-tz"] }
|
arrow-array = { version = "51.0.0", default-features = false, features = ["chrono-tz"] }
|
||||||
arrow-buffer = "56.2"
|
arrow-flight = "51.0"
|
||||||
arrow-flight = "56.2"
|
arrow-ipc = { version = "51.0.0", default-features = false, features = ["lz4"] }
|
||||||
arrow-ipc = { version = "56.2", default-features = false, features = ["lz4", "zstd"] }
|
arrow-schema = { version = "51.0", features = ["serde"] }
|
||||||
arrow-schema = { version = "56.2", features = ["serde"] }
|
|
||||||
async-stream = "0.3"
|
async-stream = "0.3"
|
||||||
async-trait = "0.1"
|
async-trait = "0.1"
|
||||||
# Remember to update axum-extra, axum-macros when updating axum
|
axum = { version = "0.6", features = ["headers"] }
|
||||||
axum = "0.8"
|
base64 = "0.21"
|
||||||
axum-extra = "0.10"
|
|
||||||
axum-macros = "0.5"
|
|
||||||
backon = "1"
|
|
||||||
base64 = "0.22"
|
|
||||||
bigdecimal = "0.4.2"
|
bigdecimal = "0.4.2"
|
||||||
bitflags = "2.4.1"
|
bitflags = "2.4.1"
|
||||||
bytemuck = "1.12"
|
bytemuck = "1.12"
|
||||||
bytes = { version = "1.7", features = ["serde"] }
|
bytes = { version = "1.5", features = ["serde"] }
|
||||||
chrono = { version = "0.4", features = ["serde"] }
|
chrono = { version = "0.4", features = ["serde"] }
|
||||||
chrono-tz = { version = "0.10.1", features = ["case-insensitive"] }
|
|
||||||
clap = { version = "4.4", features = ["derive"] }
|
clap = { version = "4.4", features = ["derive"] }
|
||||||
config = "0.13.0"
|
config = "0.13.0"
|
||||||
const_format = "0.2"
|
|
||||||
crossbeam-utils = "0.8"
|
crossbeam-utils = "0.8"
|
||||||
dashmap = "6.1"
|
dashmap = "5.4"
|
||||||
datafusion = "50"
|
datafusion = { git = "https://github.com/apache/arrow-datafusion.git", rev = "34eda15b73a9e278af8844b30ed2f1c21c10359c" }
|
||||||
datafusion-common = "50"
|
datafusion-common = { git = "https://github.com/apache/arrow-datafusion.git", rev = "34eda15b73a9e278af8844b30ed2f1c21c10359c" }
|
||||||
datafusion-expr = "50"
|
datafusion-expr = { git = "https://github.com/apache/arrow-datafusion.git", rev = "34eda15b73a9e278af8844b30ed2f1c21c10359c" }
|
||||||
datafusion-functions = "50"
|
datafusion-functions = { git = "https://github.com/apache/arrow-datafusion.git", rev = "34eda15b73a9e278af8844b30ed2f1c21c10359c" }
|
||||||
datafusion-functions-aggregate-common = "50"
|
datafusion-optimizer = { git = "https://github.com/apache/arrow-datafusion.git", rev = "34eda15b73a9e278af8844b30ed2f1c21c10359c" }
|
||||||
datafusion-optimizer = "50"
|
datafusion-physical-expr = { git = "https://github.com/apache/arrow-datafusion.git", rev = "34eda15b73a9e278af8844b30ed2f1c21c10359c" }
|
||||||
datafusion-orc = "0.5"
|
datafusion-physical-plan = { git = "https://github.com/apache/arrow-datafusion.git", rev = "34eda15b73a9e278af8844b30ed2f1c21c10359c" }
|
||||||
datafusion-pg-catalog = "0.12.2"
|
datafusion-sql = { git = "https://github.com/apache/arrow-datafusion.git", rev = "34eda15b73a9e278af8844b30ed2f1c21c10359c" }
|
||||||
datafusion-physical-expr = "50"
|
datafusion-substrait = { git = "https://github.com/apache/arrow-datafusion.git", rev = "34eda15b73a9e278af8844b30ed2f1c21c10359c" }
|
||||||
datafusion-physical-plan = "50"
|
derive_builder = "0.12"
|
||||||
datafusion-sql = "50"
|
|
||||||
datafusion-substrait = "50"
|
|
||||||
deadpool = "0.12"
|
|
||||||
deadpool-postgres = "0.14"
|
|
||||||
derive_builder = "0.20"
|
|
||||||
dotenv = "0.15"
|
dotenv = "0.15"
|
||||||
either = "1.15"
|
# TODO(LFC): Wait for https://github.com/etcdv3/etcd-client/pull/76
|
||||||
etcd-client = { git = "https://github.com/GreptimeTeam/etcd-client", rev = "f62df834f0cffda355eba96691fe1a9a332b75a7", features = [
|
etcd-client = { git = "https://github.com/MichaelScofield/etcd-client.git", rev = "4c371e9b3ea8e0a8ee2f9cbd7ded26e54a45df3b" }
|
||||||
"tls",
|
|
||||||
"tls-roots",
|
|
||||||
] }
|
|
||||||
fst = "0.4.7"
|
fst = "0.4.7"
|
||||||
futures = "0.3"
|
futures = "0.3"
|
||||||
futures-util = "0.3"
|
futures-util = "0.3"
|
||||||
greptime-proto = { git = "https://github.com/GreptimeTeam/greptime-proto.git", rev = "0df99f09f1d6785055b2d9da96fc4ecc2bdf6803" }
|
greptime-proto = { git = "https://github.com/GreptimeTeam/greptime-proto.git", rev = "ae26136accd82fbdf8be540cd502f2e94951077e" }
|
||||||
hex = "0.4"
|
|
||||||
http = "1"
|
|
||||||
humantime = "2.1"
|
humantime = "2.1"
|
||||||
humantime-serde = "1.1"
|
humantime-serde = "1.1"
|
||||||
hyper = "1.1"
|
itertools = "0.10"
|
||||||
hyper-util = "0.1"
|
|
||||||
itertools = "0.14"
|
|
||||||
jsonb = { git = "https://github.com/databendlabs/jsonb.git", rev = "8c8d2fc294a39f3ff08909d60f718639cfba3875", default-features = false }
|
|
||||||
lazy_static = "1.4"
|
lazy_static = "1.4"
|
||||||
local-ip-address = "0.6"
|
meter-core = { git = "https://github.com/GreptimeTeam/greptime-meter.git", rev = "80b72716dcde47ec4161478416a5c6c21343364d" }
|
||||||
loki-proto = { git = "https://github.com/GreptimeTeam/loki-proto.git", rev = "3b7cd33234358b18ece977bf689dc6fb760f29ab" }
|
mockall = "0.11.4"
|
||||||
meter-core = { git = "https://github.com/GreptimeTeam/greptime-meter.git", rev = "5618e779cf2bb4755b499c630fba4c35e91898cb" }
|
|
||||||
mockall = "0.13"
|
|
||||||
moka = "0.12"
|
moka = "0.12"
|
||||||
nalgebra = "0.33"
|
notify = "6.1"
|
||||||
nix = { version = "0.30.1", default-features = false, features = ["event", "fs", "process"] }
|
|
||||||
notify = "8.0"
|
|
||||||
num_cpus = "1.16"
|
num_cpus = "1.16"
|
||||||
object_store_opendal = "0.54"
|
|
||||||
once_cell = "1.18"
|
once_cell = "1.18"
|
||||||
opentelemetry-proto = { version = "0.30", features = [
|
opentelemetry-proto = { version = "0.5", features = [
|
||||||
"gen-tonic",
|
"gen-tonic",
|
||||||
"metrics",
|
"metrics",
|
||||||
"trace",
|
"trace",
|
||||||
"with-serde",
|
|
||||||
"logs",
|
|
||||||
] }
|
] }
|
||||||
ordered-float = { version = "4.3", features = ["serde"] }
|
parquet = { version = "51.0.0", default-features = false, features = ["arrow", "async", "object_store"] }
|
||||||
otel-arrow-rust = { git = "https://github.com/GreptimeTeam/otel-arrow", rev = "2d64b7c0fa95642028a8205b36fe9ea0b023ec59", features = [
|
|
||||||
"server",
|
|
||||||
] }
|
|
||||||
parking_lot = "0.12"
|
|
||||||
parquet = { version = "56.2", default-features = false, features = ["arrow", "async", "object_store"] }
|
|
||||||
paste = "1.0"
|
paste = "1.0"
|
||||||
pin-project = "1.0"
|
pin-project = "1.0"
|
||||||
pretty_assertions = "1.4.0"
|
|
||||||
prometheus = { version = "0.13.3", features = ["process"] }
|
prometheus = { version = "0.13.3", features = ["process"] }
|
||||||
promql-parser = { version = "0.6", features = ["ser"] }
|
promql-parser = { version = "0.4" }
|
||||||
prost = { version = "0.13", features = ["no-recursion-limit"] }
|
prost = "0.12"
|
||||||
prost-types = "0.13"
|
|
||||||
raft-engine = { version = "0.4.1", default-features = false }
|
raft-engine = { version = "0.4.1", default-features = false }
|
||||||
rand = "0.9"
|
rand = "0.8"
|
||||||
ratelimit = "0.10"
|
regex = "1.8"
|
||||||
regex = "1.12"
|
regex-automata = { version = "0.4" }
|
||||||
regex-automata = "0.4"
|
reqwest = { version = "0.11", default-features = false, features = [
|
||||||
reqwest = { version = "0.12", default-features = false, features = [
|
|
||||||
"json",
|
"json",
|
||||||
"rustls-tls-native-roots",
|
"rustls-tls-native-roots",
|
||||||
"stream",
|
"stream",
|
||||||
"multipart",
|
"multipart",
|
||||||
] }
|
] }
|
||||||
rskafka = { git = "https://github.com/WenyXu/rskafka.git", rev = "7b0f31ed39db049b4ee2e5f1e95b5a30be9baf76", features = [
|
rskafka = "0.5"
|
||||||
"transport-tls",
|
|
||||||
] }
|
|
||||||
rstest = "0.25"
|
|
||||||
rstest_reuse = "0.7"
|
|
||||||
rust_decimal = "1.33"
|
rust_decimal = "1.33"
|
||||||
rustc-hash = "2.0"
|
schemars = "0.8"
|
||||||
# It is worth noting that we should try to avoid using aws-lc-rs until it can be compiled on various platforms.
|
|
||||||
hostname = "0.4.0"
|
|
||||||
rustls = { version = "0.23.25", default-features = false }
|
|
||||||
sea-query = "0.32"
|
|
||||||
serde = { version = "1.0", features = ["derive"] }
|
serde = { version = "1.0", features = ["derive"] }
|
||||||
serde_json = { version = "1.0", features = ["float_roundtrip"] }
|
serde_json = { version = "1.0", features = ["float_roundtrip"] }
|
||||||
serde_with = "3"
|
serde_with = "3"
|
||||||
simd-json = "0.15"
|
|
||||||
similar-asserts = "1.6.0"
|
|
||||||
smallvec = { version = "1", features = ["serde"] }
|
smallvec = { version = "1", features = ["serde"] }
|
||||||
snafu = "0.8"
|
snafu = "0.8"
|
||||||
sqlparser = { version = "0.58.0", default-features = false, features = ["std", "visitor", "serde"] }
|
sysinfo = "0.30"
|
||||||
sqlx = { version = "0.8", default-features = false, features = ["any", "macros", "json", "runtime-tokio-rustls"] }
|
# on branch v0.44.x
|
||||||
strum = { version = "0.27", features = ["derive"] }
|
sqlparser = { git = "https://github.com/GreptimeTeam/sqlparser-rs.git", rev = "e4e496b8d62416ad50ce70a1b460c7313610cf5d", features = [
|
||||||
sysinfo = "0.33"
|
"visitor",
|
||||||
|
] }
|
||||||
|
strum = { version = "0.25", features = ["derive"] }
|
||||||
tempfile = "3"
|
tempfile = "3"
|
||||||
tokio = { version = "1.47", features = ["full"] }
|
tokio = { version = "1.36", features = ["full"] }
|
||||||
tokio-postgres = "0.7"
|
tokio-stream = { version = "0.1" }
|
||||||
tokio-rustls = { version = "0.26.2", default-features = false }
|
|
||||||
tokio-stream = "0.1"
|
|
||||||
tokio-util = { version = "0.7", features = ["io-util", "compat"] }
|
tokio-util = { version = "0.7", features = ["io-util", "compat"] }
|
||||||
toml = "0.8.8"
|
toml = "0.8.8"
|
||||||
tonic = { version = "0.13", features = ["tls-ring", "gzip", "zstd"] }
|
tonic = { version = "0.11", features = ["tls", "gzip", "zstd"] }
|
||||||
tower = "0.5"
|
tower = { version = "0.4" }
|
||||||
tower-http = "0.6"
|
uuid = { version = "1.7", features = ["serde", "v4", "fast-rng"] }
|
||||||
tracing = "0.1"
|
|
||||||
tracing-appender = "0.2"
|
|
||||||
tracing-opentelemetry = "0.31.0"
|
|
||||||
tracing-subscriber = { version = "0.3", features = ["env-filter", "json", "fmt"] }
|
|
||||||
typetag = "0.2"
|
|
||||||
uuid = { version = "1.17", features = ["serde", "v4", "fast-rng"] }
|
|
||||||
vrl = "0.25"
|
|
||||||
zstd = "0.13"
|
zstd = "0.13"
|
||||||
# DO_NOT_REMOVE_THIS: END_OF_EXTERNAL_DEPENDENCIES
|
|
||||||
|
|
||||||
## workspaces members
|
## workspaces members
|
||||||
api = { path = "src/api" }
|
api = { path = "src/api" }
|
||||||
auth = { path = "src/auth" }
|
auth = { path = "src/auth" }
|
||||||
cache = { path = "src/cache" }
|
cache = { path = "src/cache" }
|
||||||
catalog = { path = "src/catalog" }
|
catalog = { path = "src/catalog" }
|
||||||
cli = { path = "src/cli" }
|
|
||||||
client = { path = "src/client" }
|
client = { path = "src/client" }
|
||||||
cmd = { path = "src/cmd", default-features = false }
|
cmd = { path = "src/cmd" }
|
||||||
common-base = { path = "src/common/base" }
|
common-base = { path = "src/common/base" }
|
||||||
common-catalog = { path = "src/common/catalog" }
|
common-catalog = { path = "src/common/catalog" }
|
||||||
common-config = { path = "src/common/config" }
|
common-config = { path = "src/common/config" }
|
||||||
common-datasource = { path = "src/common/datasource" }
|
common-datasource = { path = "src/common/datasource" }
|
||||||
common-decimal = { path = "src/common/decimal" }
|
common-decimal = { path = "src/common/decimal" }
|
||||||
common-error = { path = "src/common/error" }
|
common-error = { path = "src/common/error" }
|
||||||
common-event-recorder = { path = "src/common/event-recorder" }
|
|
||||||
common-frontend = { path = "src/common/frontend" }
|
common-frontend = { path = "src/common/frontend" }
|
||||||
common-function = { path = "src/common/function" }
|
common-function = { path = "src/common/function" }
|
||||||
common-greptimedb-telemetry = { path = "src/common/greptimedb-telemetry" }
|
common-greptimedb-telemetry = { path = "src/common/greptimedb-telemetry" }
|
||||||
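The comment near the top of this `[workspace.dependencies]` hunk explains why several entries are declared with `default-features = false`: a member crate can switch default features back on for its own copy, but cannot switch them off if the workspace enables them. A small sketch with a hypothetical crate name, assuming the workspace pins it with `default-features = false`:

    # Workspace root, for reference:
    # [workspace.dependencies]
    # some-crate = { version = "1", default-features = false }

    # Hypothetical member crate Cargo.toml
    [dependencies]
    # Re-enabling defaults (false -> true) is allowed, and `features` are
    # additive on top of whatever the workspace entry already enables;
    # the reverse override (true -> false) is not possible, which is why
    # the workspace declares the weaker default.
    some-crate = { workspace = true, default-features = true, features = ["extra"] }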
@@ -265,89 +197,62 @@ common-grpc-expr = { path = "src/common/grpc-expr" }
|
|||||||
common-macro = { path = "src/common/macro" }
|
common-macro = { path = "src/common/macro" }
|
||||||
common-mem-prof = { path = "src/common/mem-prof" }
|
common-mem-prof = { path = "src/common/mem-prof" }
|
||||||
common-meta = { path = "src/common/meta" }
|
common-meta = { path = "src/common/meta" }
|
||||||
common-options = { path = "src/common/options" }
|
|
||||||
common-plugins = { path = "src/common/plugins" }
|
common-plugins = { path = "src/common/plugins" }
|
||||||
common-pprof = { path = "src/common/pprof" }
|
|
||||||
common-procedure = { path = "src/common/procedure" }
|
common-procedure = { path = "src/common/procedure" }
|
||||||
common-procedure-test = { path = "src/common/procedure-test" }
|
common-procedure-test = { path = "src/common/procedure-test" }
|
||||||
common-query = { path = "src/common/query" }
|
common-query = { path = "src/common/query" }
|
||||||
common-recordbatch = { path = "src/common/recordbatch" }
|
common-recordbatch = { path = "src/common/recordbatch" }
|
||||||
common-runtime = { path = "src/common/runtime" }
|
common-runtime = { path = "src/common/runtime" }
|
||||||
common-session = { path = "src/common/session" }
|
|
||||||
common-sql = { path = "src/common/sql" }
|
|
||||||
common-stat = { path = "src/common/stat" }
|
|
||||||
common-telemetry = { path = "src/common/telemetry" }
|
common-telemetry = { path = "src/common/telemetry" }
|
||||||
common-test-util = { path = "src/common/test-util" }
|
common-test-util = { path = "src/common/test-util" }
|
||||||
common-time = { path = "src/common/time" }
|
common-time = { path = "src/common/time" }
|
||||||
common-version = { path = "src/common/version" }
|
common-version = { path = "src/common/version" }
|
||||||
common-wal = { path = "src/common/wal" }
|
common-wal = { path = "src/common/wal" }
|
||||||
common-workload = { path = "src/common/workload" }
|
|
||||||
datanode = { path = "src/datanode" }
|
datanode = { path = "src/datanode" }
|
||||||
datatypes = { path = "src/datatypes" }
|
datatypes = { path = "src/datatypes" }
|
||||||
file-engine = { path = "src/file-engine" }
|
file-engine = { path = "src/file-engine" }
|
||||||
flow = { path = "src/flow" }
|
flow = { path = "src/flow" }
|
||||||
frontend = { path = "src/frontend", default-features = false }
|
frontend = { path = "src/frontend" }
|
||||||
index = { path = "src/index" }
|
index = { path = "src/index" }
|
||||||
log-query = { path = "src/log-query" }
|
|
||||||
log-store = { path = "src/log-store" }
|
log-store = { path = "src/log-store" }
|
||||||
meta-client = { path = "src/meta-client" }
|
meta-client = { path = "src/meta-client" }
|
||||||
meta-srv = { path = "src/meta-srv" }
|
meta-srv = { path = "src/meta-srv" }
|
||||||
metric-engine = { path = "src/metric-engine" }
|
metric-engine = { path = "src/metric-engine" }
|
||||||
mito-codec = { path = "src/mito-codec" }
|
|
||||||
mito2 = { path = "src/mito2" }
|
mito2 = { path = "src/mito2" }
|
||||||
object-store = { path = "src/object-store" }
|
object-store = { path = "src/object-store" }
|
||||||
operator = { path = "src/operator" }
|
operator = { path = "src/operator" }
|
||||||
partition = { path = "src/partition" }
|
partition = { path = "src/partition" }
|
||||||
pipeline = { path = "src/pipeline" }
|
|
||||||
plugins = { path = "src/plugins" }
|
plugins = { path = "src/plugins" }
|
||||||
promql = { path = "src/promql" }
|
promql = { path = "src/promql" }
|
||||||
puffin = { path = "src/puffin" }
|
puffin = { path = "src/puffin" }
|
||||||
query = { path = "src/query" }
|
query = { path = "src/query" }
|
||||||
|
script = { path = "src/script" }
|
||||||
servers = { path = "src/servers" }
|
servers = { path = "src/servers" }
|
||||||
session = { path = "src/session" }
|
session = { path = "src/session" }
|
||||||
sql = { path = "src/sql" }
|
sql = { path = "src/sql" }
|
||||||
standalone = { path = "src/standalone" }
|
|
||||||
store-api = { path = "src/store-api" }
|
store-api = { path = "src/store-api" }
|
||||||
substrait = { path = "src/common/substrait" }
|
substrait = { path = "src/common/substrait" }
|
||||||
table = { path = "src/table" }
|
table = { path = "src/table" }
|
||||||
|
|
||||||
[workspace.dependencies.meter-macros]
|
[workspace.dependencies.meter-macros]
|
||||||
git = "https://github.com/GreptimeTeam/greptime-meter.git"
|
git = "https://github.com/GreptimeTeam/greptime-meter.git"
|
||||||
rev = "5618e779cf2bb4755b499c630fba4c35e91898cb"
|
rev = "80b72716dcde47ec4161478416a5c6c21343364d"
|
||||||
|
|
||||||
[patch.crates-io]
|
|
||||||
datafusion = { git = "https://github.com/GreptimeTeam/datafusion.git", rev = "fd4b2abcf3c3e43e94951bda452c9fd35243aab0" }
|
|
||||||
datafusion-common = { git = "https://github.com/GreptimeTeam/datafusion.git", rev = "fd4b2abcf3c3e43e94951bda452c9fd35243aab0" }
|
|
||||||
datafusion-expr = { git = "https://github.com/GreptimeTeam/datafusion.git", rev = "fd4b2abcf3c3e43e94951bda452c9fd35243aab0" }
|
|
||||||
datafusion-functions = { git = "https://github.com/GreptimeTeam/datafusion.git", rev = "fd4b2abcf3c3e43e94951bda452c9fd35243aab0" }
|
|
||||||
datafusion-functions-aggregate-common = { git = "https://github.com/GreptimeTeam/datafusion.git", rev = "fd4b2abcf3c3e43e94951bda452c9fd35243aab0" }
|
|
||||||
datafusion-optimizer = { git = "https://github.com/GreptimeTeam/datafusion.git", rev = "fd4b2abcf3c3e43e94951bda452c9fd35243aab0" }
|
|
||||||
datafusion-physical-expr = { git = "https://github.com/GreptimeTeam/datafusion.git", rev = "fd4b2abcf3c3e43e94951bda452c9fd35243aab0" }
|
|
||||||
datafusion-physical-expr-common = { git = "https://github.com/GreptimeTeam/datafusion.git", rev = "fd4b2abcf3c3e43e94951bda452c9fd35243aab0" }
|
|
||||||
datafusion-physical-plan = { git = "https://github.com/GreptimeTeam/datafusion.git", rev = "fd4b2abcf3c3e43e94951bda452c9fd35243aab0" }
|
|
||||||
datafusion-datasource = { git = "https://github.com/GreptimeTeam/datafusion.git", rev = "fd4b2abcf3c3e43e94951bda452c9fd35243aab0" }
|
|
||||||
datafusion-sql = { git = "https://github.com/GreptimeTeam/datafusion.git", rev = "fd4b2abcf3c3e43e94951bda452c9fd35243aab0" }
|
|
||||||
datafusion-substrait = { git = "https://github.com/GreptimeTeam/datafusion.git", rev = "fd4b2abcf3c3e43e94951bda452c9fd35243aab0" }
|
|
||||||
sqlparser = { git = "https://github.com/GreptimeTeam/sqlparser-rs.git", rev = "4b519a5caa95472cc3988f5556813a583dd35af1" } # branch = "v0.58.x"
|
|
||||||
|
|
||||||
[profile.release]
|
[profile.release]
|
||||||
debug = 1
|
debug = 1
|
||||||
|
|
||||||
[profile.nightly]
|
[profile.nightly]
|
||||||
inherits = "release"
|
inherits = "release"
|
||||||
strip = "debuginfo"
|
strip = true
|
||||||
lto = "thin"
|
lto = "thin"
|
||||||
debug = false
|
debug = false
|
||||||
incremental = false
|
incremental = false
|
||||||
|
|
||||||
[profile.ci]
|
[profile.ci]
|
||||||
inherits = "dev"
|
inherits = "dev"
|
||||||
|
debug = false
|
||||||
strip = true
|
strip = true
|
||||||
|
|
||||||
[profile.dev.package.sqlness-runner]
|
[profile.dev.package.sqlness-runner]
|
||||||
debug = false
|
debug = false
|
||||||
strip = true
|
strip = true
|
||||||
|
|
||||||
[profile.dev.package.tests-fuzz]
|
|
||||||
debug = false
|
|
||||||
strip = true
|
|
||||||
|
|||||||
@@ -1,6 +1,3 @@
|
|||||||
[target.aarch64-unknown-linux-gnu]
|
|
||||||
image = "ghcr.io/cross-rs/aarch64-unknown-linux-gnu:0.2.5"
|
|
||||||
|
|
||||||
[build]
|
[build]
|
||||||
pre-build = [
|
pre-build = [
|
||||||
"dpkg --add-architecture $CROSS_DEB_ARCH",
|
"dpkg --add-architecture $CROSS_DEB_ARCH",
|
||||||
@@ -8,8 +5,3 @@ pre-build = [
|
|||||||
"curl -LO https://github.com/protocolbuffers/protobuf/releases/download/v3.15.8/protoc-3.15.8-linux-x86_64.zip && unzip protoc-3.15.8-linux-x86_64.zip -d /usr/",
|
"curl -LO https://github.com/protocolbuffers/protobuf/releases/download/v3.15.8/protoc-3.15.8-linux-x86_64.zip && unzip protoc-3.15.8-linux-x86_64.zip -d /usr/",
|
||||||
"chmod a+x /usr/bin/protoc && chmod -R a+rx /usr/include/google",
|
"chmod a+x /usr/bin/protoc && chmod -R a+rx /usr/include/google",
|
||||||
]
|
]
|
||||||
|
|
||||||
[build.env]
|
|
||||||
passthrough = [
|
|
||||||
"JEMALLOC_SYS_WITH_LG_PAGE",
|
|
||||||
]
|
|
||||||
|
|||||||
70  Makefile
@@ -8,7 +8,6 @@ CARGO_BUILD_OPTS := --locked
|
|||||||
IMAGE_REGISTRY ?= docker.io
|
IMAGE_REGISTRY ?= docker.io
|
||||||
IMAGE_NAMESPACE ?= greptime
|
IMAGE_NAMESPACE ?= greptime
|
||||||
IMAGE_TAG ?= latest
|
 IMAGE_TAG ?= latest
-DEV_BUILDER_IMAGE_TAG ?= 2025-10-01-8fe17d43-20251011080129
 BUILDX_MULTI_PLATFORM_BUILD ?= false
 BUILDX_BUILDER_NAME ?= gtbuilder
 BASE_IMAGE ?= ubuntu
@@ -16,15 +15,12 @@ RUST_TOOLCHAIN ?= $(shell cat rust-toolchain.toml | grep channel | cut -d'"' -f2
 CARGO_REGISTRY_CACHE ?= ${HOME}/.cargo/registry
 ARCH := $(shell uname -m | sed 's/x86_64/amd64/' | sed 's/aarch64/arm64/')
 OUTPUT_DIR := $(shell if [ "$(RELEASE)" = "true" ]; then echo "release"; elif [ ! -z "$(CARGO_PROFILE)" ]; then echo "$(CARGO_PROFILE)" ; else echo "debug"; fi)
-SQLNESS_OPTS ?=
-EXTRA_BUILD_ENVS ?=
-ASSEMBLED_EXTRA_BUILD_ENV := $(foreach var,$(EXTRA_BUILD_ENVS),-e $(var))

 # The arguments for running integration tests.
 ETCD_VERSION ?= v3.5.9
 ETCD_IMAGE ?= quay.io/coreos/etcd:${ETCD_VERSION}
 RETRY_COUNT ?= 3
-NEXTEST_OPTS := --retries ${RETRY_COUNT} --features pg_kvbackend,mysql_kvbackend
+NEXTEST_OPTS := --retries ${RETRY_COUNT}
 BUILD_JOBS ?= $(shell which nproc 1>/dev/null && expr $$(nproc) / 2) # If nproc is not available, we don't set the build jobs.
 ifeq ($(BUILD_JOBS), 0) # If the number of cores is less than 2, set the build jobs to 1.
 BUILD_JOBS := 1
@@ -34,10 +30,6 @@ ifneq ($(strip $(BUILD_JOBS)),)
 NEXTEST_OPTS += --build-jobs=${BUILD_JOBS}
 endif

-ifneq ($(strip $(BUILD_JOBS)),)
-SQLNESS_OPTS += --jobs ${BUILD_JOBS}
-endif
-
 ifneq ($(strip $(CARGO_PROFILE)),)
 CARGO_BUILD_OPTS += --profile ${CARGO_PROFILE}
 endif
@@ -66,8 +58,6 @@ ifeq ($(BUILDX_MULTI_PLATFORM_BUILD), all)
 BUILDX_MULTI_PLATFORM_BUILD_OPTS := --platform linux/amd64,linux/arm64 --push
 else ifeq ($(BUILDX_MULTI_PLATFORM_BUILD), amd64)
 BUILDX_MULTI_PLATFORM_BUILD_OPTS := --platform linux/amd64 --push
-else ifeq ($(BUILDX_MULTI_PLATFORM_BUILD), arm64)
-BUILDX_MULTI_PLATFORM_BUILD_OPTS := --platform linux/arm64 --push
 else
 BUILDX_MULTI_PLATFORM_BUILD_OPTS := -o type=docker
 endif
@@ -85,9 +75,8 @@ build: ## Build debug version greptime.
 .PHONY: build-by-dev-builder
 build-by-dev-builder: ## Build greptime by dev-builder.
 	docker run --network=host \
-	${ASSEMBLED_EXTRA_BUILD_ENV} \
 	-v ${PWD}:/greptimedb -v ${CARGO_REGISTRY_CACHE}:/root/.cargo/registry \
-	-w /greptimedb ${IMAGE_REGISTRY}/${IMAGE_NAMESPACE}/dev-builder-${BASE_IMAGE}:${DEV_BUILDER_IMAGE_TAG} \
+	-w /greptimedb ${IMAGE_REGISTRY}/${IMAGE_NAMESPACE}/dev-builder-${BASE_IMAGE}:latest \
 	make build \
 	CARGO_EXTENSION="${CARGO_EXTENSION}" \
 	CARGO_PROFILE=${CARGO_PROFILE} \
@@ -101,7 +90,7 @@ build-by-dev-builder: ## Build greptime by dev-builder.
 build-android-bin: ## Build greptime binary for android.
 	docker run --network=host \
 	-v ${PWD}:/greptimedb -v ${CARGO_REGISTRY_CACHE}:/root/.cargo/registry \
-	-w /greptimedb ${IMAGE_REGISTRY}/${IMAGE_NAMESPACE}/dev-builder-android:${DEV_BUILDER_IMAGE_TAG} \
+	-w /greptimedb ${IMAGE_REGISTRY}/${IMAGE_NAMESPACE}/dev-builder-android:latest \
 	make build \
 	CARGO_EXTENSION="ndk --platform 23 -t aarch64-linux-android" \
 	CARGO_PROFILE=release \
@@ -115,8 +104,8 @@ build-android-bin: ## Build greptime binary for android.
 strip-android-bin: build-android-bin ## Strip greptime binary for android.
 	docker run --network=host \
 	-v ${PWD}:/greptimedb \
-	-w /greptimedb ${IMAGE_REGISTRY}/${IMAGE_NAMESPACE}/dev-builder-android:${DEV_BUILDER_IMAGE_TAG} \
+	-w /greptimedb ${IMAGE_REGISTRY}/${IMAGE_NAMESPACE}/dev-builder-android:latest \
-	bash -c '$${NDK_ROOT}/toolchains/llvm/prebuilt/linux-x86_64/bin/llvm-strip --strip-debug /greptimedb/target/aarch64-linux-android/release/greptime'
+	bash -c '$${NDK_ROOT}/toolchains/llvm/prebuilt/linux-x86_64/bin/llvm-strip /greptimedb/target/aarch64-linux-android/release/greptime'

 .PHONY: clean
 clean: ## Clean the project.
@@ -155,7 +144,7 @@ dev-builder: multi-platform-buildx ## Build dev-builder image.
 	docker buildx build --builder ${BUILDX_BUILDER_NAME} \
 	--build-arg="RUST_TOOLCHAIN=${RUST_TOOLCHAIN}" \
 	-f docker/dev-builder/${BASE_IMAGE}/Dockerfile \
-	-t ${IMAGE_REGISTRY}/${IMAGE_NAMESPACE}/dev-builder-${BASE_IMAGE}:${DEV_BUILDER_IMAGE_TAG} ${BUILDX_MULTI_PLATFORM_BUILD_OPTS} .
+	-t ${IMAGE_REGISTRY}/${IMAGE_NAMESPACE}/dev-builder-${BASE_IMAGE}:${IMAGE_TAG} ${BUILDX_MULTI_PLATFORM_BUILD_OPTS} .

 .PHONY: multi-platform-buildx
 multi-platform-buildx: ## Create buildx multi-platform builder.
@@ -172,17 +161,7 @@ nextest: ## Install nextest tools.

 .PHONY: sqlness-test
 sqlness-test: ## Run sqlness test.
-	cargo sqlness bare ${SQLNESS_OPTS}
+	cargo sqlness

-RUNS ?= 1
-FUZZ_TARGET ?= fuzz_alter_table
-.PHONY: fuzz
-fuzz: ## Run fuzz test ${FUZZ_TARGET}.
-	cargo fuzz run ${FUZZ_TARGET} --fuzz-dir tests-fuzz -D -s none -- -runs=${RUNS}
-
-.PHONY: fuzz-ls
-fuzz-ls: ## List all fuzz targets.
-	cargo fuzz list --fuzz-dir tests-fuzz
-
 .PHONY: check
 check: ## Cargo check all the targets.
@@ -196,22 +175,9 @@ clippy: ## Check clippy rules.
 fix-clippy: ## Fix clippy violations.
 	cargo clippy --workspace --all-targets --all-features --fix

-.PHONY: check-udeps
-check-udeps: ## Check unused dependencies.
-	cargo udeps --workspace --all-targets
-
-.PHONY: fix-udeps
-fix-udeps: ## Remove unused dependencies automatically.
-	@echo "Running cargo-udeps to find unused dependencies..."
-	@cargo udeps --workspace --all-targets --output json > udeps-report.json || true
-	@echo "Removing unused dependencies..."
-	@python3 scripts/fix-udeps.py udeps-report.json
-
 .PHONY: fmt-check
 fmt-check: ## Check code format.
 	cargo fmt --all -- --check
-	python3 scripts/check-snafu.py
-	python3 scripts/check-super-imports.py

 .PHONY: start-etcd
 start-etcd: ## Start single node etcd for testing purpose.
@@ -225,33 +191,15 @@ stop-etcd: ## Stop single node etcd for testing purpose.
 run-it-in-container: start-etcd ## Run integration tests in dev-builder.
 	docker run --network=host \
 	-v ${PWD}:/greptimedb -v ${CARGO_REGISTRY_CACHE}:/root/.cargo/registry -v /tmp:/tmp \
-	-w /greptimedb ${IMAGE_REGISTRY}/${IMAGE_NAMESPACE}/dev-builder-${BASE_IMAGE}:${DEV_BUILDER_IMAGE_TAG} \
+	-w /greptimedb ${IMAGE_REGISTRY}/${IMAGE_NAMESPACE}/dev-builder-${BASE_IMAGE}:latest \
 	make test sqlness-test BUILD_JOBS=${BUILD_JOBS}

-.PHONY: start-cluster
-start-cluster: ## Start the greptimedb cluster with etcd by using docker compose.
-	docker compose -f ./docker/docker-compose/cluster-with-etcd.yaml up
-
-.PHONY: stop-cluster
-stop-cluster: ## Stop the greptimedb cluster that created by docker compose.
-	docker compose -f ./docker/docker-compose/cluster-with-etcd.yaml stop
-
-##@ Grafana
-
-.PHONY: check-dashboards
-check-dashboards: ## Check the Grafana dashboards.
-	@./grafana/scripts/check.sh
-
-.PHONY: dashboards
-dashboards: ## Generate the Grafana dashboards for standalone mode and intermediate dashboards.
-	@./grafana/scripts/gen-dashboards.sh
-
 ##@ Docs
 config-docs: ## Generate configuration documentation from toml files.
 	docker run --rm \
 	-v ${PWD}:/greptimedb \
 	-w /greptimedb/config \
-	toml2docs/toml2docs:v0.1.3 \
+	toml2docs/toml2docs:v0.1.1 \
 	-p '##' \
 	-t ./config-docs-template.md \
 	-o ./config.md
220  README.md
@@ -6,15 +6,14 @@
</picture>
|
</picture>
|
||||||
</p>
|
</p>
|
||||||
|
|
||||||
<h2 align="center">Real-Time & Cloud-Native Observability Database<br/>for metrics, logs, and traces</h2>
|
<h1 align="center">Cloud-scale, Fast and Efficient Time Series Database</h1>
|
||||||
|
|
||||||
> Delivers sub-second querying at PB scale and exceptional cost efficiency from edge to cloud.
|
|
||||||
|
|
||||||
<div align="center">
|
<div align="center">
|
||||||
<h3 align="center">
|
<h3 align="center">
|
||||||
<a href="https://docs.greptime.com/user-guide/overview/">User Guide</a> |
|
<a href="https://greptime.com/product/cloud">GreptimeCloud</a> |
|
||||||
|
<a href="https://docs.greptime.com/">User guide</a> |
|
||||||
<a href="https://greptimedb.rs/">API Docs</a> |
|
<a href="https://greptimedb.rs/">API Docs</a> |
|
||||||
<a href="https://github.com/GreptimeTeam/greptimedb/issues/5446">Roadmap 2025</a>
|
<a href="https://github.com/GreptimeTeam/greptimedb/issues/3412">Roadmap 2024</a>
|
||||||
</h4>
|
</h4>
|
||||||
|
|
||||||
<a href="https://github.com/GreptimeTeam/greptimedb/releases/latest">
|
<a href="https://github.com/GreptimeTeam/greptimedb/releases/latest">
|
||||||
@@ -49,178 +48,143 @@
|
|||||||
</a>
|
</a>
|
||||||
</div>
|
</div>
|
||||||
|
|
||||||
- [Introduction](#introduction)
|
|
||||||
- [⭐ Key Features](#features)
|
|
||||||
- [Quick Comparison](#quick-comparison)
|
|
||||||
- [Architecture](#architecture)
|
|
||||||
- [Try GreptimeDB](#try-greptimedb)
|
|
||||||
- [Getting Started](#getting-started)
|
|
||||||
- [Build From Source](#build-from-source)
|
|
||||||
- [Tools & Extensions](#tools--extensions)
|
|
||||||
- [Project Status](#project-status)
|
|
||||||
- [Community](#community)
|
|
||||||
- [License](#license)
|
|
||||||
- [Commercial Support](#commercial-support)
|
|
||||||
- [Contributing](#contributing)
|
|
||||||
- [Acknowledgement](#acknowledgement)
|
|
||||||
|
|
||||||
## Introduction
|
## Introduction
|
||||||
|
|
||||||
**GreptimeDB** is an open-source, cloud-native database that unifies metrics, logs, and traces, enabling real-time observability at any scale — across edge, cloud, and hybrid environments.
|
**GreptimeDB** is an open-source time-series database focusing on efficiency, scalability, and analytical capabilities.
|
||||||
|
Designed to work on infrastructure of the cloud era, GreptimeDB benefits users with its elasticity and commodity storage, offering a fast and cost-effective **alternative to InfluxDB** and a **long-term storage for Prometheus**.
|
||||||
|
|
||||||
## Features
|
## Why GreptimeDB
|
||||||
|
|
||||||
| Feature | Description |
|
Our core developers have been building time-series data platforms for years. Based on our best-practices, GreptimeDB is born to give you:
|
||||||
| --------- | ----------- |
|
|
||||||
| [All-in-One Observability](https://docs.greptime.com/user-guide/concepts/why-greptimedb) | OpenTelemetry-native platform unifying metrics, logs, and traces. Query via [SQL](https://docs.greptime.com/user-guide/query-data/sql), [PromQL](https://docs.greptime.com/user-guide/query-data/promql), and [Flow](https://docs.greptime.com/user-guide/flow-computation/overview). |
|
|
||||||
| [High Performance](https://docs.greptime.com/user-guide/manage-data/data-index) | Written in Rust with [rich indexing](https://docs.greptime.com/user-guide/manage-data/data-index) (inverted, fulltext, skipping, vector), delivering sub-second responses at PB scale. |
|
|
||||||
| [Cost Efficiency](https://docs.greptime.com/user-guide/concepts/architecture) | 50x lower operational and storage costs with compute-storage separation and native object storage (S3, Azure Blob, etc.). |
|
|
||||||
| [Cloud-Native & Scalable](https://docs.greptime.com/user-guide/deployments-administration/deploy-on-kubernetes/greptimedb-operator-management) | Purpose-built for [Kubernetes](https://docs.greptime.com/user-guide/deployments-administration/deploy-on-kubernetes/greptimedb-operator-management) with unlimited cross-cloud scaling, handling hundreds of thousands of concurrent requests. |
|
|
||||||
| [Developer-Friendly](https://docs.greptime.com/user-guide/protocols/overview) | SQL/PromQL interfaces, built-in web dashboard, REST API, MySQL/PostgreSQL protocol compatibility, and native [OpenTelemetry](https://docs.greptime.com/user-guide/ingest-data/for-observability/opentelemetry/) support. |
|
|
||||||
| [Flexible Deployment](https://docs.greptime.com/user-guide/deployments-administration/overview) | Deploy anywhere from ARM-based edge devices (including [Android](https://docs.greptime.com/user-guide/deployments-administration/run-on-android)) to cloud, with unified APIs and efficient data sync. |
|
|
||||||
|
|
||||||
✅ **Perfect for:**
|
* **Easy horizontal scaling**
|
||||||
- Unified observability stack replacing Prometheus + Loki + Tempo
|
|
||||||
- Large-scale metrics with high cardinality (millions to billions of time series)
|
|
||||||
- Large-scale observability platform requiring cost efficiency and scalability
|
|
||||||
- IoT and edge computing with resource and bandwidth constraints
|
|
||||||
|
|
||||||
Learn more in [Why GreptimeDB](https://docs.greptime.com/user-guide/concepts/why-greptimedb) and [Observability 2.0 and the Database for It](https://greptime.com/blogs/2025-04-25-greptimedb-observability2-new-database).
|
Seamless scalability from a standalone binary at edge to a robust, highly available distributed cluster in cloud, with a transparent experience for both developers and administrators.
|
||||||
|
|
||||||
## Quick Comparison
|
* **Analyzing time-series data**
|
||||||
|
|
||||||
| Feature | GreptimeDB | Traditional TSDB | Log Stores |
|
Query your time-series data with SQL and PromQL. Use Python scripts to facilitate complex analytical tasks.
|
||||||
|----------------------------------|-----------------------|--------------------|-----------------|
|
|
||||||
| Data Types | Metrics, Logs, Traces | Metrics only | Logs only |
|
|
||||||
| Query Language | SQL, PromQL | Custom/PromQL | Custom/DSL |
|
|
||||||
| Deployment | Edge + Cloud | Cloud/On-prem | Mostly central |
|
|
||||||
| Indexing & Performance | PB-Scale, Sub-second | Varies | Varies |
|
|
||||||
| Integration | REST API, SQL, Common protocols | Varies | Varies |
|
|
||||||
|
|
||||||
**Performance:**
|
* **Cloud-native distributed database**
|
||||||
* [GreptimeDB tops JSONBench's billion-record cold run test!](https://greptime.com/blogs/2025-03-18-jsonbench-greptimedb-performance)
|
|
||||||
* [TSBS Benchmark](https://github.com/GreptimeTeam/greptimedb/tree/main/docs/benchmarks/tsbs)
|
|
||||||
|
|
||||||
Read [more benchmark reports](https://docs.greptime.com/user-guide/concepts/features-that-you-concern#how-is-greptimedbs-performance-compared-to-other-solutions).
|
Fully open-source distributed cluster architecture that harnesses the power of cloud-native elastic computing resources.
|
||||||
|
|
||||||
## Architecture
|
* **Performance and Cost-effective**
|
||||||
|
|
||||||
GreptimeDB can run in two modes:
|
Flexible indexing capabilities and distributed, parallel-processing query engine, tackling high cardinality issues down. Optimized columnar layout for handling time-series data; compacted, compressed, and stored on various storage backends, particularly cloud object storage with 50x cost efficiency.
|
||||||
* **Standalone Mode** - Single binary for development and small deployments
|
|
||||||
* **Distributed Mode** - Separate components for production scale:
|
|
||||||
- Frontend: Query processing and protocol handling
|
|
||||||
- Datanode: Data storage and retrieval
|
|
||||||
- Metasrv: Metadata management and coordination
|
|
||||||
|
|
||||||
Read the [architecture](https://docs.greptime.com/contributor-guide/overview/#architecture) document. [DeepWiki](https://deepwiki.com/GreptimeTeam/greptimedb/1-overview) provides an in-depth look at GreptimeDB:
|
* **Compatible with InfluxDB, Prometheus and more protocols**
|
||||||
<img alt="GreptimeDB System Overview" src="docs/architecture.png">
|
|
||||||
|
Widely adopted database protocols and APIs, including MySQL, PostgreSQL, and Prometheus Remote Storage, etc. [Read more](https://docs.greptime.com/user-guide/clients/overview).
|
||||||
|
|
||||||
## Try GreptimeDB
|
## Try GreptimeDB
|
||||||
|
|
||||||
|
### 1. [GreptimePlay](https://greptime.com/playground)
|
||||||
|
|
||||||
|
Try out the features of GreptimeDB right from your browser.
|
||||||
|
|
||||||
|
### 2. [GreptimeCloud](https://console.greptime.cloud/)
|
||||||
|
|
||||||
|
Start instantly with a free cluster.
|
||||||
|
|
||||||
|
### 3. Docker Image
|
||||||
|
|
||||||
|
To install GreptimeDB locally, the recommended way is via Docker:
|
||||||
|
|
||||||
```shell
|
```shell
|
||||||
docker pull greptime/greptimedb
|
docker pull greptime/greptimedb
|
||||||
```
|
```
|
||||||
|
|
||||||
|
Start a GreptimeDB container with:
|
||||||
|
|
||||||
```shell
|
```shell
|
||||||
docker run -p 127.0.0.1:4000-4003:4000-4003 \
|
docker run --rm --name greptime --net=host greptime/greptimedb standalone start
|
||||||
-v "$(pwd)/greptimedb_data:/greptimedb_data" \
|
|
||||||
--name greptime --rm \
|
|
||||||
greptime/greptimedb:latest standalone start \
|
|
||||||
--http-addr 0.0.0.0:4000 \
|
|
||||||
--rpc-bind-addr 0.0.0.0:4001 \
|
|
||||||
--mysql-addr 0.0.0.0:4002 \
|
|
||||||
--postgres-addr 0.0.0.0:4003
|
|
||||||
```
|
```
|
||||||
Dashboard: [http://localhost:4000/dashboard](http://localhost:4000/dashboard)
|
|
||||||
|
|
||||||
Read more in the [full Install Guide](https://docs.greptime.com/getting-started/installation/overview).
|
Read more about [Installation](https://docs.greptime.com/getting-started/installation/overview) on docs.
|
||||||
|
|
||||||
**Troubleshooting:**
|
|
||||||
* Cannot connect to the database? Ensure that ports `4000`, `4001`, `4002`, and `4003` are not blocked by a firewall or used by other services.
|
|
||||||
* Failed to start? Check the container logs with `docker logs greptime` for further details.
|
|
||||||
|
|
||||||
## Getting Started
|
## Getting Started
|
||||||
|
|
||||||
- [Quickstart](https://docs.greptime.com/getting-started/quick-start)
|
* [Quickstart](https://docs.greptime.com/getting-started/quick-start/overview)
|
||||||
- [User Guide](https://docs.greptime.com/user-guide/overview)
|
* [Write Data](https://docs.greptime.com/user-guide/clients/overview)
|
||||||
- [Demo Scenes](https://github.com/GreptimeTeam/demo-scene)
|
* [Query Data](https://docs.greptime.com/user-guide/query-data/overview)
|
||||||
- [FAQ](https://docs.greptime.com/faq-and-others/faq)
|
* [Operations](https://docs.greptime.com/user-guide/operations/overview)
|
||||||
|
|
||||||
## Build From Source
|
## Build
|
||||||
|
|
||||||
|
Check the prerequisite:
|
||||||
|
|
||||||
**Prerequisites:**
|
|
||||||
* [Rust toolchain](https://www.rust-lang.org/tools/install) (nightly)
|
* [Rust toolchain](https://www.rust-lang.org/tools/install) (nightly)
|
||||||
* [Protobuf compiler](https://grpc.io/docs/protoc-installation/) (>= 3.15)
|
* [Protobuf compiler](https://grpc.io/docs/protoc-installation/) (>= 3.15)
|
||||||
* C/C++ building essentials, including `gcc`/`g++`/`autoconf` and glibc library (eg. `libc6-dev` on Ubuntu and `glibc-devel` on Fedora)
|
* Python toolchain (optional): Required only if built with PyO3 backend. More detail for compiling with PyO3 can be found in its [documentation](https://pyo3.rs/v0.18.1/building_and_distribution#configuring-the-python-version).
|
||||||
* Python toolchain (optional): Required only if using some test scripts.
|
|
||||||
|
|
||||||
**Build and Run:**
|
Build GreptimeDB binary:
|
||||||
```bash
|
|
||||||
|
```shell
|
||||||
make
|
make
|
||||||
|
```
|
||||||
|
|
||||||
|
Run a standalone server:
|
||||||
|
|
||||||
|
```shell
|
||||||
cargo run -- standalone start
|
cargo run -- standalone start
|
||||||
```
|
```
|
||||||
|
|
||||||
## Tools & Extensions
|
## Extension
|
||||||
|
|
||||||
- **Kubernetes**: [GreptimeDB Operator](https://github.com/GrepTimeTeam/greptimedb-operator)
|
### Dashboard
|
||||||
- **Helm Charts**: [Greptime Helm Charts](https://github.com/GreptimeTeam/helm-charts)
|
|
||||||
- **Dashboard**: [Web UI](https://github.com/GreptimeTeam/dashboard)
|
- [The dashboard UI for GreptimeDB](https://github.com/GreptimeTeam/dashboard)
|
||||||
- **gRPC Ingester**: [Go](https://github.com/GreptimeTeam/greptimedb-ingester-go), [Java](https://github.com/GreptimeTeam/greptimedb-ingester-java), [C++](https://github.com/GreptimeTeam/greptimedb-ingester-cpp), [Erlang](https://github.com/GreptimeTeam/greptimedb-ingester-erl), [Rust](https://github.com/GreptimeTeam/greptimedb-ingester-rust)
|
|
||||||
- **Grafana Data Source**: [GreptimeDB Grafana data source plugin](https://github.com/GreptimeTeam/greptimedb-grafana-datasource)
|
### SDK
|
||||||
- **Grafana Dashboard**: [Official Dashboard for monitoring](https://github.com/GreptimeTeam/greptimedb/blob/main/grafana/README.md)
|
|
||||||
|
- [GreptimeDB Go Ingester](https://github.com/GreptimeTeam/greptimedb-ingester-go)
|
||||||
|
- [GreptimeDB Java Ingester](https://github.com/GreptimeTeam/greptimedb-ingester-java)
|
||||||
|
- [GreptimeDB C++ Ingester](https://github.com/GreptimeTeam/greptimedb-ingester-cpp)
|
||||||
|
- [GreptimeDB Erlang Ingester](https://github.com/GreptimeTeam/greptimedb-ingester-erl)
|
||||||
|
- [GreptimeDB Rust Ingester](https://github.com/GreptimeTeam/greptimedb-ingester-rust)
|
||||||
|
- [GreptimeDB JavaScript Ingester](https://github.com/GreptimeTeam/greptimedb-ingester-js)
|
||||||
|
|
||||||
|
### Grafana Dashboard
|
||||||
|
|
||||||
|
Our official Grafana dashboard is available at [grafana](grafana/README.md) directory.
|
||||||
|
|
||||||
## Project Status
|
## Project Status
|
||||||
|
|
||||||
> **Status:** Beta — marching toward v1.0 GA!
|
The current version has not yet reached General Availability version standards.
|
||||||
> **GA (v1.0):** January 10, 2026
|
In line with our Greptime 2024 Roadmap, we plan to achieve a production-level
|
||||||
|
version with the update to v1.0 in August. [[Join Force]](https://github.com/GreptimeTeam/greptimedb/issues/3412)
|
||||||
- Deployed in production by open-source projects and commercial users
|
|
||||||
- Stable, actively maintained, with regular releases ([version info](https://docs.greptime.com/nightly/reference/about-greptimedb-version))
|
|
||||||
- Suitable for evaluation and pilot deployments
|
|
||||||
|
|
||||||
GreptimeDB v1.0 represents a major milestone toward maturity — marking stable APIs, production readiness, and proven performance.
|
|
||||||
|
|
||||||
**Roadmap:** Beta1 (Nov 10) → Beta2 (Nov 24) → RC1 (Dec 8) → GA (Jan 10, 2026), please read [v1.0 highlights and release plan](https://greptime.com/blogs/2025-11-05-greptimedb-v1-highlights) for details.
|
|
||||||
|
|
||||||
For production use, we recommend using the latest stable release.
|
|
||||||
[](https://www.star-history.com/#GreptimeTeam/GreptimeDB&Date)
|
|
||||||
|
|
||||||
If you find this project useful, a ⭐ would mean a lot to us!
|
|
||||||
<img alt="Known Users" src="https://greptime.com/logo/img/users.png"/>
|
|
||||||
|
|
||||||
## Community
|
## Community
|
||||||
|
|
||||||
We invite you to engage and contribute!
|
Our core team is thrilled to see you participate in any ways you like. When you are stuck, try to
|
||||||
|
ask for help by filling an issue with a detailed description of what you were trying to do
|
||||||
|
and what went wrong. If you have any questions or if you would like to get involved in our
|
||||||
|
community, please check out:
|
||||||
|
|
||||||
- [Slack](https://greptime.com/slack)
|
- GreptimeDB Community on [Slack](https://greptime.com/slack)
|
||||||
- [Discussions](https://github.com/GreptimeTeam/greptimedb/discussions)
|
- GreptimeDB [GitHub Discussions forum](https://github.com/GreptimeTeam/greptimedb/discussions)
|
||||||
- [Official Website](https://greptime.com/)
|
- Greptime official [website](https://greptime.com)
|
||||||
- [Blog](https://greptime.com/blogs/)
|
|
||||||
- [LinkedIn](https://www.linkedin.com/company/greptime/)
|
In addition, you may:
|
||||||
- [X (Twitter)](https://X.com/greptime)
|
|
||||||
- [YouTube](https://www.youtube.com/@greptime)
|
- View our official [Blog](https://greptime.com/blogs/)
|
||||||
|
- Connect us with [Linkedin](https://www.linkedin.com/company/greptime/)
|
||||||
|
- Follow us on [Twitter](https://twitter.com/greptime)
|
||||||
|
|
||||||
## License
|
## License
|
||||||
|
|
||||||
GreptimeDB is licensed under the [Apache License 2.0](https://apache.org/licenses/LICENSE-2.0.txt).
|
GreptimeDB uses the [Apache License 2.0](https://apache.org/licenses/LICENSE-2.0.txt) to strike a balance between
|
||||||
|
open contributions and allowing you to use the software however you want.
|
||||||
## Commercial Support
|
|
||||||
|
|
||||||
Running GreptimeDB in your organization?
|
|
||||||
We offer enterprise add-ons, services, training, and consulting.
|
|
||||||
[Contact us](https://greptime.com/contactus) for details.
|
|
||||||
|
|
||||||
## Contributing
|
## Contributing
|
||||||
|
|
||||||
- Read our [Contribution Guidelines](https://github.com/GreptimeTeam/greptimedb/blob/main/CONTRIBUTING.md).
|
Please refer to [contribution guidelines](CONTRIBUTING.md) and [internal concepts docs](https://docs.greptime.com/contributor-guide/overview.html) for more information.
|
||||||
- Explore [Internal Concepts](https://docs.greptime.com/contributor-guide/overview.html) and [DeepWiki](https://deepwiki.com/GreptimeTeam/greptimedb).
|
|
||||||
- Pick up a [good first issue](https://github.com/GreptimeTeam/greptimedb/issues?q=is%3Aissue+is%3Aopen+label%3A%22good+first+issue%22) and join the #contributors [Slack](https://greptime.com/slack) channel.
|
|
||||||
|
|
||||||
## Acknowledgement
|
## Acknowledgement
|
||||||
|
|
||||||
Special thanks to all contributors! See [AUTHORS.md](https://github.com/GreptimeTeam/greptimedb/blob/main/AUTHOR.md).
|
- GreptimeDB uses [Apache Arrow™](https://arrow.apache.org/) as the memory model and [Apache Parquet™](https://parquet.apache.org/) as the persistent file format.
|
||||||
|
- GreptimeDB's query engine is powered by [Apache Arrow DataFusion™](https://arrow.apache.org/datafusion/).
|
||||||
- Uses [Apache Arrow™](https://arrow.apache.org/) (memory model)
|
- [Apache OpenDAL™](https://opendal.apache.org) gives GreptimeDB a very general and elegant data access abstraction layer.
|
||||||
- [Apache Parquet™](https://parquet.apache.org/) (file storage)
|
- GreptimeDB's meta service is based on [etcd](https://etcd.io/).
|
||||||
- [Apache DataFusion™](https://arrow.apache.org/datafusion/) (query engine)
|
- GreptimeDB uses [RustPython](https://github.com/RustPython/RustPython) for experimental embedded python scripting.
|
||||||
- [Apache OpenDAL™](https://opendal.apache.org/) (data access abstraction)
|
|
||||||
38  benchmarks/Cargo.toml  Normal file
@@ -0,0 +1,38 @@
[package]
name = "benchmarks"
version.workspace = true
edition.workspace = true
license.workspace = true

[lints]
workspace = true

[dependencies]
api.workspace = true
arrow.workspace = true
chrono.workspace = true
clap.workspace = true
client = { workspace = true, features = ["testing"] }
common-base.workspace = true
common-telemetry.workspace = true
common-wal.workspace = true
dotenv.workspace = true
futures.workspace = true
futures-util.workspace = true
humantime.workspace = true
humantime-serde.workspace = true
indicatif = "0.17.1"
itertools.workspace = true
lazy_static.workspace = true
log-store.workspace = true
mito2.workspace = true
num_cpus.workspace = true
parquet.workspace = true
prometheus.workspace = true
rand.workspace = true
rskafka.workspace = true
serde.workspace = true
store-api.workspace = true
tokio.workspace = true
toml.workspace = true
uuid.workspace = true
11  benchmarks/README.md  Normal file
@@ -0,0 +1,11 @@
Benchmarkers for GreptimeDB
--------------------------------

## Wal Benchmarker
The wal benchmarker serves to evaluate the performance of GreptimeDB's Write-Ahead Log (WAL) component. It meticulously assesses the read/write performance of the WAL under diverse workloads generated by the benchmarker.


### How to use
To compile the benchmarker, navigate to the `greptimedb/benchmarks` directory and execute `cargo build --release`. Subsequently, you'll find the compiled target located at `greptimedb/target/release/wal_bench`.

The `./wal_bench -h` command reveals numerous arguments that the target accepts. Among these, a notable one is the `cfg-file` argument. By utilizing a configuration file in the TOML format, you can bypass the need to repeatedly specify cumbersome arguments.
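As a usage sketch: the `--cfg-file`/`-c` flag comes from the `Args` parser added in this change, the example config ships at `benchmarks/config/wal_bench.example.toml`, and a Kafka broker is expected at the configured `bootstrap_brokers`. Building with `cargo build --release -p benchmarks` from the workspace root is an assumed equivalent of the `cd benchmarks && cargo build --release` described above.

```shell
# Build the wal_bench binary in release mode and run it against the example config.
# Assumes a Kafka broker is reachable at localhost:9092 (see bootstrap_brokers).
cargo build --release -p benchmarks
./target/release/wal_bench --cfg-file benchmarks/config/wal_bench.example.toml
```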
21  benchmarks/config/wal_bench.example.toml  Normal file
@@ -0,0 +1,21 @@
# Refers to the documentation of `Args` in `benchmarks/src/wal_bench.rs`.
wal_provider = "kafka"
bootstrap_brokers = ["localhost:9092"]
num_workers = 10
num_topics = 32
num_regions = 1000
num_scrapes = 1000
num_rows = 5
col_types = "ifs"
max_batch_size = "512KB"
linger = "1ms"
backoff_init = "10ms"
backoff_max = "1ms"
backoff_base = 2
backoff_deadline = "3s"
compression = "zstd"
rng_seed = 42
skip_read = false
skip_write = false
random_topics = true
report_metrics = false
326  benchmarks/src/bin/wal_bench.rs  Normal file
@@ -0,0 +1,326 @@
|
// Copyright 2023 Greptime Team
|
||||||
|
//
|
||||||
|
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
// you may not use this file except in compliance with the License.
|
||||||
|
// You may obtain a copy of the License at
|
||||||
|
//
|
||||||
|
// http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
//
|
||||||
|
// Unless required by applicable law or agreed to in writing, software
|
||||||
|
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
// See the License for the specific language governing permissions and
|
||||||
|
// limitations under the License.
|
||||||
|
|
||||||
|
#![feature(int_roundings)]
|
||||||
|
|
||||||
|
use std::fs;
|
||||||
|
use std::sync::Arc;
|
||||||
|
use std::time::Instant;
|
||||||
|
|
||||||
|
use api::v1::{ColumnDataType, ColumnSchema, SemanticType};
|
||||||
|
use benchmarks::metrics;
|
||||||
|
use benchmarks::wal_bench::{Args, Config, Region, WalProvider};
|
||||||
|
use clap::Parser;
|
||||||
|
use common_telemetry::info;
|
||||||
|
use common_wal::config::kafka::common::BackoffConfig;
|
||||||
|
use common_wal::config::kafka::DatanodeKafkaConfig as KafkaConfig;
|
||||||
|
use common_wal::config::raft_engine::RaftEngineConfig;
|
||||||
|
use common_wal::options::{KafkaWalOptions, WalOptions};
|
||||||
|
use itertools::Itertools;
|
||||||
|
use log_store::kafka::log_store::KafkaLogStore;
|
||||||
|
use log_store::raft_engine::log_store::RaftEngineLogStore;
|
||||||
|
use mito2::wal::Wal;
|
||||||
|
use prometheus::{Encoder, TextEncoder};
|
||||||
|
use rand::distributions::{Alphanumeric, DistString};
|
||||||
|
use rand::rngs::SmallRng;
|
||||||
|
use rand::SeedableRng;
|
||||||
|
use rskafka::client::partition::Compression;
|
||||||
|
use rskafka::client::ClientBuilder;
|
||||||
|
use store_api::logstore::LogStore;
|
||||||
|
use store_api::storage::RegionId;
|
||||||
|
|
||||||
|
async fn run_benchmarker<S: LogStore>(cfg: &Config, topics: &[String], wal: Arc<Wal<S>>) {
|
||||||
|
let chunk_size = cfg.num_regions.div_ceil(cfg.num_workers);
|
||||||
|
let region_chunks = (0..cfg.num_regions)
|
||||||
|
.map(|id| {
|
||||||
|
build_region(
|
||||||
|
id as u64,
|
||||||
|
topics,
|
||||||
|
&mut SmallRng::seed_from_u64(cfg.rng_seed),
|
||||||
|
cfg,
|
||||||
|
)
|
||||||
|
})
|
||||||
|
.chunks(chunk_size as usize)
|
||||||
|
.into_iter()
|
||||||
|
.map(|chunk| Arc::new(chunk.collect::<Vec<_>>()))
|
||||||
|
.collect::<Vec<_>>();
|
||||||
|
|
||||||
|
let mut write_elapsed = 0;
|
||||||
|
let mut read_elapsed = 0;
|
||||||
|
|
||||||
|
if !cfg.skip_write {
|
||||||
|
info!("Benchmarking write ...");
|
||||||
|
|
||||||
|
let num_scrapes = cfg.num_scrapes;
|
||||||
|
let timer = Instant::now();
|
||||||
|
futures::future::join_all((0..cfg.num_workers).map(|i| {
|
||||||
|
let wal = wal.clone();
|
||||||
|
let regions = region_chunks[i as usize].clone();
|
||||||
|
tokio::spawn(async move {
|
||||||
|
for _ in 0..num_scrapes {
|
||||||
|
let mut wal_writer = wal.writer();
|
||||||
|
regions
|
||||||
|
.iter()
|
||||||
|
.for_each(|region| region.add_wal_entry(&mut wal_writer));
|
||||||
|
wal_writer.write_to_wal().await.unwrap();
|
||||||
|
}
|
||||||
|
})
|
||||||
|
}))
|
||||||
|
.await;
|
||||||
|
write_elapsed += timer.elapsed().as_millis();
|
||||||
|
}
|
||||||
|
|
||||||
|
if !cfg.skip_read {
|
||||||
|
info!("Benchmarking read ...");
|
||||||
|
|
||||||
|
let timer = Instant::now();
|
||||||
|
futures::future::join_all((0..cfg.num_workers).map(|i| {
|
||||||
|
let wal = wal.clone();
|
||||||
|
let regions = region_chunks[i as usize].clone();
|
||||||
|
tokio::spawn(async move {
|
||||||
|
for region in regions.iter() {
|
||||||
|
region.replay(&wal).await;
|
||||||
|
}
|
||||||
|
})
|
||||||
|
}))
|
||||||
|
.await;
|
||||||
|
read_elapsed = timer.elapsed().as_millis();
|
||||||
|
}
|
||||||
|
|
||||||
|
dump_report(cfg, write_elapsed, read_elapsed);
|
||||||
|
}
|
||||||
|
|
||||||
|
fn build_region(id: u64, topics: &[String], rng: &mut SmallRng, cfg: &Config) -> Region {
|
||||||
|
let wal_options = match cfg.wal_provider {
|
||||||
|
WalProvider::Kafka => {
|
||||||
|
assert!(!topics.is_empty());
|
||||||
|
WalOptions::Kafka(KafkaWalOptions {
|
||||||
|
topic: topics.get(id as usize % topics.len()).cloned().unwrap(),
|
||||||
|
})
|
||||||
|
}
|
||||||
|
WalProvider::RaftEngine => WalOptions::RaftEngine,
|
||||||
|
};
|
||||||
|
Region::new(
|
||||||
|
RegionId::from_u64(id),
|
||||||
|
build_schema(&parse_col_types(&cfg.col_types), rng),
|
||||||
|
wal_options,
|
||||||
|
cfg.num_rows,
|
||||||
|
cfg.rng_seed,
|
||||||
|
)
|
||||||
|
}
|
||||||
|
|
||||||
|
fn build_schema(col_types: &[ColumnDataType], mut rng: &mut SmallRng) -> Vec<ColumnSchema> {
|
||||||
|
col_types
|
||||||
|
.iter()
|
||||||
|
.map(|col_type| ColumnSchema {
|
||||||
|
column_name: Alphanumeric.sample_string(&mut rng, 5),
|
||||||
|
datatype: *col_type as i32,
|
||||||
|
semantic_type: SemanticType::Field as i32,
|
||||||
|
datatype_extension: None,
|
||||||
|
})
|
||||||
|
.chain(vec![ColumnSchema {
|
||||||
|
column_name: "ts".to_string(),
|
||||||
|
datatype: ColumnDataType::TimestampMillisecond as i32,
|
||||||
|
semantic_type: SemanticType::Tag as i32,
|
||||||
|
datatype_extension: None,
|
||||||
|
}])
|
||||||
|
.collect()
|
||||||
|
}
|
||||||
|
|
||||||
|
fn dump_report(cfg: &Config, write_elapsed: u128, read_elapsed: u128) {
|
||||||
|
let cost_report = format!(
|
||||||
|
"write costs: {} ms, read costs: {} ms",
|
||||||
|
write_elapsed, read_elapsed,
|
||||||
|
);
|
||||||
|
|
||||||
|
let total_written_bytes = metrics::METRIC_WAL_WRITE_BYTES_TOTAL.get() as u128;
|
||||||
|
let write_throughput = if write_elapsed > 0 {
|
||||||
|
(total_written_bytes * 1000).div_floor(write_elapsed)
|
||||||
|
} else {
|
||||||
|
0
|
||||||
|
};
|
||||||
|
let total_read_bytes = metrics::METRIC_WAL_READ_BYTES_TOTAL.get() as u128;
|
||||||
|
let read_throughput = if read_elapsed > 0 {
|
||||||
|
(total_read_bytes * 1000).div_floor(read_elapsed)
|
||||||
|
} else {
|
||||||
|
0
|
||||||
|
};
|
||||||
|
|
||||||
|
let throughput_report = format!(
|
||||||
|
"total written bytes: {} bytes, total read bytes: {} bytes, write throuput: {} bytes/s ({} mb/s), read throughput: {} bytes/s ({} mb/s)",
|
||||||
|
total_written_bytes,
|
||||||
|
total_read_bytes,
|
||||||
|
write_throughput,
|
||||||
|
write_throughput.div_floor(1 << 20),
|
||||||
|
read_throughput,
|
||||||
|
read_throughput.div_floor(1 << 20),
|
||||||
|
);
|
||||||
|
|
||||||
|
let metrics_report = if cfg.report_metrics {
|
||||||
|
let mut buffer = Vec::new();
|
||||||
|
let encoder = TextEncoder::new();
|
||||||
|
let metrics = prometheus::gather();
|
||||||
|
encoder.encode(&metrics, &mut buffer).unwrap();
|
||||||
|
String::from_utf8(buffer).unwrap()
|
||||||
|
} else {
|
||||||
|
String::new()
|
||||||
|
};
|
||||||
|
|
||||||
|
info!(
|
||||||
|
r#"
|
||||||
|
Benchmark config:
|
||||||
|
{cfg:?}
|
||||||
|
|
||||||
|
Benchmark report:
|
||||||
|
{cost_report}
|
||||||
|
{throughput_report}
|
||||||
|
{metrics_report}"#
|
||||||
|
);
|
||||||
|
}
|
||||||
|
|
||||||
|
async fn create_topics(cfg: &Config) -> Vec<String> {
|
||||||
|
// Creates topics.
|
||||||
|
let client = ClientBuilder::new(cfg.bootstrap_brokers.clone())
|
||||||
|
.build()
|
||||||
|
.await
|
||||||
|
.unwrap();
|
||||||
|
let ctrl_client = client.controller_client().unwrap();
|
||||||
|
let (topics, tasks): (Vec<_>, Vec<_>) = (0..cfg.num_topics)
|
||||||
|
.map(|i| {
|
||||||
|
let topic = if cfg.random_topics {
|
||||||
|
format!(
|
||||||
|
"greptime_wal_bench_topic_{}_{}",
|
||||||
|
uuid::Uuid::new_v4().as_u128(),
|
||||||
|
i
|
||||||
|
)
|
||||||
|
} else {
|
||||||
|
format!("greptime_wal_bench_topic_{}", i)
|
||||||
|
};
|
||||||
|
let task = ctrl_client.create_topic(
|
||||||
|
topic.clone(),
|
||||||
|
1,
|
||||||
|
cfg.bootstrap_brokers.len() as i16,
|
||||||
|
2000,
|
||||||
|
);
|
||||||
|
(topic, task)
|
||||||
|
})
|
||||||
|
.unzip();
|
||||||
|
// Must ignore errors since we allow topics being created more than once.
|
||||||
|
let _ = futures::future::try_join_all(tasks).await;
|
||||||
|
|
||||||
|
topics
|
||||||
|
}
|
||||||
|
|
||||||
|
fn parse_compression(comp: &str) -> Compression {
|
||||||
|
match comp {
|
||||||
|
"no" => Compression::NoCompression,
|
||||||
|
"gzip" => Compression::Gzip,
|
||||||
|
"lz4" => Compression::Lz4,
|
||||||
|
"snappy" => Compression::Snappy,
|
||||||
|
"zstd" => Compression::Zstd,
|
||||||
|
other => unreachable!("Unrecognized compression {other}"),
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
fn parse_col_types(col_types: &str) -> Vec<ColumnDataType> {
|
||||||
|
let parts = col_types.split('x').collect::<Vec<_>>();
|
||||||
|
assert!(parts.len() <= 2);
|
||||||
|
|
||||||
|
let pattern = parts[0];
|
||||||
|
let repeat = parts
|
||||||
|
.get(1)
|
||||||
|
.map(|r| r.parse::<usize>().unwrap())
|
||||||
|
.unwrap_or(1);
|
||||||
|
|
||||||
|
pattern
|
||||||
|
.chars()
|
||||||
|
.map(|c| match c {
|
||||||
|
'i' | 'I' => ColumnDataType::Int64,
|
||||||
|
'f' | 'F' => ColumnDataType::Float64,
|
||||||
|
's' | 'S' => ColumnDataType::String,
|
||||||
|
other => unreachable!("Cannot parse {other} as a column data type"),
|
||||||
|
})
|
||||||
|
.cycle()
|
||||||
|
.take(pattern.len() * repeat)
|
||||||
|
.collect()
|
||||||
|
}
|
||||||
|
|
||||||
|
fn main() {
|
||||||
|
// Sets the global log level to INFO and suppresses rskafka logs below ERROR.
|
||||||
|
std::env::set_var("UNITTEST_LOG_LEVEL", "info,rskafka=error");
|
||||||
|
common_telemetry::init_default_ut_logging();
|
||||||
|
|
||||||
|
let args = Args::parse();
|
||||||
|
let cfg = if !args.cfg_file.is_empty() {
|
||||||
|
toml::from_str(&fs::read_to_string(&args.cfg_file).unwrap()).unwrap()
|
||||||
|
} else {
|
||||||
|
Config::from(args)
|
||||||
|
};
|
||||||
|
|
||||||
|
// Validates arguments.
|
||||||
|
if cfg.num_regions < cfg.num_workers {
|
||||||
|
panic!("num_regions must be greater than or equal to num_workers");
|
||||||
|
}
|
||||||
|
if cfg
|
||||||
|
.num_workers
|
||||||
|
.min(cfg.num_topics)
|
||||||
|
.min(cfg.num_regions)
|
||||||
|
.min(cfg.num_scrapes)
|
||||||
|
.min(cfg.max_batch_size.as_bytes() as u32)
|
||||||
|
.min(cfg.bootstrap_brokers.len() as u32)
|
||||||
|
== 0
|
||||||
|
{
|
||||||
|
panic!("Invalid arguments");
|
||||||
|
}
|
||||||
|
|
||||||
|
tokio::runtime::Builder::new_multi_thread()
|
||||||
|
.enable_all()
|
||||||
|
.build()
|
||||||
|
.unwrap()
|
||||||
|
.block_on(async {
|
||||||
|
match cfg.wal_provider {
|
||||||
|
WalProvider::Kafka => {
|
||||||
|
let topics = create_topics(&cfg).await;
|
||||||
|
let kafka_cfg = KafkaConfig {
|
||||||
|
broker_endpoints: cfg.bootstrap_brokers.clone(),
|
||||||
|
max_batch_size: cfg.max_batch_size,
|
||||||
|
linger: cfg.linger,
|
||||||
|
backoff: BackoffConfig {
|
||||||
|
init: cfg.backoff_init,
|
||||||
|
max: cfg.backoff_max,
|
||||||
|
base: cfg.backoff_base,
|
||||||
|
deadline: Some(cfg.backoff_deadline),
|
||||||
|
},
|
||||||
|
compression: parse_compression(&cfg.compression),
|
||||||
|
..Default::default()
|
||||||
|
};
|
||||||
|
let store = Arc::new(KafkaLogStore::try_new(&kafka_cfg).await.unwrap());
|
||||||
|
let wal = Arc::new(Wal::new(store));
|
||||||
|
run_benchmarker(&cfg, &topics, wal).await;
|
||||||
|
}
|
||||||
|
WalProvider::RaftEngine => {
|
||||||
|
// The benchmarker assumes the raft engine directory exists.
|
||||||
|
let store = RaftEngineLogStore::try_new(
|
||||||
|
"/tmp/greptimedb/raft-engine-wal".to_string(),
|
||||||
|
RaftEngineConfig::default(),
|
||||||
|
)
|
||||||
|
.await
|
||||||
|
.map(Arc::new)
|
||||||
|
.unwrap();
|
||||||
|
let wal = Arc::new(Wal::new(store));
|
||||||
|
run_benchmarker(&cfg, &[], wal).await;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
});
|
||||||
|
}
|
||||||
@@ -12,5 +12,5 @@
 // See the License for the specific language governing permissions and
 // limitations under the License.

-pub mod datanode;
-pub mod memory;
+pub mod metrics;
+pub mod wal_bench;
39  benchmarks/src/metrics.rs  Normal file
@@ -0,0 +1,39 @@
// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

use lazy_static::lazy_static;
use prometheus::*;

/// Logstore label.
pub const LOGSTORE_LABEL: &str = "logstore";
/// Operation type label.
pub const OPTYPE_LABEL: &str = "optype";

lazy_static! {
    /// Counters of bytes of each operation on a logstore.
    pub static ref METRIC_WAL_OP_BYTES_TOTAL: IntCounterVec = register_int_counter_vec!(
        "greptime_bench_wal_op_bytes_total",
        "wal operation bytes total",
        &[OPTYPE_LABEL],
    )
    .unwrap();
    /// Counter of bytes of the append_batch operation.
    pub static ref METRIC_WAL_WRITE_BYTES_TOTAL: IntCounter = METRIC_WAL_OP_BYTES_TOTAL.with_label_values(
        &["write"],
    );
    /// Counter of bytes of the read operation.
    pub static ref METRIC_WAL_READ_BYTES_TOTAL: IntCounter = METRIC_WAL_OP_BYTES_TOTAL.with_label_values(
        &["read"],
    );
}
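To make the metrics wiring above concrete, here is a self-contained sketch (illustration only, not part of the change) of the same pattern the benchmarker relies on: register a labeled byte counter, bind a per-operation child, and render everything with `TextEncoder`, as `dump_report` in the `wal_bench` binary does. The metric and label names below are placeholders.

```rust
// A minimal sketch of the counter + text-dump pattern used by the benchmarker.
use lazy_static::lazy_static;
use prometheus::{register_int_counter_vec, Encoder, IntCounter, IntCounterVec, TextEncoder};

lazy_static! {
    static ref OP_BYTES: IntCounterVec = register_int_counter_vec!(
        "example_wal_op_bytes_total",
        "wal operation bytes total",
        &["optype"],
    )
    .unwrap();
    static ref WRITE_BYTES: IntCounter = OP_BYTES.with_label_values(&["write"]);
}

fn main() {
    // Record a fake 1 KiB write, then render all registered metrics as text.
    WRITE_BYTES.inc_by(1024);

    let mut buffer = Vec::new();
    TextEncoder::new()
        .encode(&prometheus::gather(), &mut buffer)
        .unwrap();
    println!("{}", String::from_utf8(buffer).unwrap());
}
```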
366  benchmarks/src/wal_bench.rs  Normal file
@@ -0,0 +1,366 @@
|
// Copyright 2023 Greptime Team
|
||||||
|
//
|
||||||
|
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
// you may not use this file except in compliance with the License.
|
||||||
|
// You may obtain a copy of the License at
|
||||||
|
//
|
||||||
|
// http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
//
|
||||||
|
// Unless required by applicable law or agreed to in writing, software
|
||||||
|
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
// See the License for the specific language governing permissions and
|
||||||
|
// limitations under the License.
|
||||||
|
|
||||||
|
use std::mem::size_of;
|
||||||
|
use std::sync::atomic::{AtomicI64, AtomicU64, Ordering};
|
||||||
|
use std::sync::{Arc, Mutex};
|
||||||
|
use std::time::Duration;
|
||||||
|
|
||||||
|
use api::v1::value::ValueData;
|
||||||
|
use api::v1::{ColumnDataType, ColumnSchema, Mutation, OpType, Row, Rows, Value, WalEntry};
|
||||||
|
use clap::{Parser, ValueEnum};
|
||||||
|
use common_base::readable_size::ReadableSize;
|
||||||
|
use common_wal::options::WalOptions;
|
||||||
|
use futures::StreamExt;
|
||||||
|
use mito2::wal::{Wal, WalWriter};
|
||||||
|
use rand::distributions::{Alphanumeric, DistString, Uniform};
|
||||||
|
use rand::rngs::SmallRng;
|
||||||
|
use rand::{Rng, SeedableRng};
|
||||||
|
use serde::{Deserialize, Serialize};
|
||||||
|
use store_api::logstore::provider::Provider;
|
||||||
|
use store_api::logstore::LogStore;
|
||||||
|
use store_api::storage::RegionId;
|
||||||
|
|
||||||
|
use crate::metrics;
|
||||||
|
|
||||||
|
/// The wal provider.
|
||||||
|
#[derive(Clone, ValueEnum, Default, Debug, PartialEq, Serialize, Deserialize)]
|
||||||
|
#[serde(rename_all = "snake_case")]
|
||||||
|
pub enum WalProvider {
|
||||||
|
#[default]
|
||||||
|
RaftEngine,
|
||||||
|
Kafka,
|
||||||
|
}
|
||||||
|
|
||||||
|
#[derive(Parser)]
|
||||||
|
pub struct Args {
|
||||||
|
/// The provided configuration file.
|
||||||
|
/// The example configuration file can be found at `greptimedb/benchmarks/config/wal_bench.example.toml`.
|
||||||
|
#[clap(long, short = 'c')]
|
||||||
|
pub cfg_file: String,
|
||||||
|
|
||||||
|
/// The wal provider.
|
||||||
|
#[clap(long, value_enum, default_value_t = WalProvider::default())]
|
||||||
|
pub wal_provider: WalProvider,
|
||||||
|
|
||||||
|
/// The advertised addresses of the kafka brokers.
|
||||||
|
/// If there are multiple bootstrap brokers, their addresses should be separated by commas, e.g. "localhost:9092,localhost:9093".
|
||||||
|
#[clap(long, short = 'b', default_value = "localhost:9092")]
|
||||||
|
pub bootstrap_brokers: String,
|
||||||
|
|
||||||
|
/// The number of workers each running in a dedicated thread.
|
||||||
|
#[clap(long, default_value_t = num_cpus::get() as u32)]
|
||||||
|
pub num_workers: u32,
|
||||||
|
|
||||||
|
/// The number of kafka topics to be created.
|
||||||
|
#[clap(long, default_value_t = 32)]
|
||||||
|
pub num_topics: u32,
|
||||||
|
|
||||||
|
/// The number of regions.
|
||||||
|
#[clap(long, default_value_t = 1000)]
|
||||||
|
pub num_regions: u32,
|
||||||
|
|
||||||
|
/// The number of times each region is scraped.
|
||||||
|
#[clap(long, default_value_t = 1000)]
|
||||||
|
pub num_scrapes: u32,
|
||||||
|
|
||||||
|
/// The number of rows in each wal entry.
|
||||||
|
/// Each time a region is scraped, a wal entry containing this many rows will be produced.
|
||||||
|
#[clap(long, default_value_t = 5)]
|
||||||
|
pub num_rows: u32,
|
||||||
|
|
||||||
|
/// The column types of the schema for each region.
|
||||||
|
/// Currently, three column types are supported:
|
||||||
|
/// - i = ColumnDataType::Int64
|
||||||
|
/// - f = ColumnDataType::Float64
|
||||||
|
/// - s = ColumnDataType::String
|
||||||
|
/// For e.g., "ifs" will be parsed as three columns: i64, f64, and string.
|
||||||
|
///
|
||||||
|
/// Additionally, a "x" sign can be provided to repeat the column types for a given number of times.
|
||||||
|
/// For e.g., "iix2" will be parsed as 4 columns: i64, i64, i64, and i64.
|
||||||
|
/// This feature is useful if you want to specify many columns.
|
||||||
|
#[clap(long, default_value = "ifs")]
|
||||||
|
pub col_types: String,
|
||||||
|
|
||||||
|
/// The maximum size of a batch of kafka records.
|
||||||
|
/// The default value is 512KB.
|
||||||
|
#[clap(long, default_value = "512KB")]
|
||||||
|
pub max_batch_size: ReadableSize,
|
||||||
|
|
||||||
|
/// The minimum latency the kafka client issues a batch of kafka records.
|
||||||
|
/// However, a batch of kafka records would be immediately issued if a record cannot be fit into the batch.
|
||||||
|
#[clap(long, default_value = "1ms")]
|
||||||
|
pub linger: String,
|
||||||
|
|
||||||
|
/// The initial backoff delay of the kafka consumer.
|
||||||
|
#[clap(long, default_value = "10ms")]
|
||||||
|
pub backoff_init: String,
|
||||||
|
|
||||||
|
/// The maximum backoff delay of the kafka consumer.
|
||||||
|
#[clap(long, default_value = "1s")]
|
||||||
|
pub backoff_max: String,
|
||||||
|
|
||||||
|
/// The exponential backoff rate of the kafka consumer. The next back off = base * the current backoff.
|
||||||
|
#[clap(long, default_value_t = 2)]
|
||||||
|
pub backoff_base: u32,
|
||||||
|
|
||||||
|
/// The deadline of backoff. The backoff ends if the total backoff delay reaches the deadline.
|
||||||
|
#[clap(long, default_value = "3s")]
|
||||||
|
pub backoff_deadline: String,
|
||||||
|
|
||||||
|
/// The client-side compression algorithm for kafka records.
|
||||||
|
#[clap(long, default_value = "zstd")]
|
||||||
|
pub compression: String,
|
||||||
|
|
||||||
|
/// The seed of random number generators.
|
||||||
|
#[clap(long, default_value_t = 42)]
|
||||||
|
pub rng_seed: u64,
|
||||||
|
|
||||||
|
/// Skips the read phase, aka. region replay, if set to true.
|
||||||
|
#[clap(long, default_value_t = false)]
|
||||||
|
pub skip_read: bool,
|
||||||
|
|
||||||
|
/// Skips the write phase if set to true.
|
||||||
|
#[clap(long, default_value_t = false)]
|
||||||
|
pub skip_write: bool,
|
||||||
|
|
||||||
|
/// Randomly generates topic names if set to true.
|
||||||
|
/// Useful when you want to run the benchmarker without worrying about the topics created before.
|
||||||
|
#[clap(long, default_value_t = false)]
|
||||||
|
pub random_topics: bool,
|
||||||
|
|
||||||
|
/// Logs out the gathered prometheus metrics when the benchmarker ends.
|
||||||
|
#[clap(long, default_value_t = false)]
|
||||||
|
pub report_metrics: bool,
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Benchmarker config.
|
||||||
|
#[derive(Debug, Clone, Serialize, Deserialize)]
|
||||||
|
pub struct Config {
|
||||||
|
pub wal_provider: WalProvider,
|
||||||
|
pub bootstrap_brokers: Vec<String>,
|
||||||
|
pub num_workers: u32,
|
||||||
|
pub num_topics: u32,
|
||||||
|
pub num_regions: u32,
|
||||||
|
pub num_scrapes: u32,
|
||||||
|
pub num_rows: u32,
|
||||||
|
pub col_types: String,
|
||||||
|
pub max_batch_size: ReadableSize,
|
||||||
|
#[serde(with = "humantime_serde")]
|
||||||
|
pub linger: Duration,
|
||||||
|
#[serde(with = "humantime_serde")]
|
||||||
|
pub backoff_init: Duration,
|
||||||
|
#[serde(with = "humantime_serde")]
|
||||||
|
pub backoff_max: Duration,
|
||||||
|
pub backoff_base: u32,
|
||||||
|
#[serde(with = "humantime_serde")]
|
||||||
|
pub backoff_deadline: Duration,
|
||||||
|
pub compression: String,
|
||||||
|
pub rng_seed: u64,
|
||||||
|
pub skip_read: bool,
|
||||||
|
pub skip_write: bool,
|
||||||
|
pub random_topics: bool,
|
||||||
|
pub report_metrics: bool,
|
||||||
|
}
|
||||||
|
|
||||||
|
impl From<Args> for Config {
|
||||||
|
fn from(args: Args) -> Self {
|
||||||
|
let cfg = Self {
|
||||||
|
wal_provider: args.wal_provider,
|
||||||
|
bootstrap_brokers: args
|
||||||
|
.bootstrap_brokers
|
||||||
|
.split(',')
|
||||||
|
.map(ToString::to_string)
|
||||||
|
.collect::<Vec<_>>(),
|
||||||
|
num_workers: args.num_workers.min(num_cpus::get() as u32),
|
||||||
|
num_topics: args.num_topics,
|
||||||
|
num_regions: args.num_regions,
|
||||||
|
num_scrapes: args.num_scrapes,
|
||||||
|
num_rows: args.num_rows,
|
||||||
|
col_types: args.col_types,
|
||||||
|
max_batch_size: args.max_batch_size,
|
||||||
|
linger: humantime::parse_duration(&args.linger).unwrap(),
|
||||||
|
backoff_init: humantime::parse_duration(&args.backoff_init).unwrap(),
|
||||||
|
backoff_max: humantime::parse_duration(&args.backoff_max).unwrap(),
|
||||||
|
backoff_base: args.backoff_base,
|
||||||
|
backoff_deadline: humantime::parse_duration(&args.backoff_deadline).unwrap(),
|
||||||
|
compression: args.compression,
|
||||||
|
rng_seed: args.rng_seed,
|
||||||
|
            skip_read: args.skip_read,
            skip_write: args.skip_write,
            random_topics: args.random_topics,
            report_metrics: args.report_metrics,
        };

        cfg
    }
}

/// The region used for wal benchmarker.
pub struct Region {
    id: RegionId,
    schema: Vec<ColumnSchema>,
    provider: Provider,
    next_sequence: AtomicU64,
    next_entry_id: AtomicU64,
    next_timestamp: AtomicI64,
    rng: Mutex<Option<SmallRng>>,
    num_rows: u32,
}

impl Region {
    /// Creates a new region.
    pub fn new(
        id: RegionId,
        schema: Vec<ColumnSchema>,
        wal_options: WalOptions,
        num_rows: u32,
        rng_seed: u64,
    ) -> Self {
        let provider = match wal_options {
            WalOptions::RaftEngine => Provider::raft_engine_provider(id.as_u64()),
            WalOptions::Kafka(opts) => Provider::kafka_provider(opts.topic),
        };
        Self {
            id,
            schema,
            provider,
            next_sequence: AtomicU64::new(1),
            next_entry_id: AtomicU64::new(1),
            next_timestamp: AtomicI64::new(1655276557000),
            rng: Mutex::new(Some(SmallRng::seed_from_u64(rng_seed))),
            num_rows,
        }
    }

    /// Scrapes the region and adds the generated entry to wal.
    pub fn add_wal_entry<S: LogStore>(&self, wal_writer: &mut WalWriter<S>) {
        let mutation = Mutation {
            op_type: OpType::Put as i32,
            sequence: self
                .next_sequence
                .fetch_add(self.num_rows as u64, Ordering::Relaxed),
            rows: Some(self.build_rows()),
        };
        let entry = WalEntry {
            mutations: vec![mutation],
        };
        metrics::METRIC_WAL_WRITE_BYTES_TOTAL.inc_by(Self::entry_estimated_size(&entry) as u64);

        wal_writer
            .add_entry(
                self.id,
                self.next_entry_id.fetch_add(1, Ordering::Relaxed),
                &entry,
                &self.provider,
            )
            .unwrap();
    }

    /// Replays the region.
    pub async fn replay<S: LogStore>(&self, wal: &Arc<Wal<S>>) {
        let mut wal_stream = wal.scan(self.id, 0, &self.provider).unwrap();
        while let Some(res) = wal_stream.next().await {
            let (_, entry) = res.unwrap();
            metrics::METRIC_WAL_READ_BYTES_TOTAL.inc_by(Self::entry_estimated_size(&entry) as u64);
        }
    }

    /// Computes the estimated size in bytes of the entry.
    pub fn entry_estimated_size(entry: &WalEntry) -> usize {
        let wrapper_size = size_of::<WalEntry>()
            + entry.mutations.capacity() * size_of::<Mutation>()
            + size_of::<Rows>();

        let rows = entry.mutations[0].rows.as_ref().unwrap();

        let schema_size = rows.schema.capacity() * size_of::<ColumnSchema>()
            + rows
                .schema
                .iter()
                .map(|s| s.column_name.capacity())
                .sum::<usize>();
        let values_size = (rows.rows.capacity() * size_of::<Row>())
            + rows
                .rows
                .iter()
                .map(|r| r.values.capacity() * size_of::<Value>())
                .sum::<usize>();

        wrapper_size + schema_size + values_size
    }

    fn build_rows(&self) -> Rows {
        let cols = self
            .schema
            .iter()
            .map(|col_schema| {
                let col_data_type = ColumnDataType::try_from(col_schema.datatype).unwrap();
                self.build_col(&col_data_type, self.num_rows)
            })
            .collect::<Vec<_>>();

        let rows = (0..self.num_rows)
            .map(|i| {
                let values = cols.iter().map(|col| col[i as usize].clone()).collect();
                Row { values }
            })
            .collect();

        Rows {
            schema: self.schema.clone(),
            rows,
        }
    }

    fn build_col(&self, col_data_type: &ColumnDataType, num_rows: u32) -> Vec<Value> {
        let mut rng_guard = self.rng.lock().unwrap();
        let rng = rng_guard.as_mut().unwrap();
        match col_data_type {
            ColumnDataType::TimestampMillisecond => (0..num_rows)
                .map(|_| {
                    let ts = self.next_timestamp.fetch_add(1000, Ordering::Relaxed);
                    Value {
                        value_data: Some(ValueData::TimestampMillisecondValue(ts)),
                    }
                })
                .collect(),
            ColumnDataType::Int64 => (0..num_rows)
                .map(|_| {
                    let v = rng.sample(Uniform::new(0, 10_000));
                    Value {
                        value_data: Some(ValueData::I64Value(v)),
                    }
                })
                .collect(),
            ColumnDataType::Float64 => (0..num_rows)
                .map(|_| {
                    let v = rng.sample(Uniform::new(0.0, 5000.0));
                    Value {
                        value_data: Some(ValueData::F64Value(v)),
                    }
                })
                .collect(),
            ColumnDataType::String => (0..num_rows)
                .map(|_| {
                    let v = Alphanumeric.sample_string(rng, 10);
                    Value {
                        value_data: Some(ValueData::StringValue(v)),
                    }
                })
                .collect(),
            _ => unreachable!(),
        }
    }
}
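Note on `entry_estimated_size`: the estimate is purely capacity-based. It sums the shallow sizes of the wrapper types plus the heap capacity held by column names and row values, and it deliberately ignores protobuf encoding overhead, which keeps the bookkeeping cheap inside the write path of the benchmark. The sketch below is a minimal, self-contained illustration of that arithmetic; the `WalEntry`, `Mutation`, `Rows`, `Row`, `ColumnSchema`, and `Value` structs here are simplified local stand-ins for the example, not the actual greptime-proto types.

```rust
use std::mem::size_of;

// Simplified stand-ins for the proto types used by the benchmarker.
// Field layouts are illustrative only.
struct ColumnSchema { column_name: String }
#[allow(dead_code)]
struct Value { value_data: Option<i64> }
struct Row { values: Vec<Value> }
struct Rows { schema: Vec<ColumnSchema>, rows: Vec<Row> }
struct Mutation { rows: Option<Rows> }
struct WalEntry { mutations: Vec<Mutation> }

/// Same capacity-based arithmetic as `Region::entry_estimated_size`.
fn estimated_size(entry: &WalEntry) -> usize {
    let wrapper = size_of::<WalEntry>()
        + entry.mutations.capacity() * size_of::<Mutation>()
        + size_of::<Rows>();
    let rows = entry.mutations[0].rows.as_ref().unwrap();
    let schema = rows.schema.capacity() * size_of::<ColumnSchema>()
        + rows.schema.iter().map(|s| s.column_name.capacity()).sum::<usize>();
    let values = rows.rows.capacity() * size_of::<Row>()
        + rows.rows.iter().map(|r| r.values.capacity() * size_of::<Value>()).sum::<usize>();
    wrapper + schema + values
}

fn main() {
    // Two columns ("ts", "value") and three rows of two values each.
    let entry = WalEntry {
        mutations: vec![Mutation {
            rows: Some(Rows {
                schema: vec![
                    ColumnSchema { column_name: "ts".to_string() },
                    ColumnSchema { column_name: "value".to_string() },
                ],
                rows: (0..3)
                    .map(|i| Row {
                        values: vec![
                            Value { value_data: Some(i) },
                            Value { value_data: Some(i * 10) },
                        ],
                    })
                    .collect(),
            }),
        }],
    };
    println!("estimated size: {} bytes", estimated_size(&entry));
}
```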
@@ -1,12 +1,10 @@
 # Configurations
 
-- [Configurations](#configurations)
 - [Standalone Mode](#standalone-mode)
 - [Distributed Mode](#distributed-mode)
 - [Frontend](#frontend)
 - [Metasrv](#metasrv)
 - [Datanode](#datanode)
-- [Flownode](#flownode)
 
 ## Standalone Mode
@@ -25,7 +23,3 @@
 ### Datanode
 
 {{ toml2docs "./datanode.example.toml" }}
-
-### Flownode
-
-{{ toml2docs "./flownode.example.toml"}}
650 config/config.md
@@ -1,136 +1,98 @@
|
|||||||
# Configurations
|
# Configurations
|
||||||
|
|
||||||
- [Configurations](#configurations)
|
- [Standalone Mode](#standalone-mode)
|
||||||
- [Standalone Mode](#standalone-mode)
|
- [Distributed Mode](#distributed-mode)
|
||||||
- [Distributed Mode](#distributed-mode)
|
|
||||||
- [Frontend](#frontend)
|
- [Frontend](#frontend)
|
||||||
- [Metasrv](#metasrv)
|
- [Metasrv](#metasrv)
|
||||||
- [Datanode](#datanode)
|
- [Datanode](#datanode)
|
||||||
- [Flownode](#flownode)
|
|
||||||
|
|
||||||
## Standalone Mode
|
## Standalone Mode
|
||||||
|
|
||||||
| Key | Type | Default | Descriptions |
|
| Key | Type | Default | Descriptions |
|
||||||
| --- | -----| ------- | ----------- |
|
| --- | -----| ------- | ----------- |
|
||||||
| `default_timezone` | String | Unset | The default timezone of the server. |
|
| `mode` | String | `standalone` | The running mode of the datanode. It can be `standalone` or `distributed`. |
|
||||||
| `default_column_prefix` | String | Unset | The default column prefix for auto-created time index and value columns. |
|
| `enable_telemetry` | Bool | `true` | Enable telemetry to collect anonymous usage data. |
|
||||||
| `init_regions_in_background` | Bool | `false` | Initialize all regions in the background during the startup.<br/>By default, it provides services after all regions have been initialized. |
|
| `default_timezone` | String | `None` | The default timezone of the server. |
|
||||||
| `init_regions_parallelism` | Integer | `16` | Parallelism of initializing regions. |
|
|
||||||
| `max_concurrent_queries` | Integer | `0` | The maximum current queries allowed to be executed. Zero means unlimited.<br/>NOTE: This setting affects scan_memory_limit's privileged tier allocation.<br/>When set, 70% of queries get privileged memory access (full scan_memory_limit).<br/>The remaining 30% get standard tier access (70% of scan_memory_limit). |
|
|
||||||
| `enable_telemetry` | Bool | `true` | Enable telemetry to collect anonymous usage data. Enabled by default. |
|
|
||||||
| `max_in_flight_write_bytes` | String | Unset | The maximum in-flight write bytes. |
|
|
||||||
| `runtime` | -- | -- | The runtime options. |
|
|
||||||
| `runtime.global_rt_size` | Integer | `8` | The number of threads to execute the runtime for global read operations. |
|
|
||||||
| `runtime.compact_rt_size` | Integer | `4` | The number of threads to execute the runtime for global write operations. |
|
|
||||||
| `http` | -- | -- | The HTTP server options. |
|
| `http` | -- | -- | The HTTP server options. |
|
||||||
| `http.addr` | String | `127.0.0.1:4000` | The address to bind the HTTP server. |
|
| `http.addr` | String | `127.0.0.1:4000` | The address to bind the HTTP server. |
|
||||||
| `http.timeout` | String | `0s` | HTTP request timeout. Set to 0 to disable timeout. |
|
| `http.timeout` | String | `30s` | HTTP request timeout. |
|
||||||
| `http.body_limit` | String | `64MB` | HTTP request body limit.<br/>The following units are supported: `B`, `KB`, `KiB`, `MB`, `MiB`, `GB`, `GiB`, `TB`, `TiB`, `PB`, `PiB`.<br/>Set to 0 to disable limit. |
|
| `http.body_limit` | String | `64MB` | HTTP request body limit.<br/>Support the following units are supported: `B`, `KB`, `KiB`, `MB`, `MiB`, `GB`, `GiB`, `TB`, `TiB`, `PB`, `PiB`. |
|
||||||
| `http.max_total_body_memory` | String | Unset | Maximum total memory for all concurrent HTTP request bodies.<br/>Set to 0 to disable the limit. Default: "0" (unlimited) |
|
|
||||||
| `http.enable_cors` | Bool | `true` | HTTP CORS support, it's turned on by default<br/>This allows browser to access http APIs without CORS restrictions |
|
|
||||||
| `http.cors_allowed_origins` | Array | Unset | Customize allowed origins for HTTP CORS. |
|
|
||||||
| `http.prom_validation_mode` | String | `strict` | Whether to enable validation for Prometheus remote write requests.<br/>Available options:<br/>- strict: deny invalid UTF-8 strings (default).<br/>- lossy: allow invalid UTF-8 strings, replace invalid characters with REPLACEMENT_CHARACTER(U+FFFD).<br/>- unchecked: do not valid strings. |
|
|
||||||
| `grpc` | -- | -- | The gRPC server options. |
|
| `grpc` | -- | -- | The gRPC server options. |
|
||||||
| `grpc.bind_addr` | String | `127.0.0.1:4001` | The address to bind the gRPC server. |
|
| `grpc.addr` | String | `127.0.0.1:4001` | The address to bind the gRPC server. |
|
||||||
| `grpc.runtime_size` | Integer | `8` | The number of server worker threads. |
|
| `grpc.runtime_size` | Integer | `8` | The number of server worker threads. |
|
||||||
| `grpc.max_total_message_memory` | String | Unset | Maximum total memory for all concurrent gRPC request messages.<br/>Set to 0 to disable the limit. Default: "0" (unlimited) |
|
|
||||||
| `grpc.max_connection_age` | String | Unset | The maximum connection age for gRPC connection.<br/>The value can be a human-readable time string. For example: `10m` for ten minutes or `1h` for one hour.<br/>Refer to https://grpc.io/docs/guides/keepalive/ for more details. |
|
|
||||||
| `grpc.tls` | -- | -- | gRPC server TLS options, see `mysql.tls` section. |
|
| `grpc.tls` | -- | -- | gRPC server TLS options, see `mysql.tls` section. |
|
||||||
| `grpc.tls.mode` | String | `disable` | TLS mode. |
|
| `grpc.tls.mode` | String | `disable` | TLS mode. |
|
||||||
| `grpc.tls.cert_path` | String | Unset | Certificate file path. |
|
| `grpc.tls.cert_path` | String | `None` | Certificate file path. |
|
||||||
| `grpc.tls.key_path` | String | Unset | Private key file path. |
|
| `grpc.tls.key_path` | String | `None` | Private key file path. |
|
||||||
| `grpc.tls.watch` | Bool | `false` | Watch for Certificate and key file change and auto reload.<br/>For now, gRPC tls config does not support auto reload. |
|
| `grpc.tls.watch` | Bool | `false` | Watch for Certificate and key file change and auto reload.<br/>For now, gRPC tls config does not support auto reload. |
|
||||||
| `mysql` | -- | -- | MySQL server options. |
|
| `mysql` | -- | -- | MySQL server options. |
|
||||||
| `mysql.enable` | Bool | `true` | Whether to enable. |
|
| `mysql.enable` | Bool | `true` | Whether to enable. |
|
||||||
| `mysql.addr` | String | `127.0.0.1:4002` | The addr to bind the MySQL server. |
|
| `mysql.addr` | String | `127.0.0.1:4002` | The addr to bind the MySQL server. |
|
||||||
| `mysql.runtime_size` | Integer | `2` | The number of server worker threads. |
|
| `mysql.runtime_size` | Integer | `2` | The number of server worker threads. |
|
||||||
| `mysql.keep_alive` | String | `0s` | Server-side keep-alive time.<br/>Set to 0 (default) to disable. |
|
|
||||||
| `mysql.prepared_stmt_cache_size` | Integer | `10000` | Maximum entries in the MySQL prepared statement cache; default is 10,000. |
|
|
||||||
| `mysql.tls` | -- | -- | -- |
|
| `mysql.tls` | -- | -- | -- |
|
||||||
| `mysql.tls.mode` | String | `disable` | TLS mode, refer to https://www.postgresql.org/docs/current/libpq-ssl.html<br/>- `disable` (default value)<br/>- `prefer`<br/>- `require`<br/>- `verify-ca`<br/>- `verify-full` |
|
| `mysql.tls.mode` | String | `disable` | TLS mode, refer to https://www.postgresql.org/docs/current/libpq-ssl.html<br/>- `disable` (default value)<br/>- `prefer`<br/>- `require`<br/>- `verify-ca`<br/>- `verify-full` |
|
||||||
| `mysql.tls.cert_path` | String | Unset | Certificate file path. |
|
| `mysql.tls.cert_path` | String | `None` | Certificate file path. |
|
||||||
| `mysql.tls.key_path` | String | Unset | Private key file path. |
|
| `mysql.tls.key_path` | String | `None` | Private key file path. |
|
||||||
| `mysql.tls.watch` | Bool | `false` | Watch for Certificate and key file change and auto reload |
|
| `mysql.tls.watch` | Bool | `false` | Watch for Certificate and key file change and auto reload |
|
||||||
| `postgres` | -- | -- | PostgresSQL server options. |
|
| `postgres` | -- | -- | PostgresSQL server options. |
|
||||||
| `postgres.enable` | Bool | `true` | Whether to enable |
|
| `postgres.enable` | Bool | `true` | Whether to enable |
|
||||||
| `postgres.addr` | String | `127.0.0.1:4003` | The addr to bind the PostgresSQL server. |
|
| `postgres.addr` | String | `127.0.0.1:4003` | The addr to bind the PostgresSQL server. |
|
||||||
| `postgres.runtime_size` | Integer | `2` | The number of server worker threads. |
|
| `postgres.runtime_size` | Integer | `2` | The number of server worker threads. |
|
||||||
| `postgres.keep_alive` | String | `0s` | Server-side keep-alive time.<br/>Set to 0 (default) to disable. |
|
|
||||||
| `postgres.tls` | -- | -- | PostgresSQL server TLS options, see `mysql.tls` section. |
|
| `postgres.tls` | -- | -- | PostgresSQL server TLS options, see `mysql.tls` section. |
|
||||||
| `postgres.tls.mode` | String | `disable` | TLS mode. |
|
| `postgres.tls.mode` | String | `disable` | TLS mode. |
|
||||||
| `postgres.tls.cert_path` | String | Unset | Certificate file path. |
|
| `postgres.tls.cert_path` | String | `None` | Certificate file path. |
|
||||||
| `postgres.tls.key_path` | String | Unset | Private key file path. |
|
| `postgres.tls.key_path` | String | `None` | Private key file path. |
|
||||||
| `postgres.tls.watch` | Bool | `false` | Watch for Certificate and key file change and auto reload |
|
| `postgres.tls.watch` | Bool | `false` | Watch for Certificate and key file change and auto reload |
|
||||||
| `opentsdb` | -- | -- | OpenTSDB protocol options. |
|
| `opentsdb` | -- | -- | OpenTSDB protocol options. |
|
||||||
| `opentsdb.enable` | Bool | `true` | Whether to enable OpenTSDB put in HTTP API. |
|
| `opentsdb.enable` | Bool | `true` | Whether to enable OpenTSDB put in HTTP API. |
|
||||||
| `influxdb` | -- | -- | InfluxDB protocol options. |
|
| `influxdb` | -- | -- | InfluxDB protocol options. |
|
||||||
| `influxdb.enable` | Bool | `true` | Whether to enable InfluxDB protocol in HTTP API. |
|
| `influxdb.enable` | Bool | `true` | Whether to enable InfluxDB protocol in HTTP API. |
|
||||||
| `jaeger` | -- | -- | Jaeger protocol options. |
|
|
||||||
| `jaeger.enable` | Bool | `true` | Whether to enable Jaeger protocol in HTTP API. |
|
|
||||||
| `prom_store` | -- | -- | Prometheus remote storage options |
|
| `prom_store` | -- | -- | Prometheus remote storage options |
|
||||||
| `prom_store.enable` | Bool | `true` | Whether to enable Prometheus remote write and read in HTTP API. |
|
| `prom_store.enable` | Bool | `true` | Whether to enable Prometheus remote write and read in HTTP API. |
|
||||||
| `prom_store.with_metric_engine` | Bool | `true` | Whether to store the data from Prometheus remote write in metric engine. |
|
| `prom_store.with_metric_engine` | Bool | `true` | Whether to store the data from Prometheus remote write in metric engine. |
|
||||||
| `wal` | -- | -- | The WAL options. |
|
| `wal` | -- | -- | The WAL options. |
|
||||||
| `wal.provider` | String | `raft_engine` | The provider of the WAL.<br/>- `raft_engine`: the wal is stored in the local file system by raft-engine.<br/>- `kafka`: it's remote wal that data is stored in Kafka. |
|
| `wal.provider` | String | `raft_engine` | The provider of the WAL.<br/>- `raft_engine`: the wal is stored in the local file system by raft-engine.<br/>- `kafka`: it's remote wal that data is stored in Kafka. |
|
||||||
| `wal.dir` | String | Unset | The directory to store the WAL files.<br/>**It's only used when the provider is `raft_engine`**. |
|
| `wal.dir` | String | `None` | The directory to store the WAL files.<br/>**It's only used when the provider is `raft_engine`**. |
|
||||||
| `wal.file_size` | String | `128MB` | The size of the WAL segment file.<br/>**It's only used when the provider is `raft_engine`**. |
|
| `wal.file_size` | String | `256MB` | The size of the WAL segment file.<br/>**It's only used when the provider is `raft_engine`**. |
|
||||||
| `wal.purge_threshold` | String | `1GB` | The threshold of the WAL size to trigger a purge.<br/>**It's only used when the provider is `raft_engine`**. |
|
| `wal.purge_threshold` | String | `4GB` | The threshold of the WAL size to trigger a flush.<br/>**It's only used when the provider is `raft_engine`**. |
|
||||||
| `wal.purge_interval` | String | `1m` | The interval to trigger a purge.<br/>**It's only used when the provider is `raft_engine`**. |
|
| `wal.purge_interval` | String | `10m` | The interval to trigger a flush.<br/>**It's only used when the provider is `raft_engine`**. |
|
||||||
| `wal.read_batch_size` | Integer | `128` | The read batch size.<br/>**It's only used when the provider is `raft_engine`**. |
|
| `wal.read_batch_size` | Integer | `128` | The read batch size.<br/>**It's only used when the provider is `raft_engine`**. |
|
||||||
| `wal.sync_write` | Bool | `false` | Whether to use sync write.<br/>**It's only used when the provider is `raft_engine`**. |
|
| `wal.sync_write` | Bool | `false` | Whether to use sync write.<br/>**It's only used when the provider is `raft_engine`**. |
|
||||||
| `wal.enable_log_recycle` | Bool | `true` | Whether to reuse logically truncated log files.<br/>**It's only used when the provider is `raft_engine`**. |
|
| `wal.enable_log_recycle` | Bool | `true` | Whether to reuse logically truncated log files.<br/>**It's only used when the provider is `raft_engine`**. |
|
||||||
| `wal.prefill_log_files` | Bool | `false` | Whether to pre-create log files on start up.<br/>**It's only used when the provider is `raft_engine`**. |
|
| `wal.prefill_log_files` | Bool | `false` | Whether to pre-create log files on start up.<br/>**It's only used when the provider is `raft_engine`**. |
|
||||||
| `wal.sync_period` | String | `10s` | Duration for fsyncing log files.<br/>**It's only used when the provider is `raft_engine`**. |
|
| `wal.sync_period` | String | `10s` | Duration for fsyncing log files.<br/>**It's only used when the provider is `raft_engine`**. |
|
||||||
| `wal.recovery_parallelism` | Integer | `2` | Parallelism during WAL recovery. |
|
|
||||||
| `wal.broker_endpoints` | Array | -- | The Kafka broker endpoints.<br/>**It's only used when the provider is `kafka`**. |
|
| `wal.broker_endpoints` | Array | -- | The Kafka broker endpoints.<br/>**It's only used when the provider is `kafka`**. |
|
||||||
| `wal.auto_create_topics` | Bool | `true` | Automatically create topics for WAL.<br/>Set to `true` to automatically create topics for WAL.<br/>Otherwise, use topics named `topic_name_prefix_[0..num_topics)` |
|
| `wal.max_batch_size` | String | `1MB` | The max size of a single producer batch.<br/>Warning: Kafka has a default limit of 1MB per message in a topic.<br/>**It's only used when the provider is `kafka`**. |
|
||||||
| `wal.num_topics` | Integer | `64` | Number of topics.<br/>**It's only used when the provider is `kafka`**. |
|
| `wal.linger` | String | `200ms` | The linger duration of a kafka batch producer.<br/>**It's only used when the provider is `kafka`**. |
|
||||||
| `wal.selector_type` | String | `round_robin` | Topic selector type.<br/>Available selector types:<br/>- `round_robin` (default)<br/>**It's only used when the provider is `kafka`**. |
|
|
||||||
| `wal.topic_name_prefix` | String | `greptimedb_wal_topic` | A Kafka topic is constructed by concatenating `topic_name_prefix` and `topic_id`.<br/>i.g., greptimedb_wal_topic_0, greptimedb_wal_topic_1.<br/>**It's only used when the provider is `kafka`**. |
|
|
||||||
| `wal.replication_factor` | Integer | `1` | Expected number of replicas of each partition.<br/>**It's only used when the provider is `kafka`**. |
|
|
||||||
| `wal.create_topic_timeout` | String | `30s` | Above which a topic creation operation will be cancelled.<br/>**It's only used when the provider is `kafka`**. |
|
|
||||||
| `wal.max_batch_bytes` | String | `1MB` | The max size of a single producer batch.<br/>Warning: Kafka has a default limit of 1MB per message in a topic.<br/>**It's only used when the provider is `kafka`**. |
|
|
||||||
| `wal.consumer_wait_timeout` | String | `100ms` | The consumer wait timeout.<br/>**It's only used when the provider is `kafka`**. |
|
| `wal.consumer_wait_timeout` | String | `100ms` | The consumer wait timeout.<br/>**It's only used when the provider is `kafka`**. |
|
||||||
| `wal.overwrite_entry_start_id` | Bool | `false` | Ignore missing entries during read WAL.<br/>**It's only used when the provider is `kafka`**.<br/><br/>This option ensures that when Kafka messages are deleted, the system<br/>can still successfully replay memtable data without throwing an<br/>out-of-range error.<br/>However, enabling this option might lead to unexpected data loss,<br/>as the system will skip over missing entries instead of treating<br/>them as critical errors. |
|
| `wal.backoff_init` | String | `500ms` | The initial backoff delay.<br/>**It's only used when the provider is `kafka`**. |
|
||||||
|
| `wal.backoff_max` | String | `10s` | The maximum backoff delay.<br/>**It's only used when the provider is `kafka`**. |
|
||||||
|
| `wal.backoff_base` | Integer | `2` | The exponential backoff rate, i.e. next backoff = base * current backoff.<br/>**It's only used when the provider is `kafka`**. |
|
||||||
|
| `wal.backoff_deadline` | String | `5mins` | The deadline of retries.<br/>**It's only used when the provider is `kafka`**. |
|
||||||
| `metadata_store` | -- | -- | Metadata storage options. |
|
| `metadata_store` | -- | -- | Metadata storage options. |
|
||||||
| `metadata_store.file_size` | String | `64MB` | The size of the metadata store log file. |
|
| `metadata_store.file_size` | String | `256MB` | Kv file size in bytes. |
|
||||||
| `metadata_store.purge_threshold` | String | `256MB` | The threshold of the metadata store size to trigger a purge. |
|
| `metadata_store.purge_threshold` | String | `4GB` | Kv purge threshold. |
|
||||||
| `metadata_store.purge_interval` | String | `1m` | The interval of the metadata store to trigger a purge. |
|
|
||||||
| `procedure` | -- | -- | Procedure storage options. |
|
| `procedure` | -- | -- | Procedure storage options. |
|
||||||
| `procedure.max_retry_times` | Integer | `3` | Procedure max retry time. |
|
| `procedure.max_retry_times` | Integer | `3` | Procedure max retry time. |
|
||||||
| `procedure.retry_delay` | String | `500ms` | Initial retry delay of procedures, increases exponentially |
|
| `procedure.retry_delay` | String | `500ms` | Initial retry delay of procedures, increases exponentially |
|
||||||
| `procedure.max_running_procedures` | Integer | `128` | Max running procedures.<br/>The maximum number of procedures that can be running at the same time.<br/>If the number of running procedures exceeds this limit, the procedure will be rejected. |
|
|
||||||
| `flow` | -- | -- | flow engine options. |
|
|
||||||
| `flow.num_workers` | Integer | `0` | The number of flow worker in flownode.<br/>Not setting(or set to 0) this value will use the number of CPU cores divided by 2. |
|
|
||||||
| `query` | -- | -- | The query engine options. |
|
|
||||||
| `query.parallelism` | Integer | `0` | Parallelism of the query engine.<br/>Default to 0, which means the number of CPU cores. |
|
|
||||||
| `query.memory_pool_size` | String | `50%` | Memory pool size for query execution operators (aggregation, sorting, join).<br/>Supports absolute size (e.g., "2GB", "4GB") or percentage of system memory (e.g., "20%").<br/>Setting it to 0 disables the limit (unbounded, default behavior).<br/>When this limit is reached, queries will fail with ResourceExhausted error.<br/>NOTE: This does NOT limit memory used by table scans. |
|
|
||||||
| `storage` | -- | -- | The data storage options. |
|
| `storage` | -- | -- | The data storage options. |
|
||||||
| `storage.data_home` | String | `./greptimedb_data` | The working home directory. |
|
| `storage.data_home` | String | `/tmp/greptimedb/` | The working home directory. |
|
||||||
| `storage.type` | String | `File` | The storage type used to store the data.<br/>- `File`: the data is stored in the local file system.<br/>- `S3`: the data is stored in the S3 object storage.<br/>- `Gcs`: the data is stored in the Google Cloud Storage.<br/>- `Azblob`: the data is stored in the Azure Blob Storage.<br/>- `Oss`: the data is stored in the Aliyun OSS. |
|
| `storage.type` | String | `File` | The storage type used to store the data.<br/>- `File`: the data is stored in the local file system.<br/>- `S3`: the data is stored in the S3 object storage.<br/>- `Gcs`: the data is stored in the Google Cloud Storage.<br/>- `Azblob`: the data is stored in the Azure Blob Storage.<br/>- `Oss`: the data is stored in the Aliyun OSS. |
|
||||||
| `storage.enable_read_cache` | Bool | `true` | Whether to enable read cache. If not set, the read cache will be enabled by default when using object storage. |
|
| `storage.cache_path` | String | `None` | Cache configuration for object storage such as 'S3' etc.<br/>The local file cache directory. |
|
||||||
| `storage.cache_path` | String | Unset | Read cache configuration for object storage such as 'S3' etc, it's configured by default when using object storage. It is recommended to configure it when using object storage for better performance.<br/>A local file directory, defaults to `{data_home}`. An empty string means disabling. |
|
| `storage.cache_capacity` | String | `None` | The local file cache capacity in bytes. |
|
||||||
| `storage.cache_capacity` | String | Unset | The local file cache capacity in bytes. If your disk space is sufficient, it is recommended to set it larger. |
|
| `storage.bucket` | String | `None` | The S3 bucket name.<br/>**It's only used when the storage type is `S3`, `Oss` and `Gcs`**. |
|
||||||
| `storage.bucket` | String | Unset | The S3 bucket name.<br/>**It's only used when the storage type is `S3`, `Oss` and `Gcs`**. |
|
| `storage.root` | String | `None` | The S3 data will be stored in the specified prefix, for example, `s3://${bucket}/${root}`.<br/>**It's only used when the storage type is `S3`, `Oss` and `Azblob`**. |
|
||||||
| `storage.root` | String | Unset | The S3 data will be stored in the specified prefix, for example, `s3://${bucket}/${root}`.<br/>**It's only used when the storage type is `S3`, `Oss` and `Azblob`**. |
|
| `storage.access_key_id` | String | `None` | The access key id of the aws account.<br/>It's **highly recommended** to use AWS IAM roles instead of hardcoding the access key id and secret key.<br/>**It's only used when the storage type is `S3` and `Oss`**. |
|
||||||
| `storage.access_key_id` | String | Unset | The access key id of the aws account.<br/>It's **highly recommended** to use AWS IAM roles instead of hardcoding the access key id and secret key.<br/>**It's only used when the storage type is `S3` and `Oss`**. |
|
| `storage.secret_access_key` | String | `None` | The secret access key of the aws account.<br/>It's **highly recommended** to use AWS IAM roles instead of hardcoding the access key id and secret key.<br/>**It's only used when the storage type is `S3`**. |
|
||||||
| `storage.secret_access_key` | String | Unset | The secret access key of the aws account.<br/>It's **highly recommended** to use AWS IAM roles instead of hardcoding the access key id and secret key.<br/>**It's only used when the storage type is `S3`**. |
|
| `storage.access_key_secret` | String | `None` | The secret access key of the aliyun account.<br/>**It's only used when the storage type is `Oss`**. |
|
||||||
| `storage.access_key_secret` | String | Unset | The secret access key of the aliyun account.<br/>**It's only used when the storage type is `Oss`**. |
|
| `storage.account_name` | String | `None` | The account key of the azure account.<br/>**It's only used when the storage type is `Azblob`**. |
|
||||||
| `storage.account_name` | String | Unset | The account key of the azure account.<br/>**It's only used when the storage type is `Azblob`**. |
|
| `storage.account_key` | String | `None` | The account key of the azure account.<br/>**It's only used when the storage type is `Azblob`**. |
|
||||||
| `storage.account_key` | String | Unset | The account key of the azure account.<br/>**It's only used when the storage type is `Azblob`**. |
|
| `storage.scope` | String | `None` | The scope of the google cloud storage.<br/>**It's only used when the storage type is `Gcs`**. |
|
||||||
| `storage.scope` | String | Unset | The scope of the google cloud storage.<br/>**It's only used when the storage type is `Gcs`**. |
|
| `storage.credential_path` | String | `None` | The credential path of the google cloud storage.<br/>**It's only used when the storage type is `Gcs`**. |
|
||||||
| `storage.credential_path` | String | Unset | The credential path of the google cloud storage.<br/>**It's only used when the storage type is `Gcs`**. |
|
| `storage.container` | String | `None` | The container of the azure account.<br/>**It's only used when the storage type is `Azblob`**. |
|
||||||
| `storage.credential` | String | Unset | The credential of the google cloud storage.<br/>**It's only used when the storage type is `Gcs`**. |
|
| `storage.sas_token` | String | `None` | The sas token of the azure account.<br/>**It's only used when the storage type is `Azblob`**. |
|
||||||
| `storage.container` | String | Unset | The container of the azure account.<br/>**It's only used when the storage type is `Azblob`**. |
|
| `storage.endpoint` | String | `None` | The endpoint of the S3 service.<br/>**It's only used when the storage type is `S3`, `Oss`, `Gcs` and `Azblob`**. |
|
||||||
| `storage.sas_token` | String | Unset | The sas token of the azure account.<br/>**It's only used when the storage type is `Azblob`**. |
|
| `storage.region` | String | `None` | The region of the S3 service.<br/>**It's only used when the storage type is `S3`, `Oss`, `Gcs` and `Azblob`**. |
|
||||||
| `storage.endpoint` | String | Unset | The endpoint of the S3 service.<br/>**It's only used when the storage type is `S3`, `Oss`, `Gcs` and `Azblob`**. |
|
|
||||||
| `storage.region` | String | Unset | The region of the S3 service.<br/>**It's only used when the storage type is `S3`, `Oss`, `Gcs` and `Azblob`**. |
|
|
||||||
| `storage.http_client` | -- | -- | The http client options to the storage.<br/>**It's only used when the storage type is `S3`, `Oss`, `Gcs` and `Azblob`**. |
|
|
||||||
| `storage.http_client.pool_max_idle_per_host` | Integer | `1024` | The maximum idle connection per host allowed in the pool. |
|
|
||||||
| `storage.http_client.connect_timeout` | String | `30s` | The timeout for only the connect phase of a http client. |
|
|
||||||
| `storage.http_client.timeout` | String | `30s` | The total request timeout, applied from when the request starts connecting until the response body has finished.<br/>Also considered a total deadline. |
|
|
||||||
| `storage.http_client.pool_idle_timeout` | String | `90s` | The timeout for idle sockets being kept-alive. |
|
|
||||||
| `storage.http_client.skip_ssl_validation` | Bool | `false` | To skip the ssl verification<br/>**Security Notice**: Setting `skip_ssl_validation = true` disables certificate verification, making connections vulnerable to man-in-the-middle attacks. Only use this in development or trusted private networks. |
|
|
||||||
| `[[region_engine]]` | -- | -- | The region engine options. You can configure multiple region engines. |
|
| `[[region_engine]]` | -- | -- | The region engine options. You can configure multiple region engines. |
|
||||||
| `region_engine.mito` | -- | -- | The Mito engine options. |
|
| `region_engine.mito` | -- | -- | The Mito engine options. |
|
||||||
| `region_engine.mito.num_workers` | Integer | `8` | Number of region workers. |
|
| `region_engine.mito.num_workers` | Integer | `8` | Number of region workers. |
|
||||||
@@ -138,82 +100,50 @@
|
|||||||
| `region_engine.mito.worker_request_batch_size` | Integer | `64` | Max batch size for a worker to handle requests. |
|
| `region_engine.mito.worker_request_batch_size` | Integer | `64` | Max batch size for a worker to handle requests. |
|
||||||
| `region_engine.mito.manifest_checkpoint_distance` | Integer | `10` | Number of meta action updated to trigger a new checkpoint for the manifest. |
|
| `region_engine.mito.manifest_checkpoint_distance` | Integer | `10` | Number of meta action updated to trigger a new checkpoint for the manifest. |
|
||||||
| `region_engine.mito.compress_manifest` | Bool | `false` | Whether to compress manifest and checkpoint file by gzip (default false). |
|
| `region_engine.mito.compress_manifest` | Bool | `false` | Whether to compress manifest and checkpoint file by gzip (default false). |
|
||||||
| `region_engine.mito.max_background_flushes` | Integer | Auto | Max number of running background flush jobs (default: 1/2 of cpu cores). |
|
| `region_engine.mito.max_background_jobs` | Integer | `4` | Max number of running background jobs |
|
||||||
| `region_engine.mito.max_background_compactions` | Integer | Auto | Max number of running background compaction jobs (default: 1/4 of cpu cores). |
|
|
||||||
| `region_engine.mito.max_background_purges` | Integer | Auto | Max number of running background purge jobs (default: number of cpu cores). |
|
|
||||||
| `region_engine.mito.auto_flush_interval` | String | `1h` | Interval to auto flush a region if it has not flushed yet. |
|
| `region_engine.mito.auto_flush_interval` | String | `1h` | Interval to auto flush a region if it has not flushed yet. |
|
||||||
| `region_engine.mito.global_write_buffer_size` | String | Auto | Global write buffer size for all regions. If not set, it's default to 1/8 of OS memory with a max limitation of 1GB. |
|
| `region_engine.mito.global_write_buffer_size` | String | `1GB` | Global write buffer size for all regions. If not set, it's default to 1/8 of OS memory with a max limitation of 1GB. |
|
||||||
| `region_engine.mito.global_write_buffer_reject_size` | String | Auto | Global write buffer size threshold to reject write requests. If not set, it's default to 2 times of `global_write_buffer_size`. |
|
| `region_engine.mito.global_write_buffer_reject_size` | String | `2GB` | Global write buffer size threshold to reject write requests. If not set, it's default to 2 times of `global_write_buffer_size` |
|
||||||
| `region_engine.mito.sst_meta_cache_size` | String | Auto | Cache size for SST metadata. Setting it to 0 to disable the cache.<br/>If not set, it's default to 1/32 of OS memory with a max limitation of 128MB. |
|
| `region_engine.mito.sst_meta_cache_size` | String | `128MB` | Cache size for SST metadata. Setting it to 0 to disable the cache.<br/>If not set, it's default to 1/32 of OS memory with a max limitation of 128MB. |
|
||||||
| `region_engine.mito.vector_cache_size` | String | Auto | Cache size for vectors and arrow arrays. Setting it to 0 to disable the cache.<br/>If not set, it's default to 1/16 of OS memory with a max limitation of 512MB. |
|
| `region_engine.mito.vector_cache_size` | String | `512MB` | Cache size for vectors and arrow arrays. Setting it to 0 to disable the cache.<br/>If not set, it's default to 1/16 of OS memory with a max limitation of 512MB. |
|
||||||
| `region_engine.mito.page_cache_size` | String | Auto | Cache size for pages of SST row groups. Setting it to 0 to disable the cache.<br/>If not set, it's default to 1/8 of OS memory. |
|
| `region_engine.mito.page_cache_size` | String | `512MB` | Cache size for pages of SST row groups. Setting it to 0 to disable the cache.<br/>If not set, it's default to 1/16 of OS memory with a max limitation of 512MB. |
|
||||||
| `region_engine.mito.selector_result_cache_size` | String | Auto | Cache size for time series selector (e.g. `last_value()`). Setting it to 0 to disable the cache.<br/>If not set, it's default to 1/16 of OS memory with a max limitation of 512MB. |
|
| `region_engine.mito.enable_experimental_write_cache` | Bool | `false` | Whether to enable the experimental write cache. |
|
||||||
| `region_engine.mito.enable_write_cache` | Bool | `false` | Whether to enable the write cache, it's enabled by default when using object storage. It is recommended to enable it when using object storage for better performance. |
|
| `region_engine.mito.experimental_write_cache_path` | String | `""` | File system path for write cache, defaults to `{data_home}/write_cache`. |
|
||||||
| `region_engine.mito.write_cache_path` | String | `""` | File system path for write cache, defaults to `{data_home}`. |
|
| `region_engine.mito.experimental_write_cache_size` | String | `512MB` | Capacity for write cache. |
|
||||||
| `region_engine.mito.write_cache_size` | String | `5GiB` | Capacity for write cache. If your disk space is sufficient, it is recommended to set it larger. |
|
| `region_engine.mito.experimental_write_cache_ttl` | String | `1h` | TTL for write cache. |
|
||||||
| `region_engine.mito.write_cache_ttl` | String | Unset | TTL for write cache. |
|
|
||||||
| `region_engine.mito.preload_index_cache` | Bool | `true` | Preload index (puffin) files into cache on region open (default: true).<br/>When enabled, index files are loaded into the write cache during region initialization,<br/>which can improve query performance at the cost of longer startup times. |
|
|
||||||
| `region_engine.mito.index_cache_percent` | Integer | `20` | Percentage of write cache capacity allocated for index (puffin) files (default: 20).<br/>The remaining capacity is used for data (parquet) files.<br/>Must be between 0 and 100 (exclusive). For example, with a 5GiB write cache and 20% allocation,<br/>1GiB is reserved for index files and 4GiB for data files. |
|
|
||||||
| `region_engine.mito.sst_write_buffer_size` | String | `8MB` | Buffer size for SST writing. |
|
| `region_engine.mito.sst_write_buffer_size` | String | `8MB` | Buffer size for SST writing. |
|
||||||
|
| `region_engine.mito.scan_parallelism` | Integer | `0` | Parallelism to scan a region (default: 1/4 of cpu cores).<br/>- `0`: using the default value (1/4 of cpu cores).<br/>- `1`: scan in current thread.<br/>- `n`: scan in parallelism n. |
|
||||||
| `region_engine.mito.parallel_scan_channel_size` | Integer | `32` | Capacity of the channel to send data from parallel scan tasks to the main task. |
|
| `region_engine.mito.parallel_scan_channel_size` | Integer | `32` | Capacity of the channel to send data from parallel scan tasks to the main task. |
|
||||||
| `region_engine.mito.max_concurrent_scan_files` | Integer | `384` | Maximum number of SST files to scan concurrently. |
|
|
||||||
| `region_engine.mito.allow_stale_entries` | Bool | `false` | Whether to allow stale WAL entries read during replay. |
|
| `region_engine.mito.allow_stale_entries` | Bool | `false` | Whether to allow stale WAL entries read during replay. |
|
||||||
| `region_engine.mito.scan_memory_limit` | String | `50%` | Memory limit for table scans across all queries.<br/>Supports absolute size (e.g., "2GB") or percentage of system memory (e.g., "20%").<br/>Setting it to 0 disables the limit.<br/>NOTE: Works with max_concurrent_queries for tiered memory allocation.<br/>- If max_concurrent_queries is set: 70% of queries get full access, 30% get 70% access.<br/>- If max_concurrent_queries is 0 (unlimited): first 20 queries get full access, rest get 70% access. |
|
|
||||||
| `region_engine.mito.min_compaction_interval` | String | `0m` | Minimum time interval between two compactions.<br/>To align with the old behavior, the default value is 0 (no restrictions). |
|
|
||||||
| `region_engine.mito.default_experimental_flat_format` | Bool | `false` | Whether to enable experimental flat format as the default format. |
|
|
||||||
| `region_engine.mito.index` | -- | -- | The options for index in Mito engine. |
|
|
||||||
| `region_engine.mito.index.aux_path` | String | `""` | Auxiliary directory path for the index in filesystem, used to store intermediate files for<br/>creating the index and staging files for searching the index, defaults to `{data_home}/index_intermediate`.<br/>The default name for this directory is `index_intermediate` for backward compatibility.<br/><br/>This path contains two subdirectories:<br/>- `__intm`: for storing intermediate files used during creating index.<br/>- `staging`: for storing staging files used during searching index. |
|
|
||||||
| `region_engine.mito.index.staging_size` | String | `2GB` | The max capacity of the staging directory. |
|
|
||||||
| `region_engine.mito.index.staging_ttl` | String | `7d` | The TTL of the staging directory.<br/>Defaults to 7 days.<br/>Setting it to "0s" to disable TTL. |
|
|
||||||
| `region_engine.mito.index.metadata_cache_size` | String | `64MiB` | Cache size for inverted index metadata. |
|
|
||||||
| `region_engine.mito.index.content_cache_size` | String | `128MiB` | Cache size for inverted index content. |
|
|
||||||
| `region_engine.mito.index.content_cache_page_size` | String | `64KiB` | Page size for inverted index content cache. |
|
|
||||||
| `region_engine.mito.index.result_cache_size` | String | `128MiB` | Cache size for index result. |
|
|
||||||
| `region_engine.mito.inverted_index` | -- | -- | The options for inverted index in Mito engine. |
|
| `region_engine.mito.inverted_index` | -- | -- | The options for inverted index in Mito engine. |
|
||||||
| `region_engine.mito.inverted_index.create_on_flush` | String | `auto` | Whether to create the index on flush.<br/>- `auto`: automatically (default)<br/>- `disable`: never |
|
| `region_engine.mito.inverted_index.create_on_flush` | String | `auto` | Whether to create the index on flush.<br/>- `auto`: automatically<br/>- `disable`: never |
|
||||||
| `region_engine.mito.inverted_index.create_on_compaction` | String | `auto` | Whether to create the index on compaction.<br/>- `auto`: automatically (default)<br/>- `disable`: never |
|
| `region_engine.mito.inverted_index.create_on_compaction` | String | `auto` | Whether to create the index on compaction.<br/>- `auto`: automatically<br/>- `disable`: never |
|
||||||
| `region_engine.mito.inverted_index.apply_on_query` | String | `auto` | Whether to apply the index on query<br/>- `auto`: automatically (default)<br/>- `disable`: never |
|
| `region_engine.mito.inverted_index.apply_on_query` | String | `auto` | Whether to apply the index on query<br/>- `auto`: automatically<br/>- `disable`: never |
|
||||||
| `region_engine.mito.inverted_index.mem_threshold_on_create` | String | `auto` | Memory threshold for performing an external sort during index creation.<br/>- `auto`: automatically determine the threshold based on the system memory size (default)<br/>- `unlimited`: no memory limit<br/>- `[size]` e.g. `64MB`: fixed memory threshold |
|
| `region_engine.mito.inverted_index.mem_threshold_on_create` | String | `64M` | Memory threshold for performing an external sort during index creation.<br/>Setting to empty will disable external sorting, forcing all sorting operations to happen in memory. |
|
||||||
| `region_engine.mito.inverted_index.intermediate_path` | String | `""` | Deprecated, use `region_engine.mito.index.aux_path` instead. |
|
| `region_engine.mito.inverted_index.intermediate_path` | String | `""` | File system path to store intermediate files for external sorting (default `{data_home}/index_intermediate`). |
|
||||||
| `region_engine.mito.fulltext_index` | -- | -- | The options for full-text index in Mito engine. |
|
|
||||||
| `region_engine.mito.fulltext_index.create_on_flush` | String | `auto` | Whether to create the index on flush.<br/>- `auto`: automatically (default)<br/>- `disable`: never |
|
|
||||||
| `region_engine.mito.fulltext_index.create_on_compaction` | String | `auto` | Whether to create the index on compaction.<br/>- `auto`: automatically (default)<br/>- `disable`: never |
|
|
||||||
| `region_engine.mito.fulltext_index.apply_on_query` | String | `auto` | Whether to apply the index on query<br/>- `auto`: automatically (default)<br/>- `disable`: never |
|
|
||||||
| `region_engine.mito.fulltext_index.mem_threshold_on_create` | String | `auto` | Memory threshold for index creation.<br/>- `auto`: automatically determine the threshold based on the system memory size (default)<br/>- `unlimited`: no memory limit<br/>- `[size]` e.g. `64MB`: fixed memory threshold |
|
|
||||||
| `region_engine.mito.bloom_filter_index` | -- | -- | The options for bloom filter in Mito engine. |
|
|
||||||
| `region_engine.mito.bloom_filter_index.create_on_flush` | String | `auto` | Whether to create the bloom filter on flush.<br/>- `auto`: automatically (default)<br/>- `disable`: never |
|
|
||||||
| `region_engine.mito.bloom_filter_index.create_on_compaction` | String | `auto` | Whether to create the bloom filter on compaction.<br/>- `auto`: automatically (default)<br/>- `disable`: never |
|
|
||||||
| `region_engine.mito.bloom_filter_index.apply_on_query` | String | `auto` | Whether to apply the bloom filter on query<br/>- `auto`: automatically (default)<br/>- `disable`: never |
|
|
||||||
| `region_engine.mito.bloom_filter_index.mem_threshold_on_create` | String | `auto` | Memory threshold for bloom filter creation.<br/>- `auto`: automatically determine the threshold based on the system memory size (default)<br/>- `unlimited`: no memory limit<br/>- `[size]` e.g. `64MB`: fixed memory threshold |
|
|
||||||
| `region_engine.mito.memtable` | -- | -- | -- |
|
| `region_engine.mito.memtable` | -- | -- | -- |
|
||||||
| `region_engine.mito.memtable.type` | String | `time_series` | Memtable type.<br/>- `time_series`: time-series memtable<br/>- `partition_tree`: partition tree memtable (experimental) |
|
| `region_engine.mito.memtable.type` | String | `time_series` | Memtable type.<br/>- `time_series`: time-series memtable<br/>- `partition_tree`: partition tree memtable (experimental) |
|
||||||
| `region_engine.mito.memtable.index_max_keys_per_shard` | Integer | `8192` | The max number of keys in one shard.<br/>Only available for `partition_tree` memtable. |
|
| `region_engine.mito.memtable.index_max_keys_per_shard` | Integer | `8192` | The max number of keys in one shard.<br/>Only available for `partition_tree` memtable. |
|
||||||
| `region_engine.mito.memtable.data_freeze_threshold` | Integer | `32768` | The max rows of data inside the actively writing buffer in one shard.<br/>Only available for `partition_tree` memtable. |
|
| `region_engine.mito.memtable.data_freeze_threshold` | Integer | `32768` | The max rows of data inside the actively writing buffer in one shard.<br/>Only available for `partition_tree` memtable. |
|
||||||
| `region_engine.mito.memtable.fork_dictionary_bytes` | String | `1GiB` | Max dictionary bytes.<br/>Only available for `partition_tree` memtable. |
|
| `region_engine.mito.memtable.fork_dictionary_bytes` | String | `1GiB` | Max dictionary bytes.<br/>Only available for `partition_tree` memtable. |
|
||||||
| `region_engine.file` | -- | -- | Enable the file engine. |
|
|
||||||
| `region_engine.metric` | -- | -- | Metric engine options. |
|
|
||||||
| `region_engine.metric.sparse_primary_key_encoding` | Bool | `true` | Whether to use sparse primary key encoding. |
|
|
||||||
| `logging` | -- | -- | The logging options. |
|
| `logging` | -- | -- | The logging options. |
|
||||||
| `logging.dir` | String | `./greptimedb_data/logs` | The directory to store the log files. If set to empty, logs will not be written to files. |
|
| `logging.dir` | String | `/tmp/greptimedb/logs` | The directory to store the log files. |
|
||||||
| `logging.level` | String | Unset | The log level. Can be `info`/`debug`/`warn`/`error`. |
|
| `logging.level` | String | `None` | The log level. Can be `info`/`debug`/`warn`/`error`. |
|
||||||
| `logging.enable_otlp_tracing` | Bool | `false` | Enable OTLP tracing. |
|
| `logging.enable_otlp_tracing` | Bool | `false` | Enable OTLP tracing. |
|
||||||
| `logging.otlp_endpoint` | String | `http://localhost:4318/v1/traces` | The OTLP tracing endpoint. |
|
| `logging.otlp_endpoint` | String | `None` | The OTLP tracing endpoint. |
|
||||||
| `logging.append_stdout` | Bool | `true` | Whether to append logs to stdout. |
|
| `logging.append_stdout` | Bool | `true` | Whether to append logs to stdout. |
|
||||||
| `logging.log_format` | String | `text` | The log format. Can be `text`/`json`. |
|
| `logging.tracing_sample_ratio` | -- | -- | The percentage of tracing will be sampled and exported.<br/>Valid range `[0, 1]`, 1 means all traces are sampled, 0 means all traces are not sampled, the default value is 1.<br/>ratio > 1 are treated as 1. Fractions < 0 are treated as 0 |
|
||||||
| `logging.max_log_files` | Integer | `720` | The maximum amount of log files. |
|
|
||||||
| `logging.otlp_export_protocol` | String | `http` | The OTLP tracing export protocol. Can be `grpc`/`http`. |
|
|
||||||
| `logging.otlp_headers` | -- | -- | Additional OTLP headers, only valid when using OTLP http |
|
|
||||||
| `logging.tracing_sample_ratio` | -- | Unset | The percentage of tracing will be sampled and exported.<br/>Valid range `[0, 1]`, 1 means all traces are sampled, 0 means all traces are not sampled, the default value is 1.<br/>ratio > 1 are treated as 1. Fractions < 0 are treated as 0 |
|
|
||||||
| `logging.tracing_sample_ratio.default_ratio` | Float | `1.0` | -- |
|
| `logging.tracing_sample_ratio.default_ratio` | Float | `1.0` | -- |
|
||||||
| `slow_query` | -- | -- | The slow query log options. |
|
| `export_metrics` | -- | -- | The datanode can export its metrics and send to Prometheus compatible service (e.g. send to `greptimedb` itself) from remote-write API.<br/>This is only used for `greptimedb` to export its own metrics internally. It's different from prometheus scrape. |
|
||||||
| `slow_query.enable` | Bool | `false` | Whether to enable slow query log. |
|
| `export_metrics.enable` | Bool | `false` | whether enable export metrics. |
|
||||||
| `slow_query.record_type` | String | Unset | The record type of slow queries. It can be `system_table` or `log`. |
|
| `export_metrics.write_interval` | String | `30s` | The interval of export metrics. |
|
||||||
| `slow_query.threshold` | String | Unset | The threshold of slow query. |
|
| `export_metrics.self_import` | -- | -- | For `standalone` mode, `self_import` is recommend to collect metrics generated by itself |
|
||||||
| `slow_query.sample_ratio` | Float | Unset | The sampling ratio of slow query log. The value should be in the range of (0, 1]. |
|
| `export_metrics.self_import.db` | String | `None` | -- |
|
||||||
|
| `export_metrics.remote_write` | -- | -- | -- |
|
||||||
|
| `export_metrics.remote_write.url` | String | `""` | The url the metrics send to. The url example can be: `http://127.0.0.1:4000/v1/prometheus/write?db=information_schema`. |
|
||||||
|
| `export_metrics.remote_write.headers` | InlineTable | -- | HTTP headers of Prometheus remote-write carry. |
|
||||||
| `tracing` | -- | -- | The tracing options. Only effect when compiled with `tokio-console` feature. |
|
| `tracing` | -- | -- | The tracing options. Only effect when compiled with `tokio-console` feature. |
|
||||||
| `tracing.tokio_console_addr` | String | Unset | The tokio console address. |
|
| `tracing.tokio_console_addr` | String | `None` | The tokio console address. |
|
||||||
| `memory` | -- | -- | The memory options. |
|
|
||||||
| `memory.enable_heap_profiling` | Bool | `true` | Whether to enable heap profiling activation during startup.<br/>When enabled, heap profiling will be activated if the `MALLOC_CONF` environment variable<br/>is set to "prof:true,prof_active:false". The official image adds this env variable.<br/>Default is true. |
|
|
||||||
|
|
||||||
|
|
||||||
## Distributed Mode
|
## Distributed Mode
|
||||||
@@ -222,240 +152,161 @@
|
|||||||
|
|
||||||
| Key | Type | Default | Descriptions |
|
| Key | Type | Default | Descriptions |
|
||||||
| --- | -----| ------- | ----------- |
|
| --- | -----| ------- | ----------- |
|
||||||
| `default_timezone` | String | Unset | The default timezone of the server. |
|
| `mode` | String | `standalone` | The running mode of the datanode. It can be `standalone` or `distributed`. |
|
||||||
| `default_column_prefix` | String | Unset | The default column prefix for auto-created time index and value columns. |
|
| `default_timezone` | String | `None` | The default timezone of the server. |
|
||||||
| `max_in_flight_write_bytes` | String | Unset | The maximum in-flight write bytes. |
|
|
||||||
| `runtime` | -- | -- | The runtime options. |
|
|
||||||
| `runtime.global_rt_size` | Integer | `8` | The number of threads to execute the runtime for global read operations. |
|
|
||||||
| `runtime.compact_rt_size` | Integer | `4` | The number of threads to execute the runtime for global write operations. |
|
|
||||||
| `heartbeat` | -- | -- | The heartbeat options. |
|
| `heartbeat` | -- | -- | The heartbeat options. |
|
||||||
| `heartbeat.interval` | String | `18s` | Interval for sending heartbeat messages to the metasrv. |
|
| `heartbeat.interval` | String | `18s` | Interval for sending heartbeat messages to the metasrv. |
|
||||||
| `heartbeat.retry_interval` | String | `3s` | Interval for retrying to send heartbeat messages to the metasrv. |
|
| `heartbeat.retry_interval` | String | `3s` | Interval for retrying to send heartbeat messages to the metasrv. |
|
||||||
| `http` | -- | -- | The HTTP server options. |
|
| `http` | -- | -- | The HTTP server options. |
|
||||||
| `http.addr` | String | `127.0.0.1:4000` | The address to bind the HTTP server. |
|
| `http.addr` | String | `127.0.0.1:4000` | The address to bind the HTTP server. |
|
||||||
| `http.timeout` | String | `0s` | HTTP request timeout. Set to 0 to disable timeout. |
|
| `http.timeout` | String | `30s` | HTTP request timeout. |
|
||||||
| `http.body_limit` | String | `64MB` | HTTP request body limit.<br/>The following units are supported: `B`, `KB`, `KiB`, `MB`, `MiB`, `GB`, `GiB`, `TB`, `TiB`, `PB`, `PiB`.<br/>Set to 0 to disable limit. |
|
| `http.body_limit` | String | `64MB` | HTTP request body limit.<br/>Support the following units are supported: `B`, `KB`, `KiB`, `MB`, `MiB`, `GB`, `GiB`, `TB`, `TiB`, `PB`, `PiB`. |
|
||||||
| `http.max_total_body_memory` | String | Unset | Maximum total memory for all concurrent HTTP request bodies.<br/>Set to 0 to disable the limit. Default: "0" (unlimited) |
|
|
||||||
| `http.enable_cors` | Bool | `true` | HTTP CORS support, it's turned on by default<br/>This allows browser to access http APIs without CORS restrictions |
|
|
||||||
| `http.cors_allowed_origins` | Array | Unset | Customize allowed origins for HTTP CORS. |
|
|
||||||
| `http.prom_validation_mode` | String | `strict` | Whether to enable validation for Prometheus remote write requests.<br/>Available options:<br/>- strict: deny invalid UTF-8 strings (default).<br/>- lossy: allow invalid UTF-8 strings, replace invalid characters with REPLACEMENT_CHARACTER(U+FFFD).<br/>- unchecked: do not valid strings. |
|
|
||||||
| `grpc` | -- | -- | The gRPC server options. |
|
| `grpc` | -- | -- | The gRPC server options. |
|
||||||
| `grpc.bind_addr` | String | `127.0.0.1:4001` | The address to bind the gRPC server. |
|
| `grpc.addr` | String | `127.0.0.1:4001` | The address to bind the gRPC server. |
|
||||||
| `grpc.server_addr` | String | `127.0.0.1:4001` | The address advertised to the metasrv, and used for connections from outside the host.<br/>If left empty or unset, the server will automatically use the IP address of the first network interface<br/>on the host, with the same port number as the one specified in `grpc.bind_addr`. |
|
|
||||||
| `grpc.runtime_size` | Integer | `8` | The number of server worker threads. |
|
| `grpc.runtime_size` | Integer | `8` | The number of server worker threads. |
|
||||||
| `grpc.max_total_message_memory` | String | Unset | Maximum total memory for all concurrent gRPC request messages.<br/>Set to 0 to disable the limit. Default: "0" (unlimited) |
|
|
||||||
| `grpc.flight_compression` | String | `arrow_ipc` | Compression mode for frontend side Arrow IPC service. Available options:<br/>- `none`: disable all compression<br/>- `transport`: only enable gRPC transport compression (zstd)<br/>- `arrow_ipc`: only enable Arrow IPC compression (lz4)<br/>- `all`: enable all compression.<br/>Default to `none` |
|
|
||||||
| `grpc.max_connection_age` | String | Unset | The maximum connection age for gRPC connection.<br/>The value can be a human-readable time string. For example: `10m` for ten minutes or `1h` for one hour.<br/>Refer to https://grpc.io/docs/guides/keepalive/ for more details. |
|
|
||||||
| `grpc.tls` | -- | -- | gRPC server TLS options, see `mysql.tls` section. |
|
| `grpc.tls` | -- | -- | gRPC server TLS options, see `mysql.tls` section. |
|
||||||
| `grpc.tls.mode` | String | `disable` | TLS mode. |
|
| `grpc.tls.mode` | String | `disable` | TLS mode. |
|
||||||
| `grpc.tls.cert_path` | String | Unset | Certificate file path. |
|
| `grpc.tls.cert_path` | String | `None` | Certificate file path. |
|
||||||
| `grpc.tls.key_path` | String | Unset | Private key file path. |
|
| `grpc.tls.key_path` | String | `None` | Private key file path. |
|
||||||
| `grpc.tls.watch` | Bool | `false` | Watch for Certificate and key file change and auto reload.<br/>For now, gRPC tls config does not support auto reload. |
|
| `grpc.tls.watch` | Bool | `false` | Watch for Certificate and key file change and auto reload.<br/>For now, gRPC tls config does not support auto reload. |
|
||||||
| `internal_grpc` | -- | -- | The internal gRPC server options. Internal gRPC port for nodes inside cluster to access frontend. |
|
|
||||||
| `internal_grpc.bind_addr` | String | `127.0.0.1:4010` | The address to bind the gRPC server. |
|
|
||||||
| `internal_grpc.server_addr` | String | `127.0.0.1:4010` | The address advertised to the metasrv, and used for connections from outside the host.<br/>If left empty or unset, the server will automatically use the IP address of the first network interface<br/>on the host, with the same port number as the one specified in `grpc.bind_addr`. |
|
|
||||||
| `internal_grpc.runtime_size` | Integer | `8` | The number of server worker threads. |
|
|
||||||
| `internal_grpc.flight_compression` | String | `arrow_ipc` | Compression mode for the frontend-side Arrow IPC service. Available options:<br/>- `none`: disable all compression<br/>- `transport`: only enable gRPC transport compression (zstd)<br/>- `arrow_ipc`: only enable Arrow IPC compression (lz4)<br/>- `all`: enable all compression<br/>Defaults to `arrow_ipc`. |
|
|
||||||
| `internal_grpc.tls` | -- | -- | internal gRPC server TLS options, see `mysql.tls` section. |
|
|
||||||
| `internal_grpc.tls.mode` | String | `disable` | TLS mode. |
|
|
||||||
| `internal_grpc.tls.cert_path` | String | Unset | Certificate file path. |
|
|
||||||
| `internal_grpc.tls.key_path` | String | Unset | Private key file path. |
|
|
||||||
| `internal_grpc.tls.watch` | Bool | `false` | Watch for certificate and key file changes and reload automatically.<br/>For now, the gRPC TLS config does not support auto reload. |
|
|
||||||
| `mysql` | -- | -- | MySQL server options. |
|
| `mysql` | -- | -- | MySQL server options. |
|
||||||
| `mysql.enable` | Bool | `true` | Whether to enable. |
|
| `mysql.enable` | Bool | `true` | Whether to enable. |
|
||||||
| `mysql.addr` | String | `127.0.0.1:4002` | The address to bind the MySQL server. |
|
| `mysql.addr` | String | `127.0.0.1:4002` | The address to bind the MySQL server. |
|
||||||
| `mysql.runtime_size` | Integer | `2` | The number of server worker threads. |
|
| `mysql.runtime_size` | Integer | `2` | The number of server worker threads. |
|
||||||
| `mysql.keep_alive` | String | `0s` | Server-side keep-alive time.<br/>Set to 0 (default) to disable. |
|
|
||||||
| `mysql.prepared_stmt_cache_size` | Integer | `10000` | Maximum entries in the MySQL prepared statement cache; default is 10,000. |
|
|
||||||
| `mysql.tls` | -- | -- | -- |
|
| `mysql.tls` | -- | -- | -- |
|
||||||
| `mysql.tls.mode` | String | `disable` | TLS mode, refer to https://www.postgresql.org/docs/current/libpq-ssl.html<br/>- `disable` (default value)<br/>- `prefer`<br/>- `require`<br/>- `verify-ca`<br/>- `verify-full` |
|
| `mysql.tls.mode` | String | `disable` | TLS mode, refer to https://www.postgresql.org/docs/current/libpq-ssl.html<br/>- `disable` (default value)<br/>- `prefer`<br/>- `require`<br/>- `verify-ca`<br/>- `verify-full` |
|
||||||
| `mysql.tls.cert_path` | String | Unset | Certificate file path. |
|
| `mysql.tls.cert_path` | String | `None` | Certificate file path. |
|
||||||
| `mysql.tls.key_path` | String | Unset | Private key file path. |
|
| `mysql.tls.key_path` | String | `None` | Private key file path. |
|
||||||
| `mysql.tls.watch` | Bool | `false` | Watch for certificate and key file changes and reload automatically. |
|
| `mysql.tls.watch` | Bool | `false` | Watch for certificate and key file changes and reload automatically. |
|
||||||
| `postgres` | -- | -- | PostgreSQL server options. |
|
| `postgres` | -- | -- | PostgreSQL server options. |
|
||||||
| `postgres.enable` | Bool | `true` | Whether to enable the PostgreSQL server. |
|
| `postgres.enable` | Bool | `true` | Whether to enable the PostgreSQL server. |
|
||||||
| `postgres.addr` | String | `127.0.0.1:4003` | The address to bind the PostgreSQL server. |
|
| `postgres.addr` | String | `127.0.0.1:4003` | The address to bind the PostgreSQL server. |
|
||||||
| `postgres.runtime_size` | Integer | `2` | The number of server worker threads. |
|
| `postgres.runtime_size` | Integer | `2` | The number of server worker threads. |
|
||||||
| `postgres.keep_alive` | String | `0s` | Server-side keep-alive time.<br/>Set to 0 (default) to disable. |
|
|
||||||
| `postgres.tls` | -- | -- | PostgreSQL server TLS options, see `mysql.tls` section. |
|
| `postgres.tls` | -- | -- | PostgreSQL server TLS options, see `mysql.tls` section. |
|
||||||
| `postgres.tls.mode` | String | `disable` | TLS mode. |
|
| `postgres.tls.mode` | String | `disable` | TLS mode. |
|
||||||
| `postgres.tls.cert_path` | String | Unset | Certificate file path. |
|
| `postgres.tls.cert_path` | String | `None` | Certificate file path. |
|
||||||
| `postgres.tls.key_path` | String | Unset | Private key file path. |
|
| `postgres.tls.key_path` | String | `None` | Private key file path. |
|
||||||
| `postgres.tls.watch` | Bool | `false` | Watch for certificate and key file changes and reload automatically. |
|
| `postgres.tls.watch` | Bool | `false` | Watch for certificate and key file changes and reload automatically. |
|
||||||
| `opentsdb` | -- | -- | OpenTSDB protocol options. |
|
| `opentsdb` | -- | -- | OpenTSDB protocol options. |
|
||||||
| `opentsdb.enable` | Bool | `true` | Whether to enable OpenTSDB put in HTTP API. |
|
| `opentsdb.enable` | Bool | `true` | Whether to enable OpenTSDB put in HTTP API. |
|
||||||
| `influxdb` | -- | -- | InfluxDB protocol options. |
|
| `influxdb` | -- | -- | InfluxDB protocol options. |
|
||||||
| `influxdb.enable` | Bool | `true` | Whether to enable InfluxDB protocol in HTTP API. |
|
| `influxdb.enable` | Bool | `true` | Whether to enable InfluxDB protocol in HTTP API. |
|
||||||
| `jaeger` | -- | -- | Jaeger protocol options. |
|
|
||||||
| `jaeger.enable` | Bool | `true` | Whether to enable Jaeger protocol in HTTP API. |
|
|
||||||
| `prom_store` | -- | -- | Prometheus remote storage options |
|
| `prom_store` | -- | -- | Prometheus remote storage options |
|
||||||
| `prom_store.enable` | Bool | `true` | Whether to enable Prometheus remote write and read in HTTP API. |
|
| `prom_store.enable` | Bool | `true` | Whether to enable Prometheus remote write and read in HTTP API. |
|
||||||
| `prom_store.with_metric_engine` | Bool | `true` | Whether to store the data from Prometheus remote write in metric engine. |
|
| `prom_store.with_metric_engine` | Bool | `true` | Whether to store the data from Prometheus remote write in metric engine. |
|
||||||
| `meta_client` | -- | -- | The metasrv client options. |
|
| `meta_client` | -- | -- | The metasrv client options. |
|
||||||
| `meta_client.metasrv_addrs` | Array | -- | The addresses of the metasrv. |
|
| `meta_client.metasrv_addrs` | Array | -- | The addresses of the metasrv. |
|
||||||
| `meta_client.timeout` | String | `3s` | Operation timeout. |
|
| `meta_client.timeout` | String | `3s` | Operation timeout. |
|
||||||
|
| `meta_client.heartbeat_timeout` | String | `500ms` | Heartbeat timeout. |
|
||||||
| `meta_client.ddl_timeout` | String | `10s` | DDL timeout. |
|
| `meta_client.ddl_timeout` | String | `10s` | DDL timeout. |
|
||||||
| `meta_client.connect_timeout` | String | `1s` | Connect server timeout. |
|
| `meta_client.connect_timeout` | String | `1s` | Connect server timeout. |
|
||||||
| `meta_client.tcp_nodelay` | Bool | `true` | `TCP_NODELAY` option for accepted connections. |
|
| `meta_client.tcp_nodelay` | Bool | `true` | `TCP_NODELAY` option for accepted connections. |
|
||||||
| `meta_client.metadata_cache_max_capacity` | Integer | `100000` | The maximum capacity of the metadata cache. |
|
| `meta_client.metadata_cache_max_capacity` | Integer | `100000` | The maximum capacity of the metadata cache. |
|
||||||
| `meta_client.metadata_cache_ttl` | String | `10m` | TTL of the metadata cache. |
|
| `meta_client.metadata_cache_ttl` | String | `10m` | TTL of the metadata cache. |
|
||||||
| `meta_client.metadata_cache_tti` | String | `5m` | -- |
|
| `meta_client.metadata_cache_tti` | String | `5m` | -- |
|
||||||
| `query` | -- | -- | The query engine options. |
|
|
||||||
| `query.parallelism` | Integer | `0` | Parallelism of the query engine.<br/>Defaults to 0, which means the number of CPU cores. |
|
|
||||||
| `query.allow_query_fallback` | Bool | `false` | Whether to allow query fallback when pushdown optimization fails.<br/>Defaults to false, meaning an error is returned when pushdown optimization fails. |
|
|
||||||
| `query.memory_pool_size` | String | `50%` | Memory pool size for query execution operators (aggregation, sorting, join).<br/>Supports absolute size (e.g., "4GB", "8GB") or percentage of system memory (e.g., "30%").<br/>Setting it to 0 disables the limit (unbounded, default behavior).<br/>When this limit is reached, queries will fail with ResourceExhausted error.<br/>NOTE: This does NOT limit memory used by table scans (only applies to datanodes). |
|
|
||||||
| `datanode` | -- | -- | Datanode options. |
|
| `datanode` | -- | -- | Datanode options. |
|
||||||
| `datanode.client` | -- | -- | Datanode client options. |
|
| `datanode.client` | -- | -- | Datanode client options. |
|
||||||
| `datanode.client.connect_timeout` | String | `10s` | -- |
|
| `datanode.client.connect_timeout` | String | `10s` | -- |
|
||||||
| `datanode.client.tcp_nodelay` | Bool | `true` | -- |
|
| `datanode.client.tcp_nodelay` | Bool | `true` | -- |
|
||||||
| `logging` | -- | -- | The logging options. |
|
| `logging` | -- | -- | The logging options. |
|
||||||
| `logging.dir` | String | `./greptimedb_data/logs` | The directory to store the log files. If set to empty, logs will not be written to files. |
|
| `logging.dir` | String | `/tmp/greptimedb/logs` | The directory to store the log files. |
|
||||||
| `logging.level` | String | Unset | The log level. Can be `info`/`debug`/`warn`/`error`. |
|
| `logging.level` | String | `None` | The log level. Can be `info`/`debug`/`warn`/`error`. |
|
||||||
| `logging.enable_otlp_tracing` | Bool | `false` | Enable OTLP tracing. |
|
| `logging.enable_otlp_tracing` | Bool | `false` | Enable OTLP tracing. |
|
||||||
| `logging.otlp_endpoint` | String | `http://localhost:4318/v1/traces` | The OTLP tracing endpoint. |
|
| `logging.otlp_endpoint` | String | `None` | The OTLP tracing endpoint. |
|
||||||
| `logging.append_stdout` | Bool | `true` | Whether to append logs to stdout. |
|
| `logging.append_stdout` | Bool | `true` | Whether to append logs to stdout. |
|
||||||
| `logging.log_format` | String | `text` | The log format. Can be `text`/`json`. |
|
| `logging.tracing_sample_ratio` | -- | -- | The percentage of tracing will be sampled and exported.<br/>Valid range `[0, 1]`, 1 means all traces are sampled, 0 means all traces are not sampled, the default value is 1.<br/>ratio > 1 are treated as 1. Fractions < 0 are treated as 0 |
|
||||||
| `logging.max_log_files` | Integer | `720` | The maximum number of log files. |
|
|
||||||
| `logging.otlp_export_protocol` | String | `http` | The OTLP tracing export protocol. Can be `grpc`/`http`. |
|
|
||||||
| `logging.otlp_headers` | -- | -- | Additional OTLP headers. Only valid when using OTLP over HTTP. |
|
|
||||||
| `logging.tracing_sample_ratio` | -- | Unset | The percentage of tracing will be sampled and exported.<br/>Valid range `[0, 1]`, 1 means all traces are sampled, 0 means all traces are not sampled, the default value is 1.<br/>ratio > 1 are treated as 1. Fractions < 0 are treated as 0 |
|
|
||||||
| `logging.tracing_sample_ratio.default_ratio` | Float | `1.0` | -- |
|
| `logging.tracing_sample_ratio.default_ratio` | Float | `1.0` | -- |
|
||||||
| `slow_query` | -- | -- | The slow query log options. |
|
| `export_metrics` | -- | -- | The datanode can export its metrics and send to Prometheus compatible service (e.g. send to `greptimedb` itself) from remote-write API.<br/>This is only used for `greptimedb` to export its own metrics internally. It's different from prometheus scrape. |
|
||||||
| `slow_query.enable` | Bool | `true` | Whether to enable slow query log. |
|
| `export_metrics.enable` | Bool | `false` | whether enable export metrics. |
|
||||||
| `slow_query.record_type` | String | `system_table` | The record type of slow queries. It can be `system_table` or `log`.<br/>If `system_table` is selected, the slow queries will be recorded in a system table `greptime_private.slow_queries`.<br/>If `log` is selected, the slow queries will be logged in a log file `greptimedb-slow-queries.*`. |
|
| `export_metrics.write_interval` | String | `30s` | The interval of export metrics. |
|
||||||
| `slow_query.threshold` | String | `30s` | The threshold of slow query. It can be human readable time string, for example: `10s`, `100ms`, `1s`. |
|
| `export_metrics.self_import` | -- | -- | For `standalone` mode, `self_import` is recommend to collect metrics generated by itself |
|
||||||
| `slow_query.sample_ratio` | Float | `1.0` | The sampling ratio of slow query log. The value should be in the range of (0, 1]. For example, `0.1` means 10% of the slow queries will be logged and `1.0` means all slow queries will be logged. |
|
| `export_metrics.self_import.db` | String | `None` | -- |
|
||||||
| `slow_query.ttl` | String | `90d` | The TTL of the `slow_queries` system table. Default is `90d` when `record_type` is `system_table`. |
|
| `export_metrics.remote_write` | -- | -- | -- |
|
||||||
|
| `export_metrics.remote_write.url` | String | `""` | The url the metrics send to. The url example can be: `http://127.0.0.1:4000/v1/prometheus/write?db=information_schema`. |
|
||||||
|
| `export_metrics.remote_write.headers` | InlineTable | -- | HTTP headers of Prometheus remote-write carry. |
|
||||||
| `tracing` | -- | -- | The tracing options. Only effective when compiled with the `tokio-console` feature. |
|
| `tracing` | -- | -- | The tracing options. Only effective when compiled with the `tokio-console` feature. |
|
||||||
| `tracing.tokio_console_addr` | String | Unset | The tokio console address. |
|
| `tracing.tokio_console_addr` | String | `None` | The tokio console address. |
|
||||||
| `memory` | -- | -- | The memory options. |
|
|
||||||
| `memory.enable_heap_profiling` | Bool | `true` | Whether to enable heap profiling activation during startup.<br/>When enabled, heap profiling will be activated if the `MALLOC_CONF` environment variable<br/>is set to "prof:true,prof_active:false". The official image adds this env variable.<br/>Default is true. |
|
|
||||||
| `event_recorder` | -- | -- | Configuration options for the event recorder. |
|
|
||||||
| `event_recorder.ttl` | String | `90d` | TTL for the events table that will be used to store the events. Default is `90d`. |
|
|
||||||
|
|
||||||
|
|
||||||
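
The options above map directly onto the TOML configuration file. Below is a minimal, illustrative frontend snippet covering a few of the keys documented in this table; the values are examples taken from the defaults above, not tuning recommendations, and any key not shown keeps its default.

```toml
# Illustrative frontend configuration sketch (example values only).

[grpc]
bind_addr = "127.0.0.1:4001"    # address the gRPC server binds to
runtime_size = 8                # server worker threads

[mysql]
enable = true
addr = "127.0.0.1:4002"

[postgres]
enable = true
addr = "127.0.0.1:4003"

[slow_query]
enable = true
record_type = "system_table"    # or "log"
threshold = "30s"               # queries slower than this are recorded
sample_ratio = 1.0              # record all slow queries

[meta_client]
metasrv_addrs = ["127.0.0.1:3002"]
timeout = "3s"
```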
### Metasrv
|
### Metasrv
|
||||||
|
|
||||||
| Key | Type | Default | Descriptions |
|
| Key | Type | Default | Descriptions |
|
||||||
| --- | -----| ------- | ----------- |
|
| --- | -----| ------- | ----------- |
|
||||||
| `data_home` | String | `./greptimedb_data` | The working home directory. |
|
| `data_home` | String | `/tmp/metasrv/` | The working home directory. |
|
||||||
| `store_addrs` | Array | -- | Store server address(es). The format depends on the selected backend.<br/><br/>For etcd: a list of "host:port" endpoints.<br/>e.g. ["192.168.1.1:2379", "192.168.1.2:2379"]<br/><br/>For PostgreSQL: a connection string in libpq format or URI.<br/>e.g.<br/>- "host=localhost port=5432 user=postgres password=<PASSWORD> dbname=postgres"<br/>- "postgresql://user:password@localhost:5432/mydb?connect_timeout=10"<br/>For details, see: https://docs.rs/tokio-postgres/latest/tokio_postgres/config/struct.Config.html<br/><br/>For MySQL: a MySQL connection URL.<br/>e.g. "mysql://user:password@localhost:3306/greptime_meta?ssl-mode=VERIFY_CA&ssl-ca=/path/to/ca.pem" |
|
| `bind_addr` | String | `127.0.0.1:3002` | The bind address of metasrv. |
|
||||||
| `store_key_prefix` | String | `""` | If it's not empty, the metasrv will store all data with this key prefix. |
|
| `server_addr` | String | `127.0.0.1:3002` | The communication server address for frontend and datanode to connect to metasrv, "127.0.0.1:3002" by default for localhost. |
|
||||||
| `backend` | String | `etcd_store` | The datastore for meta server.<br/>Available values:<br/>- `etcd_store` (default value)<br/>- `memory_store`<br/>- `postgres_store`<br/>- `mysql_store` |
|
| `store_addr` | String | `127.0.0.1:2379` | Etcd server address. |
|
||||||
| `meta_table_name` | String | `greptime_metakv` | Table name in RDS to store metadata. Effective when using an RDS kvbackend.<br/>**Only used when backend is `postgres_store`.** |
|
| `selector` | String | `lease_based` | Datanode selector type.<br/>- `lease_based` (default value).<br/>- `load_based`<br/>For details, please see "https://docs.greptime.com/developer-guide/metasrv/selector". |
|
||||||
| `meta_schema_name` | String | `greptime_schema` | Optional PostgreSQL schema for metadata table and election table name qualification.<br/>When PostgreSQL public schema is not writable (e.g., PostgreSQL 15+ with restricted public),<br/>set this to a writable schema. GreptimeDB will use `meta_schema_name`.`meta_table_name`.<br/>GreptimeDB will NOT create the schema automatically; please ensure it exists or the user has permission.<br/>**Only used when backend is `postgres_store`.** |
|
|
||||||
| `meta_election_lock_id` | Integer | `1` | Advisory lock id in PostgreSQL for election. Effective when using PostgreSQL as the kvbackend.<br/>Only used when backend is `postgres_store`. |
|
|
||||||
| `selector` | String | `round_robin` | Datanode selector type.<br/>- `round_robin` (default value)<br/>- `lease_based`<br/>- `load_based`<br/>For details, please see "https://docs.greptime.com/developer-guide/metasrv/selector". |
|
|
||||||
| `use_memory_store` | Bool | `false` | Store data in memory. |
|
| `use_memory_store` | Bool | `false` | Store data in memory. |
|
||||||
| `enable_region_failover` | Bool | `false` | Whether to enable region failover.<br/>This feature is only available on GreptimeDB running on cluster mode and<br/>- Using Remote WAL<br/>- Using shared storage (e.g., s3). |
|
| `enable_telemetry` | Bool | `true` | Whether to enable greptimedb telemetry. |
|
||||||
| `region_failure_detector_initialization_delay` | String | `10m` | The delay before starting region failure detection.<br/>This delay helps prevent Metasrv from triggering unnecessary region failovers before all Datanodes are fully started.<br/>Especially useful when the cluster is not deployed with GreptimeDB Operator and maintenance mode is not enabled. |
|
| `store_key_prefix` | String | `""` | If it's not empty, the metasrv will store all data with this key prefix. |
|
||||||
| `allow_region_failover_on_local_wal` | Bool | `false` | Whether to allow region failover on local WAL.<br/>**This option is not recommended to be set to true, because it may lead to data loss during failover.** |
|
|
||||||
| `node_max_idle_time` | String | `24hours` | Max allowed idle time before removing node info from metasrv memory. |
|
|
||||||
| `enable_telemetry` | Bool | `true` | Whether to enable greptimedb telemetry. Enabled by default. |
|
|
||||||
| `runtime` | -- | -- | The runtime options. |
|
|
||||||
| `runtime.global_rt_size` | Integer | `8` | The number of threads to execute the runtime for global read operations. |
|
|
||||||
| `runtime.compact_rt_size` | Integer | `4` | The number of threads to execute the runtime for global write operations. |
|
|
||||||
| `backend_tls` | -- | -- | TLS configuration for kv store backend (applicable for etcd, PostgreSQL, and MySQL backends)<br/>When using etcd, PostgreSQL, or MySQL as metadata store, you can configure TLS here<br/><br/>Note: if TLS is configured in both this section and the `store_addrs` connection string, the<br/>settings here will override the TLS settings in `store_addrs`. |
|
|
||||||
| `backend_tls.mode` | String | `prefer` | TLS mode, refer to https://www.postgresql.org/docs/current/libpq-ssl.html<br/>- "disable" - No TLS<br/>- "prefer" (default) - Try TLS, fallback to plain<br/>- "require" - Require TLS<br/>- "verify_ca" - Require TLS and verify CA<br/>- "verify_full" - Require TLS and verify hostname |
|
|
||||||
| `backend_tls.cert_path` | String | `""` | Path to client certificate file (for client authentication)<br/>Like "/path/to/client.crt" |
|
|
||||||
| `backend_tls.key_path` | String | `""` | Path to client private key file (for client authentication)<br/>Like "/path/to/client.key" |
|
|
||||||
| `backend_tls.ca_cert_path` | String | `""` | Path to CA certificate file (for server certificate verification)<br/>Required when using custom CAs or self-signed certificates<br/>Leave empty to use system root certificates only<br/>Like "/path/to/ca.crt" |
|
|
||||||
| `grpc` | -- | -- | The gRPC server options. |
|
|
||||||
| `grpc.bind_addr` | String | `127.0.0.1:3002` | The address to bind the gRPC server. |
|
|
||||||
| `grpc.server_addr` | String | `127.0.0.1:3002` | The communication server address for the frontend and datanode to connect to metasrv.<br/>If left empty or unset, the server will automatically use the IP address of the first network interface<br/>on the host, with the same port number as the one specified in `bind_addr`. |
|
|
||||||
| `grpc.runtime_size` | Integer | `8` | The number of server worker threads. |
|
|
||||||
| `grpc.max_recv_message_size` | String | `512MB` | The maximum receive message size for gRPC server. |
|
|
||||||
| `grpc.max_send_message_size` | String | `512MB` | The maximum send message size for gRPC server. |
|
|
||||||
| `http` | -- | -- | The HTTP server options. |
|
|
||||||
| `http.addr` | String | `127.0.0.1:4000` | The address to bind the HTTP server. |
|
|
||||||
| `http.timeout` | String | `0s` | HTTP request timeout. Set to 0 to disable timeout. |
|
|
||||||
| `http.body_limit` | String | `64MB` | HTTP request body limit.<br/>The following units are supported: `B`, `KB`, `KiB`, `MB`, `MiB`, `GB`, `GiB`, `TB`, `TiB`, `PB`, `PiB`.<br/>Set to 0 to disable limit. |
|
|
||||||
| `procedure` | -- | -- | Procedure storage options. |
|
| `procedure` | -- | -- | Procedure storage options. |
|
||||||
| `procedure.max_retry_times` | Integer | `12` | Procedure max retry time. |
|
| `procedure.max_retry_times` | Integer | `12` | Procedure max retry time. |
|
||||||
| `procedure.retry_delay` | String | `500ms` | Initial retry delay of procedures, increases exponentially |
|
| `procedure.retry_delay` | String | `500ms` | Initial retry delay of procedures, increases exponentially |
|
||||||
| `procedure.max_metadata_value_size` | String | `1500KiB` | Automatically split large values.<br/>GreptimeDB procedure uses etcd as the default metadata storage backend.<br/>In etcd, the maximum size of any request is 1.5 MiB.<br/>1500KiB = 1536KiB (1.5MiB) - 36KiB (reserved size of key)<br/>Comment out `max_metadata_value_size` to disable splitting large values (no limit). |
|
| `procedure.max_metadata_value_size` | String | `1500KiB` | Automatically split large values.<br/>GreptimeDB procedure uses etcd as the default metadata storage backend.<br/>In etcd, the maximum size of any request is 1.5 MiB.<br/>1500KiB = 1536KiB (1.5MiB) - 36KiB (reserved size of key)<br/>Comment out `max_metadata_value_size` to disable splitting large values (no limit). |
|
||||||
| `procedure.max_running_procedures` | Integer | `128` | Max running procedures.<br/>The maximum number of procedures that can be running at the same time.<br/>If the number of running procedures exceeds this limit, the procedure will be rejected. |
|
|
||||||
| `failure_detector` | -- | -- | -- |
|
| `failure_detector` | -- | -- | -- |
|
||||||
| `failure_detector.threshold` | Float | `8.0` | Maximum acceptable φ before the peer is treated as failed.<br/>Lower values react faster but yield more false positives. |
|
| `failure_detector.threshold` | Float | `8.0` | -- |
|
||||||
| `failure_detector.min_std_deviation` | String | `100ms` | The minimum standard deviation of the heartbeat intervals.<br/>So tiny variations don’t make φ explode. Prevents hypersensitivity when heartbeat intervals barely vary. |
|
| `failure_detector.min_std_deviation` | String | `100ms` | -- |
|
||||||
| `failure_detector.acceptable_heartbeat_pause` | String | `10000ms` | The acceptable pause duration between heartbeats.<br/>Additional extra grace period to the learned mean interval before φ rises, absorbing temporary network hiccups or GC pauses. |
|
| `failure_detector.acceptable_heartbeat_pause` | String | `3000ms` | -- |
|
||||||
|
| `failure_detector.first_heartbeat_estimate` | String | `1000ms` | -- |
|
||||||
| `datanode` | -- | -- | Datanode options. |
|
| `datanode` | -- | -- | Datanode options. |
|
||||||
| `datanode.client` | -- | -- | Datanode client options. |
|
| `datanode.client` | -- | -- | Datanode client options. |
|
||||||
| `datanode.client.timeout` | String | `10s` | Operation timeout. |
|
| `datanode.client.timeout` | String | `10s` | -- |
|
||||||
| `datanode.client.connect_timeout` | String | `10s` | Connect server timeout. |
|
| `datanode.client.connect_timeout` | String | `10s` | -- |
|
||||||
| `datanode.client.tcp_nodelay` | Bool | `true` | `TCP_NODELAY` option for accepted connections. |
|
| `datanode.client.tcp_nodelay` | Bool | `true` | -- |
|
||||||
| `wal` | -- | -- | -- |
|
| `wal` | -- | -- | -- |
|
||||||
| `wal.provider` | String | `raft_engine` | -- |
|
| `wal.provider` | String | `raft_engine` | -- |
|
||||||
| `wal.broker_endpoints` | Array | -- | The broker endpoints of the Kafka cluster.<br/><br/>**It's only used when the provider is `kafka`**. |
|
| `wal.broker_endpoints` | Array | -- | The broker endpoints of the Kafka cluster. |
|
||||||
| `wal.auto_create_topics` | Bool | `true` | Whether to automatically create topics for WAL.<br/>Set to `true` to automatically create topics for WAL;<br/>otherwise, use pre-created topics named `topic_name_prefix_[0..num_topics)`.<br/>**It's only used when the provider is `kafka`**. |
|
| `wal.num_topics` | Integer | `64` | Number of topics to be created upon start. |
|
||||||
| `wal.auto_prune_interval` | String | `30m` | Interval of automatic WAL pruning.<br/>Set to `0s` to disable automatic WAL pruning, which periodically deletes unused remote WAL entries.<br/>**It's only used when the provider is `kafka`**. |
|
| `wal.selector_type` | String | `round_robin` | Topic selector type.<br/>Available selector types:<br/>- `round_robin` (default) |
|
||||||
| `wal.flush_trigger_size` | String | `512MB` | Estimated size threshold to trigger a flush when using Kafka remote WAL.<br/>Since multiple regions may share a Kafka topic, the estimated size is calculated as:<br/> (latest_entry_id - flushed_entry_id) * avg_record_size<br/>MetaSrv triggers a flush for a region when this estimated size exceeds `flush_trigger_size`.<br/>- `latest_entry_id`: The latest entry ID in the topic.<br/>- `flushed_entry_id`: The last flushed entry ID for the region.<br/>Set to "0" to let the system decide the flush trigger size.<br/>**It's only used when the provider is `kafka`**. |
|
| `wal.topic_name_prefix` | String | `greptimedb_wal_topic` | A Kafka topic is constructed by concatenating `topic_name_prefix` and `topic_id`. |
|
||||||
| `wal.checkpoint_trigger_size` | String | `128MB` | Estimated size threshold to trigger a checkpoint when using Kafka remote WAL.<br/>The estimated size is calculated as:<br/> (latest_entry_id - last_checkpoint_entry_id) * avg_record_size<br/>MetaSrv triggers a checkpoint for a region when this estimated size exceeds `checkpoint_trigger_size`.<br/>Set to "0" to let the system decide the checkpoint trigger size.<br/>**It's only used when the provider is `kafka`**. |
|
| `wal.replication_factor` | Integer | `1` | Expected number of replicas of each partition. |
|
||||||
| `wal.auto_prune_parallelism` | Integer | `10` | Concurrent task limit for automatic WAL pruning.<br/>**It's only used when the provider is `kafka`**. |
|
| `wal.create_topic_timeout` | String | `30s` | Above which a topic creation operation will be cancelled. |
|
||||||
| `wal.num_topics` | Integer | `64` | Number of topics used for remote WAL.<br/>**It's only used when the provider is `kafka`**. |
|
| `wal.backoff_init` | String | `500ms` | The initial backoff for kafka clients. |
|
||||||
| `wal.selector_type` | String | `round_robin` | Topic selector type.<br/>Available selector types:<br/>- `round_robin` (default)<br/>**It's only used when the provider is `kafka`**. |
|
| `wal.backoff_max` | String | `10s` | The maximum backoff for kafka clients. |
|
||||||
| `wal.topic_name_prefix` | String | `greptimedb_wal_topic` | A Kafka topic is constructed by concatenating `topic_name_prefix` and `topic_id`.<br/>Only accepts strings that match the following regular expression pattern:<br/>[a-zA-Z_:-][a-zA-Z0-9_:\-\.@#]*<br/>e.g., greptimedb_wal_topic_0, greptimedb_wal_topic_1.<br/>**It's only used when the provider is `kafka`**. |
|
| `wal.backoff_base` | Integer | `2` | Exponential backoff rate, i.e. next backoff = base * current backoff. |
|
||||||
| `wal.replication_factor` | Integer | `1` | Expected number of replicas of each partition.<br/>**It's only used when the provider is `kafka`**. |
|
| `wal.backoff_deadline` | String | `5mins` | Stop reconnecting if the total wait time reaches the deadline. If this config is missing, the reconnecting won't terminate. |
|
||||||
| `wal.create_topic_timeout` | String | `30s` | The timeout for creating a Kafka topic.<br/>**It's only used when the provider is `kafka`**. |
|
|
||||||
| `event_recorder` | -- | -- | Configuration options for the event recorder. |
|
|
||||||
| `event_recorder.ttl` | String | `90d` | TTL for the events table that will be used to store the events. Default is `90d`. |
|
|
||||||
| `stats_persistence` | -- | -- | Configuration options for the stats persistence. |
|
|
||||||
| `stats_persistence.ttl` | String | `0s` | TTL for the stats table that will be used to store the stats.<br/>Set to `0s` to disable stats persistence.<br/>Default is `0s`.<br/>If you want to enable stats persistence, set the TTL to a value greater than 0.<br/>It is recommended to set a small value, e.g., `3h`. |
|
|
||||||
| `stats_persistence.interval` | String | `10m` | The interval to persist the stats. Default is `10m`.<br/>The minimum value is `10m`, if the value is less than `10m`, it will be overridden to `10m`. |
|
|
||||||
| `logging` | -- | -- | The logging options. |
|
| `logging` | -- | -- | The logging options. |
|
||||||
| `logging.dir` | String | `./greptimedb_data/logs` | The directory to store the log files. If set to empty, logs will not be written to files. |
|
| `logging.dir` | String | `/tmp/greptimedb/logs` | The directory to store the log files. |
|
||||||
| `logging.level` | String | Unset | The log level. Can be `info`/`debug`/`warn`/`error`. |
|
| `logging.level` | String | `None` | The log level. Can be `info`/`debug`/`warn`/`error`. |
|
||||||
| `logging.enable_otlp_tracing` | Bool | `false` | Enable OTLP tracing. |
|
| `logging.enable_otlp_tracing` | Bool | `false` | Enable OTLP tracing. |
|
||||||
| `logging.otlp_endpoint` | String | `http://localhost:4318/v1/traces` | The OTLP tracing endpoint. |
|
| `logging.otlp_endpoint` | String | `None` | The OTLP tracing endpoint. |
|
||||||
| `logging.append_stdout` | Bool | `true` | Whether to append logs to stdout. |
|
| `logging.append_stdout` | Bool | `true` | Whether to append logs to stdout. |
|
||||||
| `logging.log_format` | String | `text` | The log format. Can be `text`/`json`. |
|
| `logging.tracing_sample_ratio` | -- | -- | The percentage of tracing will be sampled and exported.<br/>Valid range `[0, 1]`, 1 means all traces are sampled, 0 means all traces are not sampled, the default value is 1.<br/>ratio > 1 are treated as 1. Fractions < 0 are treated as 0 |
|
||||||
| `logging.max_log_files` | Integer | `720` | The maximum number of log files. |
|
|
||||||
| `logging.otlp_export_protocol` | String | `http` | The OTLP tracing export protocol. Can be `grpc`/`http`. |
|
|
||||||
| `logging.otlp_headers` | -- | -- | Additional OTLP headers. Only valid when using OTLP over HTTP. |
|
|
||||||
| `logging.tracing_sample_ratio` | -- | Unset | The percentage of tracing will be sampled and exported.<br/>Valid range `[0, 1]`, 1 means all traces are sampled, 0 means all traces are not sampled, the default value is 1.<br/>ratio > 1 are treated as 1. Fractions < 0 are treated as 0 |
|
|
||||||
| `logging.tracing_sample_ratio.default_ratio` | Float | `1.0` | -- |
|
| `logging.tracing_sample_ratio.default_ratio` | Float | `1.0` | -- |
|
||||||
|
| `export_metrics` | -- | -- | The datanode can export its metrics and send to Prometheus compatible service (e.g. send to `greptimedb` itself) from remote-write API.<br/>This is only used for `greptimedb` to export its own metrics internally. It's different from prometheus scrape. |
|
||||||
|
| `export_metrics.enable` | Bool | `false` | whether enable export metrics. |
|
||||||
|
| `export_metrics.write_interval` | String | `30s` | The interval of export metrics. |
|
||||||
|
| `export_metrics.self_import` | -- | -- | For `standalone` mode, `self_import` is recommend to collect metrics generated by itself |
|
||||||
|
| `export_metrics.self_import.db` | String | `None` | -- |
|
||||||
|
| `export_metrics.remote_write` | -- | -- | -- |
|
||||||
|
| `export_metrics.remote_write.url` | String | `""` | The url the metrics send to. The url example can be: `http://127.0.0.1:4000/v1/prometheus/write?db=information_schema`. |
|
||||||
|
| `export_metrics.remote_write.headers` | InlineTable | -- | HTTP headers of Prometheus remote-write carry. |
|
||||||
| `tracing` | -- | -- | The tracing options. Only effective when compiled with the `tokio-console` feature. |
|
| `tracing` | -- | -- | The tracing options. Only effective when compiled with the `tokio-console` feature. |
|
||||||
| `tracing.tokio_console_addr` | String | Unset | The tokio console address. |
|
| `tracing.tokio_console_addr` | String | `None` | The tokio console address. |
|
||||||
| `memory` | -- | -- | The memory options. |
|
|
||||||
| `memory.enable_heap_profiling` | Bool | `true` | Whether to enable heap profiling activation during startup.<br/>When enabled, heap profiling will be activated if the `MALLOC_CONF` environment variable<br/>is set to "prof:true,prof_active:false". The official image adds this env variable.<br/>Default is true. |
|
|
||||||
|
|
||||||
|
|
||||||
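
As a companion to the table above, here is an illustrative metasrv snippet that switches the metadata backend from etcd to PostgreSQL. The connection string, schema name, and TLS settings are placeholders; the remaining keys keep the defaults documented above.

```toml
# Illustrative metasrv configuration sketch (placeholder values).

data_home = "./greptimedb_data"
backend = "postgres_store"
# libpq-style connection string; replace credentials with your own.
store_addrs = ["host=localhost port=5432 user=postgres dbname=postgres"]
meta_table_name = "greptime_metakv"      # metadata table in PostgreSQL
meta_schema_name = "greptime_schema"     # writable schema for that table
selector = "round_robin"

[backend_tls]
mode = "prefer"                          # try TLS, fall back to plain

[grpc]
bind_addr = "127.0.0.1:3002"
server_addr = "127.0.0.1:3002"

[failure_detector]
threshold = 8.0
min_std_deviation = "100ms"
acceptable_heartbeat_pause = "10000ms"
```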
### Datanode
|
### Datanode
|
||||||
|
|
||||||
| Key | Type | Default | Descriptions |
|
| Key | Type | Default | Descriptions |
|
||||||
| --- | -----| ------- | ----------- |
|
| --- | -----| ------- | ----------- |
|
||||||
| `node_id` | Integer | Unset | The datanode identifier and should be unique in the cluster. |
|
| `mode` | String | `standalone` | The running mode of the datanode. It can be `standalone` or `distributed`. |
|
||||||
| `default_column_prefix` | String | Unset | The default column prefix for auto-created time index and value columns. |
|
| `node_id` | Integer | `None` | The datanode identifier and should be unique in the cluster. |
|
||||||
| `require_lease_before_startup` | Bool | `false` | Start services after regions have obtained leases.<br/>It will block the datanode from starting if it can't receive leases in the heartbeat from metasrv. |
|
| `require_lease_before_startup` | Bool | `false` | Start services after regions have obtained leases.<br/>It will block the datanode from starting if it can't receive leases in the heartbeat from metasrv. |
|
||||||
| `init_regions_in_background` | Bool | `false` | Initialize all regions in the background during the startup.<br/>By default, it provides services after all regions have been initialized. |
|
| `init_regions_in_background` | Bool | `false` | Initialize all regions in the background during the startup.<br/>By default, it provides services after all regions have been initialized. |
|
||||||
| `init_regions_parallelism` | Integer | `16` | Parallelism of initializing regions. |
|
| `rpc_addr` | String | `127.0.0.1:3001` | The gRPC address of the datanode. |
|
||||||
| `max_concurrent_queries` | Integer | `0` | The maximum number of concurrent queries allowed to be executed. Zero means unlimited.<br/>NOTE: This setting affects scan_memory_limit's privileged tier allocation.<br/>When set, 70% of queries get privileged memory access (full scan_memory_limit).<br/>The remaining 30% get standard tier access (70% of scan_memory_limit). |
|
| `rpc_hostname` | String | `None` | The hostname of the datanode. |
|
||||||
| `enable_telemetry` | Bool | `true` | Enable telemetry to collect anonymous usage data. Enabled by default. |
|
| `rpc_runtime_size` | Integer | `8` | The number of gRPC server worker threads. |
|
||||||
| `http` | -- | -- | The HTTP server options. |
|
| `rpc_max_recv_message_size` | String | `512MB` | The maximum receive message size for gRPC server. |
|
||||||
| `http.addr` | String | `127.0.0.1:4000` | The address to bind the HTTP server. |
|
| `rpc_max_send_message_size` | String | `512MB` | The maximum send message size for gRPC server. |
|
||||||
| `http.timeout` | String | `0s` | HTTP request timeout. Set to 0 to disable timeout. |
|
| `enable_telemetry` | Bool | `true` | Enable telemetry to collect anonymous usage data. |
|
||||||
| `http.body_limit` | String | `64MB` | HTTP request body limit.<br/>The following units are supported: `B`, `KB`, `KiB`, `MB`, `MiB`, `GB`, `GiB`, `TB`, `TiB`, `PB`, `PiB`.<br/>Set to 0 to disable limit. |
|
|
||||||
| `grpc` | -- | -- | The gRPC server options. |
|
|
||||||
| `grpc.bind_addr` | String | `127.0.0.1:3001` | The address to bind the gRPC server. |
|
|
||||||
| `grpc.server_addr` | String | `127.0.0.1:3001` | The address advertised to the metasrv, and used for connections from outside the host.<br/>If left empty or unset, the server will automatically use the IP address of the first network interface<br/>on the host, with the same port number as the one specified in `grpc.bind_addr`. |
|
|
||||||
| `grpc.runtime_size` | Integer | `8` | The number of server worker threads. |
|
|
||||||
| `grpc.max_recv_message_size` | String | `512MB` | The maximum receive message size for gRPC server. |
|
|
||||||
| `grpc.max_send_message_size` | String | `512MB` | The maximum send message size for gRPC server. |
|
|
||||||
| `grpc.flight_compression` | String | `arrow_ipc` | Compression mode for the datanode-side Arrow IPC service. Available options:<br/>- `none`: disable all compression<br/>- `transport`: only enable gRPC transport compression (zstd)<br/>- `arrow_ipc`: only enable Arrow IPC compression (lz4)<br/>- `all`: enable all compression<br/>Defaults to `arrow_ipc`. |
|
|
||||||
| `grpc.tls` | -- | -- | gRPC server TLS options, see `mysql.tls` section. |
|
|
||||||
| `grpc.tls.mode` | String | `disable` | TLS mode. |
|
|
||||||
| `grpc.tls.cert_path` | String | Unset | Certificate file path. |
|
|
||||||
| `grpc.tls.key_path` | String | Unset | Private key file path. |
|
|
||||||
| `grpc.tls.watch` | Bool | `false` | Watch for certificate and key file changes and reload automatically.<br/>For now, the gRPC TLS config does not support auto reload. |
|
|
||||||
| `runtime` | -- | -- | The runtime options. |
|
|
||||||
| `runtime.global_rt_size` | Integer | `8` | The number of threads to execute the runtime for global read operations. |
|
|
||||||
| `runtime.compact_rt_size` | Integer | `4` | The number of threads to execute the runtime for global write operations. |
|
|
||||||
| `heartbeat` | -- | -- | The heartbeat options. |
|
| `heartbeat` | -- | -- | The heartbeat options. |
|
||||||
| `heartbeat.interval` | String | `3s` | Interval for sending heartbeat messages to the metasrv. |
|
| `heartbeat.interval` | String | `3s` | Interval for sending heartbeat messages to the metasrv. |
|
||||||
| `heartbeat.retry_interval` | String | `3s` | Interval for retrying to send heartbeat messages to the metasrv. |
|
| `heartbeat.retry_interval` | String | `3s` | Interval for retrying to send heartbeat messages to the metasrv. |
|
||||||
| `meta_client` | -- | -- | The metasrv client options. |
|
| `meta_client` | -- | -- | The metasrv client options. |
|
||||||
| `meta_client.metasrv_addrs` | Array | -- | The addresses of the metasrv. |
|
| `meta_client.metasrv_addrs` | Array | -- | The addresses of the metasrv. |
|
||||||
| `meta_client.timeout` | String | `3s` | Operation timeout. |
|
| `meta_client.timeout` | String | `3s` | Operation timeout. |
|
||||||
|
| `meta_client.heartbeat_timeout` | String | `500ms` | Heartbeat timeout. |
|
||||||
| `meta_client.ddl_timeout` | String | `10s` | DDL timeout. |
|
| `meta_client.ddl_timeout` | String | `10s` | DDL timeout. |
|
||||||
| `meta_client.connect_timeout` | String | `1s` | Connect server timeout. |
|
| `meta_client.connect_timeout` | String | `1s` | Connect server timeout. |
|
||||||
| `meta_client.tcp_nodelay` | Bool | `true` | `TCP_NODELAY` option for accepted connections. |
|
| `meta_client.tcp_nodelay` | Bool | `true` | `TCP_NODELAY` option for accepted connections. |
|
||||||
@@ -463,195 +314,90 @@
|
|||||||
| `meta_client.metadata_cache_ttl` | String | `10m` | TTL of the metadata cache. |
|
| `meta_client.metadata_cache_ttl` | String | `10m` | TTL of the metadata cache. |
|
||||||
| `meta_client.metadata_cache_tti` | String | `5m` | -- |
|
| `meta_client.metadata_cache_tti` | String | `5m` | -- |
|
||||||
| `wal` | -- | -- | The WAL options. |
|
| `wal` | -- | -- | The WAL options. |
|
||||||
| `wal.provider` | String | `raft_engine` | The provider of the WAL.<br/>- `raft_engine`: the WAL is stored in the local file system by raft-engine.<br/>- `kafka`: remote WAL; data is stored in Kafka.<br/>- `noop`: a no-op WAL provider that does not store any WAL data.<br/>**Note: any unflushed data will be lost when the datanode is shut down.** |
|
| `wal.provider` | String | `raft_engine` | The provider of the WAL.<br/>- `raft_engine`: the wal is stored in the local file system by raft-engine.<br/>- `kafka`: it's remote wal that data is stored in Kafka. |
|
||||||
| `wal.dir` | String | Unset | The directory to store the WAL files.<br/>**It's only used when the provider is `raft_engine`**. |
|
| `wal.dir` | String | `None` | The directory to store the WAL files.<br/>**It's only used when the provider is `raft_engine`**. |
|
||||||
| `wal.file_size` | String | `128MB` | The size of the WAL segment file.<br/>**It's only used when the provider is `raft_engine`**. |
|
| `wal.file_size` | String | `256MB` | The size of the WAL segment file.<br/>**It's only used when the provider is `raft_engine`**. |
|
||||||
| `wal.purge_threshold` | String | `1GB` | The threshold of the WAL size to trigger a purge.<br/>**It's only used when the provider is `raft_engine`**. |
|
| `wal.purge_threshold` | String | `4GB` | The threshold of the WAL size to trigger a flush.<br/>**It's only used when the provider is `raft_engine`**. |
|
||||||
| `wal.purge_interval` | String | `1m` | The interval to trigger a purge.<br/>**It's only used when the provider is `raft_engine`**. |
|
| `wal.purge_interval` | String | `10m` | The interval to trigger a flush.<br/>**It's only used when the provider is `raft_engine`**. |
|
||||||
| `wal.read_batch_size` | Integer | `128` | The read batch size.<br/>**It's only used when the provider is `raft_engine`**. |
|
| `wal.read_batch_size` | Integer | `128` | The read batch size.<br/>**It's only used when the provider is `raft_engine`**. |
|
||||||
| `wal.sync_write` | Bool | `false` | Whether to use sync write.<br/>**It's only used when the provider is `raft_engine`**. |
|
| `wal.sync_write` | Bool | `false` | Whether to use sync write.<br/>**It's only used when the provider is `raft_engine`**. |
|
||||||
| `wal.enable_log_recycle` | Bool | `true` | Whether to reuse logically truncated log files.<br/>**It's only used when the provider is `raft_engine`**. |
|
| `wal.enable_log_recycle` | Bool | `true` | Whether to reuse logically truncated log files.<br/>**It's only used when the provider is `raft_engine`**. |
|
||||||
| `wal.prefill_log_files` | Bool | `false` | Whether to pre-create log files on start up.<br/>**It's only used when the provider is `raft_engine`**. |
|
| `wal.prefill_log_files` | Bool | `false` | Whether to pre-create log files on start up.<br/>**It's only used when the provider is `raft_engine`**. |
|
||||||
| `wal.sync_period` | String | `10s` | Duration for fsyncing log files.<br/>**It's only used when the provider is `raft_engine`**. |
|
| `wal.sync_period` | String | `10s` | Duration for fsyncing log files.<br/>**It's only used when the provider is `raft_engine`**. |
|
||||||
| `wal.recovery_parallelism` | Integer | `2` | Parallelism during WAL recovery. |
|
|
||||||
| `wal.broker_endpoints` | Array | -- | The Kafka broker endpoints.<br/>**It's only used when the provider is `kafka`**. |
|
| `wal.broker_endpoints` | Array | -- | The Kafka broker endpoints.<br/>**It's only used when the provider is `kafka`**. |
|
||||||
| `wal.max_batch_bytes` | String | `1MB` | The max size of a single producer batch.<br/>Warning: Kafka has a default limit of 1MB per message in a topic.<br/>**It's only used when the provider is `kafka`**. |
|
| `wal.max_batch_size` | String | `1MB` | The max size of a single producer batch.<br/>Warning: Kafka has a default limit of 1MB per message in a topic.<br/>**It's only used when the provider is `kafka`**. |
|
||||||
|
| `wal.linger` | String | `200ms` | The linger duration of a kafka batch producer.<br/>**It's only used when the provider is `kafka`**. |
|
||||||
| `wal.consumer_wait_timeout` | String | `100ms` | The consumer wait timeout.<br/>**It's only used when the provider is `kafka`**. |
|
| `wal.consumer_wait_timeout` | String | `100ms` | The consumer wait timeout.<br/>**It's only used when the provider is `kafka`**. |
|
||||||
| `wal.create_index` | Bool | `true` | Whether to enable WAL index creation.<br/>**It's only used when the provider is `kafka`**. |
|
| `wal.backoff_init` | String | `500ms` | The initial backoff delay.<br/>**It's only used when the provider is `kafka`**. |
|
||||||
| `wal.dump_index_interval` | String | `60s` | The interval for dumping WAL indexes.<br/>**It's only used when the provider is `kafka`**. |
|
| `wal.backoff_max` | String | `10s` | The maximum backoff delay.<br/>**It's only used when the provider is `kafka`**. |
|
||||||
| `wal.overwrite_entry_start_id` | Bool | `false` | Ignore missing entries when reading the WAL.<br/>**It's only used when the provider is `kafka`**.<br/><br/>This option ensures that when Kafka messages are deleted, the system<br/>can still successfully replay memtable data without throwing an<br/>out-of-range error.<br/>However, enabling this option might lead to unexpected data loss,<br/>as the system will skip over missing entries instead of treating<br/>them as critical errors. |
|
| `wal.backoff_base` | Integer | `2` | The exponential backoff rate, i.e. next backoff = base * current backoff.<br/>**It's only used when the provider is `kafka`**. |
|
||||||
| `query` | -- | -- | The query engine options. |
|
| `wal.backoff_deadline` | String | `5mins` | The deadline of retries.<br/>**It's only used when the provider is `kafka`**. |
|
||||||
| `query.parallelism` | Integer | `0` | Parallelism of the query engine.<br/>Defaults to 0, which means the number of CPU cores. |
|
|
||||||
| `query.memory_pool_size` | String | `50%` | Memory pool size for query execution operators (aggregation, sorting, join).<br/>Supports absolute size (e.g., "2GB", "4GB") or percentage of system memory (e.g., "20%").<br/>Setting it to 0 disables the limit (unbounded, default behavior).<br/>When this limit is reached, queries will fail with ResourceExhausted error.<br/>NOTE: This does NOT limit memory used by table scans. |
|
|
||||||
| `storage` | -- | -- | The data storage options. |
|
| `storage` | -- | -- | The data storage options. |
|
||||||
| `storage.data_home` | String | `./greptimedb_data` | The working home directory. |
|
| `storage.data_home` | String | `/tmp/greptimedb/` | The working home directory. |
|
||||||
| `storage.type` | String | `File` | The storage type used to store the data.<br/>- `File`: the data is stored in the local file system.<br/>- `S3`: the data is stored in the S3 object storage.<br/>- `Gcs`: the data is stored in the Google Cloud Storage.<br/>- `Azblob`: the data is stored in the Azure Blob Storage.<br/>- `Oss`: the data is stored in the Aliyun OSS. |
|
| `storage.type` | String | `File` | The storage type used to store the data.<br/>- `File`: the data is stored in the local file system.<br/>- `S3`: the data is stored in the S3 object storage.<br/>- `Gcs`: the data is stored in the Google Cloud Storage.<br/>- `Azblob`: the data is stored in the Azure Blob Storage.<br/>- `Oss`: the data is stored in the Aliyun OSS. |
|
||||||
| `storage.cache_path` | String | Unset | Read cache configuration for object storage such as `S3`; it is configured by default when using object storage. Configuring it is recommended for better performance when using object storage.<br/>A local file directory; defaults to `{data_home}`. An empty string disables the read cache. |
|
| `storage.cache_path` | String | `None` | Cache configuration for object storage such as 'S3' etc.<br/>The local file cache directory. |
|
||||||
| `storage.enable_read_cache` | Bool | `true` | Whether to enable read cache. If not set, the read cache will be enabled by default when using object storage. |
|
| `storage.cache_capacity` | String | `None` | The local file cache capacity in bytes. |
|
||||||
| `storage.cache_capacity` | String | Unset | The local file cache capacity in bytes. If your disk space is sufficient, a larger value is recommended. |
|
| `storage.bucket` | String | `None` | The S3 bucket name.<br/>**It's only used when the storage type is `S3`, `Oss` and `Gcs`**. |
|
||||||
| `storage.bucket` | String | Unset | The S3 bucket name.<br/>**It's only used when the storage type is `S3`, `Oss` and `Gcs`**. |
|
| `storage.root` | String | `None` | The S3 data will be stored in the specified prefix, for example, `s3://${bucket}/${root}`.<br/>**It's only used when the storage type is `S3`, `Oss` and `Azblob`**. |
|
||||||
| `storage.root` | String | Unset | The S3 data will be stored in the specified prefix, for example, `s3://${bucket}/${root}`.<br/>**It's only used when the storage type is `S3`, `Oss` and `Azblob`**. |
|
| `storage.access_key_id` | String | `None` | The access key id of the aws account.<br/>It's **highly recommended** to use AWS IAM roles instead of hardcoding the access key id and secret key.<br/>**It's only used when the storage type is `S3` and `Oss`**. |
|
||||||
| `storage.access_key_id` | String | Unset | The access key id of the aws account.<br/>It's **highly recommended** to use AWS IAM roles instead of hardcoding the access key id and secret key.<br/>**It's only used when the storage type is `S3` and `Oss`**. |
|
| `storage.secret_access_key` | String | `None` | The secret access key of the aws account.<br/>It's **highly recommended** to use AWS IAM roles instead of hardcoding the access key id and secret key.<br/>**It's only used when the storage type is `S3`**. |
|
||||||
| `storage.secret_access_key` | String | Unset | The secret access key of the aws account.<br/>It's **highly recommended** to use AWS IAM roles instead of hardcoding the access key id and secret key.<br/>**It's only used when the storage type is `S3`**. |
|
| `storage.access_key_secret` | String | `None` | The secret access key of the aliyun account.<br/>**It's only used when the storage type is `Oss`**. |
|
||||||
| `storage.access_key_secret` | String | Unset | The secret access key of the aliyun account.<br/>**It's only used when the storage type is `Oss`**. |
|
| `storage.account_name` | String | `None` | The account key of the azure account.<br/>**It's only used when the storage type is `Azblob`**. |
|
||||||
| `storage.account_name` | String | Unset | The account name of the azure account.<br/>**It's only used when the storage type is `Azblob`**. |
|
| `storage.account_key` | String | `None` | The account key of the azure account.<br/>**It's only used when the storage type is `Azblob`**. |
|
||||||
| `storage.account_key` | String | Unset | The account key of the azure account.<br/>**It's only used when the storage type is `Azblob`**. |
|
| `storage.scope` | String | `None` | The scope of the google cloud storage.<br/>**It's only used when the storage type is `Gcs`**. |
|
||||||
| `storage.scope` | String | Unset | The scope of the google cloud storage.<br/>**It's only used when the storage type is `Gcs`**. |
|
| `storage.credential_path` | String | `None` | The credential path of the google cloud storage.<br/>**It's only used when the storage type is `Gcs`**. |
|
||||||
| `storage.credential_path` | String | Unset | The credential path of the google cloud storage.<br/>**It's only used when the storage type is `Gcs`**. |
|
| `storage.container` | String | `None` | The container of the azure account.<br/>**It's only used when the storage type is `Azblob`**. |
|
||||||
| `storage.credential` | String | Unset | The credential of the google cloud storage.<br/>**It's only used when the storage type is `Gcs`**. |
|
| `storage.sas_token` | String | `None` | The sas token of the azure account.<br/>**It's only used when the storage type is `Azblob`**. |
|
||||||
| `storage.container` | String | Unset | The container of the azure account.<br/>**It's only used when the storage type is `Azblob`**. |
|
| `storage.endpoint` | String | `None` | The endpoint of the S3 service.<br/>**It's only used when the storage type is `S3`, `Oss`, `Gcs` and `Azblob`**. |
|
||||||
| `storage.sas_token` | String | Unset | The sas token of the azure account.<br/>**It's only used when the storage type is `Azblob`**. |
|
| `storage.region` | String | `None` | The region of the S3 service.<br/>**It's only used when the storage type is `S3`, `Oss`, `Gcs` and `Azblob`**. |
|
||||||
| `storage.endpoint` | String | Unset | The endpoint of the S3 service.<br/>**It's only used when the storage type is `S3`, `Oss`, `Gcs` and `Azblob`**. |
|
|
||||||
| `storage.region` | String | Unset | The region of the S3 service.<br/>**It's only used when the storage type is `S3`, `Oss`, `Gcs` and `Azblob`**. |
|
|
||||||
| `storage.http_client` | -- | -- | The http client options to the storage.<br/>**It's only used when the storage type is `S3`, `Oss`, `Gcs` and `Azblob`**. |
|
|
||||||
| `storage.http_client.pool_max_idle_per_host` | Integer | `1024` | The maximum idle connection per host allowed in the pool. |
|
|
||||||
| `storage.http_client.connect_timeout` | String | `30s` | The timeout for only the connect phase of a http client. |
|
|
||||||
| `storage.http_client.timeout` | String | `30s` | The total request timeout, applied from when the request starts connecting until the response body has finished.<br/>Also considered a total deadline. |
|
|
||||||
| `storage.http_client.pool_idle_timeout` | String | `90s` | The timeout for idle sockets being kept-alive. |
|
|
||||||
| `storage.http_client.skip_ssl_validation` | Bool | `false` | Whether to skip SSL verification.<br/>**Security Notice**: Setting `skip_ssl_validation = true` disables certificate verification, making connections vulnerable to man-in-the-middle attacks. Only use this in development or trusted private networks. |
|
|
||||||
| `[[region_engine]]` | -- | -- | The region engine options. You can configure multiple region engines. |
|
| `[[region_engine]]` | -- | -- | The region engine options. You can configure multiple region engines. |
|
||||||
| `region_engine.mito` | -- | -- | The Mito engine options. |
|
| `region_engine.mito` | -- | -- | The Mito engine options. |
|
||||||
| `region_engine.mito.num_workers` | Integer | `8` | Number of region workers. |
|
| `region_engine.mito.num_workers` | Integer | `8` | Number of region workers. |
|
||||||
| `region_engine.mito.worker_channel_size` | Integer | `128` | Request channel size of each worker. |
|
| `region_engine.mito.worker_channel_size` | Integer | `128` | Request channel size of each worker. |
|
||||||
| `region_engine.mito.worker_request_batch_size` | Integer | `64` | Max batch size for a worker to handle requests. |
|
| `region_engine.mito.worker_request_batch_size` | Integer | `64` | Max batch size for a worker to handle requests. |
|
||||||
| `region_engine.mito.manifest_checkpoint_distance` | Integer | `10` | Number of meta actions updated to trigger a new checkpoint for the manifest. |
|
| `region_engine.mito.manifest_checkpoint_distance` | Integer | `10` | Number of meta actions updated to trigger a new checkpoint for the manifest. |
|
||||||
| `region_engine.mito.experimental_manifest_keep_removed_file_count` | Integer | `256` | Number of removed files to keep in the manifest's `removed_files` field before<br/>removing them from `removed_files`. Mostly for debugging purposes.<br/>If set to 0, only `keep_removed_file_ttl` is used to decide when to remove files<br/>from the `removed_files` field. |
|
|
||||||
| `region_engine.mito.experimental_manifest_keep_removed_file_ttl` | String | `1h` | How long to keep removed files in the `removed_files` field of the manifest<br/>after they are removed from the manifest.<br/>Files will only be removed from the `removed_files` field<br/>if both `keep_removed_file_count` and `keep_removed_file_ttl` are reached. |
|
|
||||||
| `region_engine.mito.compress_manifest` | Bool | `false` | Whether to compress manifest and checkpoint file by gzip (default false). |
|
| `region_engine.mito.compress_manifest` | Bool | `false` | Whether to compress manifest and checkpoint file by gzip (default false). |
|
||||||
| `region_engine.mito.max_background_flushes` | Integer | Auto | Max number of running background flush jobs (default: 1/2 of cpu cores). |
|
| `region_engine.mito.max_background_jobs` | Integer | `4` | Max number of running background jobs |
|
||||||
| `region_engine.mito.max_background_compactions` | Integer | Auto | Max number of running background compaction jobs (default: 1/4 of cpu cores). |
|
|
||||||
| `region_engine.mito.max_background_purges` | Integer | Auto | Max number of running background purge jobs (default: number of cpu cores). |
|
|
||||||
| `region_engine.mito.auto_flush_interval` | String | `1h` | Interval to auto flush a region if it has not flushed yet. |
|
| `region_engine.mito.auto_flush_interval` | String | `1h` | Interval to auto flush a region if it has not flushed yet. |
|
||||||
| `region_engine.mito.global_write_buffer_size` | String | Auto | Global write buffer size for all regions. If not set, it's default to 1/8 of OS memory with a max limitation of 1GB. |
|
| `region_engine.mito.global_write_buffer_size` | String | `1GB` | Global write buffer size for all regions. If not set, it's default to 1/8 of OS memory with a max limitation of 1GB. |
|
||||||
| `region_engine.mito.global_write_buffer_reject_size` | String | Auto | Global write buffer size threshold to reject write requests. If not set, it's default to 2 times of `global_write_buffer_size` |
|
| `region_engine.mito.global_write_buffer_reject_size` | String | `2GB` | Global write buffer size threshold to reject write requests. If not set, it's default to 2 times of `global_write_buffer_size` |
|
||||||
| `region_engine.mito.sst_meta_cache_size` | String | Auto | Cache size for SST metadata. Setting it to 0 to disable the cache.<br/>If not set, it's default to 1/32 of OS memory with a max limitation of 128MB. |
|
| `region_engine.mito.sst_meta_cache_size` | String | `128MB` | Cache size for SST metadata. Setting it to 0 to disable the cache.<br/>If not set, it's default to 1/32 of OS memory with a max limitation of 128MB. |
|
||||||
| `region_engine.mito.vector_cache_size` | String | Auto | Cache size for vectors and arrow arrays. Setting it to 0 to disable the cache.<br/>If not set, it's default to 1/16 of OS memory with a max limitation of 512MB. |
|
| `region_engine.mito.vector_cache_size` | String | `512MB` | Cache size for vectors and arrow arrays. Setting it to 0 to disable the cache.<br/>If not set, it's default to 1/16 of OS memory with a max limitation of 512MB. |
|
||||||
| `region_engine.mito.page_cache_size` | String | Auto | Cache size for pages of SST row groups. Setting it to 0 to disable the cache.<br/>If not set, it's default to 1/8 of OS memory. |
|
| `region_engine.mito.page_cache_size` | String | `512MB` | Cache size for pages of SST row groups. Setting it to 0 to disable the cache.<br/>If not set, it's default to 1/16 of OS memory with a max limitation of 512MB. |
|
||||||
| `region_engine.mito.selector_result_cache_size` | String | Auto | Cache size for time series selector (e.g. `last_value()`). Setting it to 0 to disable the cache.<br/>If not set, it's default to 1/16 of OS memory with a max limitation of 512MB. |
|
| `region_engine.mito.enable_experimental_write_cache` | Bool | `false` | Whether to enable the experimental write cache. |
|
||||||
| `region_engine.mito.enable_write_cache` | Bool | `false` | Whether to enable the write cache. It's enabled by default when using object storage, and enabling it is recommended for better performance (see the sketch below the write cache options). |
|
| `region_engine.mito.experimental_write_cache_path` | String | `""` | File system path for write cache, defaults to `{data_home}/write_cache`. |
|
||||||
| `region_engine.mito.write_cache_path` | String | `""` | File system path for write cache, defaults to `{data_home}`. |
|
| `region_engine.mito.experimental_write_cache_size` | String | `512MB` | Capacity for write cache. |
|
||||||
| `region_engine.mito.write_cache_size` | String | `5GiB` | Capacity for write cache. If your disk space is sufficient, it is recommended to set it larger. |
|
| `region_engine.mito.experimental_write_cache_ttl` | String | `1h` | TTL for write cache. |
|
||||||
| `region_engine.mito.write_cache_ttl` | String | Unset | TTL for write cache. |
|
|
||||||
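As referenced above, the write cache options group under `[region_engine.mito]`. A minimal sketch, assuming an object storage backend where the cache pays off; the path and TTL values are illustrative, not required:

```toml
[region_engine.mito]
# Enable the local write cache in front of object storage.
enable_write_cache = true
# An empty path falls back to `{data_home}`.
write_cache_path = ""
# Size the cache generously if local disk space allows.
write_cache_size = "5GiB"
# Optional TTL; omit it to keep cached files until evicted by capacity.
write_cache_ttl = "8h"
```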
| `region_engine.mito.preload_index_cache` | Bool | `true` | Preload index (puffin) files into cache on region open (default: true).<br/>When enabled, index files are loaded into the write cache during region initialization,<br/>which can improve query performance at the cost of longer startup times. |
|
|
||||||
| `region_engine.mito.index_cache_percent` | Integer | `20` | Percentage of write cache capacity allocated for index (puffin) files (default: 20).<br/>The remaining capacity is used for data (parquet) files.<br/>Must be between 0 and 100 (exclusive). For example, with a 5GiB write cache and 20% allocation,<br/>1GiB is reserved for index files and 4GiB for data files. |
|
|
||||||
| `region_engine.mito.sst_write_buffer_size` | String | `8MB` | Buffer size for SST writing. |
|
| `region_engine.mito.sst_write_buffer_size` | String | `8MB` | Buffer size for SST writing. |
|
||||||
|
| `region_engine.mito.scan_parallelism` | Integer | `0` | Parallelism to scan a region (default: 1/4 of cpu cores).<br/>- `0`: using the default value (1/4 of cpu cores).<br/>- `1`: scan in current thread.<br/>- `n`: scan in parallelism n. |
|
||||||
| `region_engine.mito.parallel_scan_channel_size` | Integer | `32` | Capacity of the channel to send data from parallel scan tasks to the main task. |
|
| `region_engine.mito.parallel_scan_channel_size` | Integer | `32` | Capacity of the channel to send data from parallel scan tasks to the main task. |
|
||||||
| `region_engine.mito.max_concurrent_scan_files` | Integer | `384` | Maximum number of SST files to scan concurrently. |
|
|
||||||
| `region_engine.mito.allow_stale_entries` | Bool | `false` | Whether to allow stale WAL entries read during replay. |
|
| `region_engine.mito.allow_stale_entries` | Bool | `false` | Whether to allow stale WAL entries read during replay. |
|
||||||
| `region_engine.mito.scan_memory_limit` | String | `50%` | Memory limit for table scans across all queries.<br/>Supports absolute size (e.g., "2GB") or percentage of system memory (e.g., "20%").<br/>Setting it to 0 disables the limit.<br/>NOTE: Works with max_concurrent_queries for tiered memory allocation.<br/>- If max_concurrent_queries is set: 70% of queries get full access, 30% get 70% access.<br/>- If max_concurrent_queries is 0 (unlimited): first 20 queries get full access, rest get 70% access. |
|
|
||||||
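To make the tiered allocation above concrete, here is a hypothetical sizing sketch (the numbers are illustrative, assuming the percentages apply to the configured limit):

```toml
[region_engine.mito]
# On a host with 32 GB of memory, "50%" yields a 16 GB scan budget.
# If max_concurrent_queries is set, about 70% of the concurrent queries may use
# the full 16 GB, while the remaining queries are capped near 11.2 GB (70% of it).
scan_memory_limit = "50%"
```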
| `region_engine.mito.min_compaction_interval` | String | `0m` | Minimum time interval between two compactions.<br/>To align with the old behavior, the default value is 0 (no restrictions). |
|
|
||||||
| `region_engine.mito.default_experimental_flat_format` | Bool | `false` | Whether to enable experimental flat format as the default format. |
|
|
||||||
| `region_engine.mito.index` | -- | -- | The options for index in Mito engine. |
|
|
||||||
| `region_engine.mito.index.aux_path` | String | `""` | Auxiliary directory path for the index in filesystem, used to store intermediate files for<br/>creating the index and staging files for searching the index, defaults to `{data_home}/index_intermediate`.<br/>The default name for this directory is `index_intermediate` for backward compatibility.<br/><br/>This path contains two subdirectories:<br/>- `__intm`: for storing intermediate files used during creating index.<br/>- `staging`: for storing staging files used during searching index. |
|
|
||||||
| `region_engine.mito.index.staging_size` | String | `2GB` | The max capacity of the staging directory. |
|
|
||||||
| `region_engine.mito.index.staging_ttl` | String | `7d` | The TTL of the staging directory.<br/>Defaults to 7 days.<br/>Setting it to "0s" to disable TTL. |
|
|
||||||
| `region_engine.mito.index.metadata_cache_size` | String | `64MiB` | Cache size for inverted index metadata. |
|
|
||||||
| `region_engine.mito.index.content_cache_size` | String | `128MiB` | Cache size for inverted index content. |
|
|
||||||
| `region_engine.mito.index.content_cache_page_size` | String | `64KiB` | Page size for inverted index content cache. |
|
|
||||||
| `region_engine.mito.index.result_cache_size` | String | `128MiB` | Cache size for index result. |
|
|
||||||
| `region_engine.mito.inverted_index` | -- | -- | The options for inverted index in Mito engine. |
|
| `region_engine.mito.inverted_index` | -- | -- | The options for inverted index in Mito engine. |
|
||||||
| `region_engine.mito.inverted_index.create_on_flush` | String | `auto` | Whether to create the index on flush.<br/>- `auto`: automatically (default)<br/>- `disable`: never |
|
| `region_engine.mito.inverted_index.create_on_flush` | String | `auto` | Whether to create the index on flush.<br/>- `auto`: automatically<br/>- `disable`: never |
|
||||||
| `region_engine.mito.inverted_index.create_on_compaction` | String | `auto` | Whether to create the index on compaction.<br/>- `auto`: automatically (default)<br/>- `disable`: never |
|
| `region_engine.mito.inverted_index.create_on_compaction` | String | `auto` | Whether to create the index on compaction.<br/>- `auto`: automatically<br/>- `disable`: never |
|
||||||
| `region_engine.mito.inverted_index.apply_on_query` | String | `auto` | Whether to apply the index on query<br/>- `auto`: automatically (default)<br/>- `disable`: never |
|
| `region_engine.mito.inverted_index.apply_on_query` | String | `auto` | Whether to apply the index on query<br/>- `auto`: automatically<br/>- `disable`: never |
|
||||||
| `region_engine.mito.inverted_index.mem_threshold_on_create` | String | `auto` | Memory threshold for performing an external sort during index creation.<br/>- `auto`: automatically determine the threshold based on the system memory size (default)<br/>- `unlimited`: no memory limit<br/>- `[size]` e.g. `64MB`: fixed memory threshold |
|
| `region_engine.mito.inverted_index.mem_threshold_on_create` | String | `64M` | Memory threshold for performing an external sort during index creation.<br/>Setting to empty will disable external sorting, forcing all sorting operations to happen in memory. |
|
||||||
| `region_engine.mito.inverted_index.intermediate_path` | String | `""` | Deprecated, use `region_engine.mito.index.aux_path` instead. |
|
| `region_engine.mito.inverted_index.intermediate_path` | String | `""` | File system path to store intermediate files for external sorting (default `{data_home}/index_intermediate`). |
|
||||||
| `region_engine.mito.fulltext_index` | -- | -- | The options for full-text index in Mito engine. |
|
|
||||||
| `region_engine.mito.fulltext_index.create_on_flush` | String | `auto` | Whether to create the index on flush.<br/>- `auto`: automatically (default)<br/>- `disable`: never |
|
|
||||||
| `region_engine.mito.fulltext_index.create_on_compaction` | String | `auto` | Whether to create the index on compaction.<br/>- `auto`: automatically (default)<br/>- `disable`: never |
|
|
||||||
| `region_engine.mito.fulltext_index.apply_on_query` | String | `auto` | Whether to apply the index on query<br/>- `auto`: automatically (default)<br/>- `disable`: never |
|
|
||||||
| `region_engine.mito.fulltext_index.mem_threshold_on_create` | String | `auto` | Memory threshold for index creation.<br/>- `auto`: automatically determine the threshold based on the system memory size (default)<br/>- `unlimited`: no memory limit<br/>- `[size]` e.g. `64MB`: fixed memory threshold |
|
|
||||||
| `region_engine.mito.bloom_filter_index` | -- | -- | The options for bloom filter index in Mito engine. |
|
|
||||||
| `region_engine.mito.bloom_filter_index.create_on_flush` | String | `auto` | Whether to create the index on flush.<br/>- `auto`: automatically (default)<br/>- `disable`: never |
|
|
||||||
| `region_engine.mito.bloom_filter_index.create_on_compaction` | String | `auto` | Whether to create the index on compaction.<br/>- `auto`: automatically (default)<br/>- `disable`: never |
|
|
||||||
| `region_engine.mito.bloom_filter_index.apply_on_query` | String | `auto` | Whether to apply the index on query<br/>- `auto`: automatically (default)<br/>- `disable`: never |
|
|
||||||
| `region_engine.mito.bloom_filter_index.mem_threshold_on_create` | String | `auto` | Memory threshold for the index creation.<br/>- `auto`: automatically determine the threshold based on the system memory size (default)<br/>- `unlimited`: no memory limit<br/>- `[size]` e.g. `64MB`: fixed memory threshold |
|
|
||||||
| `region_engine.mito.memtable` | -- | -- | -- |
|
| `region_engine.mito.memtable` | -- | -- | -- |
|
||||||
| `region_engine.mito.memtable.type` | String | `time_series` | Memtable type.<br/>- `time_series`: time-series memtable<br/>- `partition_tree`: partition tree memtable (experimental) |
|
| `region_engine.mito.memtable.type` | String | `time_series` | Memtable type.<br/>- `time_series`: time-series memtable<br/>- `partition_tree`: partition tree memtable (experimental) |
|
||||||
| `region_engine.mito.memtable.index_max_keys_per_shard` | Integer | `8192` | The max number of keys in one shard.<br/>Only available for `partition_tree` memtable. |
|
| `region_engine.mito.memtable.index_max_keys_per_shard` | Integer | `8192` | The max number of keys in one shard.<br/>Only available for `partition_tree` memtable. |
|
||||||
| `region_engine.mito.memtable.data_freeze_threshold` | Integer | `32768` | The max rows of data inside the actively writing buffer in one shard.<br/>Only available for `partition_tree` memtable. |
|
| `region_engine.mito.memtable.data_freeze_threshold` | Integer | `32768` | The max rows of data inside the actively writing buffer in one shard.<br/>Only available for `partition_tree` memtable. |
|
||||||
| `region_engine.mito.memtable.fork_dictionary_bytes` | String | `1GiB` | Max dictionary bytes.<br/>Only available for `partition_tree` memtable. |
|
| `region_engine.mito.memtable.fork_dictionary_bytes` | String | `1GiB` | Max dictionary bytes.<br/>Only available for `partition_tree` memtable. |
|
||||||
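The memtable options above map onto a `[region_engine.mito.memtable]` table. A sketch that opts into the experimental partition tree memtable while keeping the documented defaults for its tuning knobs:

```toml
[region_engine.mito.memtable]
type = "partition_tree"            # experimental; default is "time_series"
index_max_keys_per_shard = 8192    # max keys per shard
data_freeze_threshold = 32768      # max rows in the active write buffer per shard
fork_dictionary_bytes = "1GiB"     # max dictionary bytes
```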
| `region_engine.file` | -- | -- | Enable the file engine. |
|
|
||||||
| `region_engine.metric` | -- | -- | Metric engine options. |
|
|
||||||
| `region_engine.metric.sparse_primary_key_encoding` | Bool | `true` | Whether to use sparse primary key encoding. |
|
|
||||||
| `logging` | -- | -- | The logging options. |
|
| `logging` | -- | -- | The logging options. |
|
||||||
| `logging.dir` | String | `./greptimedb_data/logs` | The directory to store the log files. If set to empty, logs will not be written to files. |
|
| `logging.dir` | String | `/tmp/greptimedb/logs` | The directory to store the log files. |
|
||||||
| `logging.level` | String | Unset | The log level. Can be `info`/`debug`/`warn`/`error`. |
|
| `logging.level` | String | `None` | The log level. Can be `info`/`debug`/`warn`/`error`. |
|
||||||
| `logging.enable_otlp_tracing` | Bool | `false` | Enable OTLP tracing. |
|
| `logging.enable_otlp_tracing` | Bool | `false` | Enable OTLP tracing. |
|
||||||
| `logging.otlp_endpoint` | String | `http://localhost:4318/v1/traces` | The OTLP tracing endpoint. |
|
| `logging.otlp_endpoint` | String | `None` | The OTLP tracing endpoint. |
|
||||||
| `logging.append_stdout` | Bool | `true` | Whether to append logs to stdout. |
|
| `logging.append_stdout` | Bool | `true` | Whether to append logs to stdout. |
|
||||||
| `logging.log_format` | String | `text` | The log format. Can be `text`/`json`. |
|
| `logging.tracing_sample_ratio` | -- | -- | The percentage of tracing that will be sampled and exported.<br/>Valid range `[0, 1]`: 1 means all traces are sampled, 0 means no traces are sampled; the default value is 1.<br/>Ratios > 1 are treated as 1 and ratios < 0 are treated as 0. |
|
||||||
| `logging.max_log_files` | Integer | `720` | The maximum amount of log files. |
|
|
||||||
| `logging.otlp_export_protocol` | String | `http` | The OTLP tracing export protocol. Can be `grpc`/`http`. |
|
|
||||||
| `logging.otlp_headers` | -- | -- | Additional OTLP headers, only valid when using OTLP http |
|
|
||||||
| `logging.tracing_sample_ratio` | -- | Unset | The percentage of tracing that will be sampled and exported.<br/>Valid range `[0, 1]`: 1 means all traces are sampled, 0 means no traces are sampled; the default value is 1.<br/>Ratios > 1 are treated as 1 and ratios < 0 are treated as 0. |
|
|
||||||
| `logging.tracing_sample_ratio.default_ratio` | Float | `1.0` | -- |
|
| `logging.tracing_sample_ratio.default_ratio` | Float | `1.0` | -- |
|
||||||
|
| `export_metrics` | -- | -- | The datanode can export its metrics and send them to a Prometheus-compatible service (e.g. `greptimedb` itself) via the remote-write API.<br/>This is only used for `greptimedb` to export its own metrics internally; it's different from Prometheus scraping. |
|
||||||
|
| `export_metrics.enable` | Bool | `false` | whether enable export metrics. |
|
||||||
|
| `export_metrics.write_interval` | String | `30s` | The interval of export metrics. |
|
||||||
|
| `export_metrics.self_import` | -- | -- | For `standalone` mode, `self_import` is recommended for collecting the metrics generated by the instance itself. |
|
||||||
|
| `export_metrics.self_import.db` | String | `None` | -- |
|
||||||
|
| `export_metrics.remote_write` | -- | -- | -- |
|
||||||
|
| `export_metrics.remote_write.url` | String | `""` | The URL the metrics are sent to, for example: `http://127.0.0.1:4000/v1/prometheus/write?db=information_schema`. |
|
||||||
|
| `export_metrics.remote_write.headers` | InlineTable | -- | HTTP headers carried by the Prometheus remote-write requests. |
|
||||||
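Putting the `export_metrics` keys above together, a minimal remote-write sketch could look like the following; the URL reuses the example from the table and should point at any Prometheus remote-write compatible endpoint:

```toml
[export_metrics]
enable = true
write_interval = "30s"

[export_metrics.remote_write]
# Example target: write the metrics back into greptimedb itself.
url = "http://127.0.0.1:4000/v1/prometheus/write?db=information_schema"
# Extra HTTP headers can be supplied as an inline table if needed.
headers = { }
```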
| `tracing` | -- | -- | The tracing options. Only effective when compiled with the `tokio-console` feature. |
|
| `tracing` | -- | -- | The tracing options. Only effective when compiled with the `tokio-console` feature. |
|
||||||
| `tracing.tokio_console_addr` | String | Unset | The tokio console address. |
|
| `tracing.tokio_console_addr` | String | `None` | The tokio console address. |
|
||||||
| `memory` | -- | -- | The memory options. |
|
|
||||||
| `memory.enable_heap_profiling` | Bool | `true` | Whether to enable heap profiling activation during startup.<br/>When enabled, heap profiling will be activated if the `MALLOC_CONF` environment variable<br/>is set to "prof:true,prof_active:false". The official image adds this env variable.<br/>Default is true. |
|
|
||||||
|
|
||||||
|
|
||||||
### Flownode
|
|
||||||
|
|
||||||
| Key | Type | Default | Descriptions |
|
|
||||||
| --- | -----| ------- | ----------- |
|
|
||||||
| `node_id` | Integer | Unset | The flownode identifier; it should be unique in the cluster. |
|
|
||||||
| `flow` | -- | -- | flow engine options. |
|
|
||||||
| `flow.num_workers` | Integer | `0` | The number of flow workers in the flownode.<br/>If not set (or set to 0), half the number of CPU cores is used. |
|
|
||||||
| `flow.batching_mode` | -- | -- | -- |
|
|
||||||
| `flow.batching_mode.query_timeout` | String | `600s` | The query timeout for the batching engine. Defaults to 10 minutes. |
|
|
||||||
| `flow.batching_mode.slow_query_threshold` | String | `60s` | A warning log is emitted for any query that runs longer than this threshold. |
|
|
||||||
| `flow.batching_mode.experimental_min_refresh_duration` | String | `5s` | The minimum duration between two query executions of a batching mode task. |
|
|
||||||
| `flow.batching_mode.grpc_conn_timeout` | String | `5s` | The gRPC connection timeout |
|
|
||||||
| `flow.batching_mode.experimental_grpc_max_retries` | Integer | `3` | The gRPC max retry number |
|
|
||||||
| `flow.batching_mode.experimental_frontend_scan_timeout` | String | `30s` | Timeout for the flow to wait for an available frontend.<br/>If no available frontend is found after `frontend_scan_timeout` elapses, an error is returned,<br/>which prevents the flownode from starting. |
|
|
||||||
| `flow.batching_mode.experimental_frontend_activity_timeout` | String | `60s` | Frontend activity timeout.<br/>If a frontend is down (not sending heartbeats) for longer than `frontend_activity_timeout`,<br/>it is removed from the list of frontends the flownode connects to. |
|
|
||||||
| `flow.batching_mode.experimental_max_filter_num_per_query` | Integer | `20` | Maximum number of filters allowed in a single query |
|
|
||||||
| `flow.batching_mode.experimental_time_window_merge_threshold` | Integer | `3` | Time window merge distance |
|
|
||||||
| `flow.batching_mode.read_preference` | String | `Leader` | Read preference of the Frontend client. |
|
|
||||||
| `flow.batching_mode.frontend_tls` | -- | -- | -- |
|
|
||||||
| `flow.batching_mode.frontend_tls.enabled` | Bool | `false` | Whether to enable TLS for client. |
|
|
||||||
| `flow.batching_mode.frontend_tls.server_ca_cert_path` | String | Unset | Server Certificate file path. |
|
|
||||||
| `flow.batching_mode.frontend_tls.client_cert_path` | String | Unset | Client Certificate file path. |
|
|
||||||
| `flow.batching_mode.frontend_tls.client_key_path` | String | Unset | Client Private key file path. |
|
|
||||||
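The `frontend_tls` keys above can be combined as below; the certificate paths are placeholders, and the client certificate/key pair is only needed when the frontend requires mutual TLS:

```toml
[flow.batching_mode.frontend_tls]
enabled = true
server_ca_cert_path = "/path/to/server_ca.pem"   # hypothetical path
client_cert_path = "/path/to/client_cert.pem"    # hypothetical path
client_key_path = "/path/to/client_key.pem"      # hypothetical path
```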
| `grpc` | -- | -- | The gRPC server options. |
|
|
||||||
| `grpc.bind_addr` | String | `127.0.0.1:6800` | The address to bind the gRPC server. |
|
|
||||||
| `grpc.server_addr` | String | `127.0.0.1:6800` | The address advertised to the metasrv,<br/>and used for connections from outside the host |
|
|
||||||
| `grpc.runtime_size` | Integer | `2` | The number of server worker threads. |
|
|
||||||
| `grpc.max_recv_message_size` | String | `512MB` | The maximum receive message size for gRPC server. |
|
|
||||||
| `grpc.max_send_message_size` | String | `512MB` | The maximum send message size for gRPC server. |
|
|
||||||
| `http` | -- | -- | The HTTP server options. |
|
|
||||||
| `http.addr` | String | `127.0.0.1:4000` | The address to bind the HTTP server. |
|
|
||||||
| `http.timeout` | String | `0s` | HTTP request timeout. Set to 0 to disable timeout. |
|
|
||||||
| `http.body_limit` | String | `64MB` | HTTP request body limit.<br/>The following units are supported: `B`, `KB`, `KiB`, `MB`, `MiB`, `GB`, `GiB`, `TB`, `TiB`, `PB`, `PiB`.<br/>Set to 0 to disable limit. |
|
|
||||||
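For reference, the flownode server options above translate into `[grpc]` and `[http]` tables; this sketch simply restates the documented defaults:

```toml
[grpc]
bind_addr = "127.0.0.1:6800"     # listen address
server_addr = "127.0.0.1:6800"   # address advertised to the metasrv
runtime_size = 2

[http]
addr = "127.0.0.1:4000"
timeout = "0s"                   # 0 disables the request timeout
body_limit = "64MB"
```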
| `meta_client` | -- | -- | The metasrv client options. |
|
|
||||||
| `meta_client.metasrv_addrs` | Array | -- | The addresses of the metasrv. |
|
|
||||||
| `meta_client.timeout` | String | `3s` | Operation timeout. |
|
|
||||||
| `meta_client.ddl_timeout` | String | `10s` | DDL timeout. |
|
|
||||||
| `meta_client.connect_timeout` | String | `1s` | Connect server timeout. |
|
|
||||||
| `meta_client.tcp_nodelay` | Bool | `true` | `TCP_NODELAY` option for accepted connections. |
|
|
||||||
| `meta_client.metadata_cache_max_capacity` | Integer | `100000` | The maximum capacity of the metadata cache. |
|
|
||||||
| `meta_client.metadata_cache_ttl` | String | `10m` | TTL of the metadata cache. |
|
|
||||||
| `meta_client.metadata_cache_tti` | String | `5m` | -- |
|
|
||||||
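A corresponding `[meta_client]` sketch using the defaults listed above; the metasrv address is an example for a local deployment:

```toml
[meta_client]
metasrv_addrs = ["127.0.0.1:3002"]   # example local metasrv
timeout = "3s"
ddl_timeout = "10s"
connect_timeout = "1s"
tcp_nodelay = true
metadata_cache_max_capacity = 100000
metadata_cache_ttl = "10m"
metadata_cache_tti = "5m"
```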
| `heartbeat` | -- | -- | The heartbeat options. |
|
|
||||||
| `heartbeat.interval` | String | `3s` | Interval for sending heartbeat messages to the metasrv. |
|
|
||||||
| `heartbeat.retry_interval` | String | `3s` | Interval for retrying to send heartbeat messages to the metasrv. |
|
|
||||||
| `logging` | -- | -- | The logging options. |
|
|
||||||
| `logging.dir` | String | `./greptimedb_data/logs` | The directory to store the log files. If set to empty, logs will not be written to files. |
|
|
||||||
| `logging.level` | String | Unset | The log level. Can be `info`/`debug`/`warn`/`error`. |
|
|
||||||
| `logging.enable_otlp_tracing` | Bool | `false` | Enable OTLP tracing. |
|
|
||||||
| `logging.otlp_endpoint` | String | `http://localhost:4318/v1/traces` | The OTLP tracing endpoint. |
|
|
||||||
| `logging.append_stdout` | Bool | `true` | Whether to append logs to stdout. |
|
|
||||||
| `logging.log_format` | String | `text` | The log format. Can be `text`/`json`. |
|
|
||||||
| `logging.max_log_files` | Integer | `720` | The maximum amount of log files. |
|
|
||||||
| `logging.otlp_export_protocol` | String | `http` | The OTLP tracing export protocol. Can be `grpc`/`http`. |
|
|
||||||
| `logging.otlp_headers` | -- | -- | Additional OTLP headers, only valid when using OTLP http |
|
|
||||||
| `logging.tracing_sample_ratio` | -- | Unset | The percentage of tracing that will be sampled and exported.<br/>Valid range `[0, 1]`: 1 means all traces are sampled, 0 means no traces are sampled; the default value is 1.<br/>Ratios > 1 are treated as 1 and ratios < 0 are treated as 0. |
|
|
||||||
| `logging.tracing_sample_ratio.default_ratio` | Float | `1.0` | -- |
|
|
||||||
| `tracing` | -- | -- | The tracing options. Only effective when compiled with the `tokio-console` feature. |
|
|
||||||
| `tracing.tokio_console_addr` | String | Unset | The tokio console address. |
|
|
||||||
| `query` | -- | -- | -- |
|
|
||||||
| `query.parallelism` | Integer | `1` | Parallelism of the query engine for queries sent by the flownode.<br/>Defaults to 1 so that it won't use too much CPU or memory. |
|
|
||||||
| `query.memory_pool_size` | String | `50%` | Memory pool size for query execution operators (aggregation, sorting, join).<br/>Supports absolute size (e.g., "1GB", "2GB") or percentage of system memory (e.g., "20%").<br/>Setting it to 0 disables the limit (unbounded, default behavior).<br/>When this limit is reached, queries will fail with ResourceExhausted error.<br/>NOTE: This does NOT limit memory used by table scans. |
|
|
||||||
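The flownode query limits above can be expressed as a small `[query]` table; this sketch keeps the conservative defaults so flow queries stay lightweight:

```toml
[query]
parallelism = 1            # keep flow queries on a single core by default
memory_pool_size = "50%"   # cap operator memory; scans are not covered by this limit
```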
| `memory` | -- | -- | The memory options. |
|
|
||||||
| `memory.enable_heap_profiling` | Bool | `true` | Whether to enable heap profiling activation during startup.<br/>When enabled, heap profiling will be activated if the `MALLOC_CONF` environment variable<br/>is set to "prof:true,prof_active:false". The official image adds this env variable.<br/>Default is true. |
|
|
||||||
|
|||||||
@@ -1,10 +1,9 @@
|
|||||||
## The datanode identifier; it should be unique in the cluster.
|
## The running mode of the datanode. It can be `standalone` or `distributed`.
|
||||||
## @toml2docs:none-default
|
mode = "standalone"
|
||||||
node_id = 42
|
|
||||||
|
|
||||||
## The default column prefix for auto-created time index and value columns.
|
## The datanode identifier; it should be unique in the cluster.
|
||||||
## @toml2docs:none-default
|
## +toml2docs:none-default
|
||||||
default_column_prefix = "greptime"
|
node_id = 42
|
||||||
|
|
||||||
## Start services after regions have obtained leases.
|
## Start services after regions have obtained leases.
|
||||||
## It will block the datanode start if it can't receive leases in the heartbeat from metasrv.
|
## It will block the datanode start if it can't receive leases in the heartbeat from metasrv.
|
||||||
@@ -14,74 +13,24 @@ require_lease_before_startup = false
|
|||||||
## By default, it provides services after all regions have been initialized.
|
## By default, it provides services after all regions have been initialized.
|
||||||
init_regions_in_background = false
|
init_regions_in_background = false
|
||||||
|
|
||||||
## Parallelism of initializing regions.
|
## The gRPC address of the datanode.
|
||||||
init_regions_parallelism = 16
|
rpc_addr = "127.0.0.1:3001"
|
||||||
|
|
||||||
## The maximum number of concurrent queries allowed to be executed. Zero means unlimited.
|
## The hostname of the datanode.
|
||||||
## NOTE: This setting affects scan_memory_limit's privileged tier allocation.
|
## +toml2docs:none-default
|
||||||
## When set, 70% of queries get privileged memory access (full scan_memory_limit).
|
rpc_hostname = "127.0.0.1"
|
||||||
## The remaining 30% get standard tier access (70% of scan_memory_limit).
|
|
||||||
max_concurrent_queries = 0
|
|
||||||
|
|
||||||
## Enable telemetry to collect anonymous usage data. Enabled by default.
|
## The number of gRPC server worker threads.
|
||||||
#+ enable_telemetry = true
|
rpc_runtime_size = 8
|
||||||
|
|
||||||
## The HTTP server options.
|
|
||||||
[http]
|
|
||||||
## The address to bind the HTTP server.
|
|
||||||
addr = "127.0.0.1:4000"
|
|
||||||
## HTTP request timeout. Set to 0 to disable timeout.
|
|
||||||
timeout = "0s"
|
|
||||||
## HTTP request body limit.
|
|
||||||
## The following units are supported: `B`, `KB`, `KiB`, `MB`, `MiB`, `GB`, `GiB`, `TB`, `TiB`, `PB`, `PiB`.
|
|
||||||
## Set to 0 to disable limit.
|
|
||||||
body_limit = "64MB"
|
|
||||||
|
|
||||||
## The gRPC server options.
|
|
||||||
[grpc]
|
|
||||||
## The address to bind the gRPC server.
|
|
||||||
bind_addr = "127.0.0.1:3001"
|
|
||||||
## The address advertised to the metasrv, and used for connections from outside the host.
|
|
||||||
## If left empty or unset, the server will automatically use the IP address of the first network interface
|
|
||||||
## on the host, with the same port number as the one specified in `grpc.bind_addr`.
|
|
||||||
server_addr = "127.0.0.1:3001"
|
|
||||||
## The number of server worker threads.
|
|
||||||
runtime_size = 8
|
|
||||||
## The maximum receive message size for gRPC server.
|
## The maximum receive message size for gRPC server.
|
||||||
max_recv_message_size = "512MB"
|
rpc_max_recv_message_size = "512MB"
|
||||||
|
|
||||||
## The maximum send message size for gRPC server.
|
## The maximum send message size for gRPC server.
|
||||||
max_send_message_size = "512MB"
|
rpc_max_send_message_size = "512MB"
|
||||||
## Compression mode for datanode side Arrow IPC service. Available options:
|
|
||||||
## - `none`: disable all compression
|
|
||||||
## - `transport`: only enable gRPC transport compression (zstd)
|
|
||||||
## - `arrow_ipc`: only enable Arrow IPC compression (lz4)
|
|
||||||
## - `all`: enable all compression.
|
|
||||||
## Default to `none`
|
|
||||||
flight_compression = "arrow_ipc"
|
|
||||||
|
|
||||||
## gRPC server TLS options, see `mysql.tls` section.
|
## Enable telemetry to collect anonymous usage data.
|
||||||
[grpc.tls]
|
enable_telemetry = true
|
||||||
## TLS mode.
|
|
||||||
mode = "disable"
|
|
||||||
|
|
||||||
## Certificate file path.
|
|
||||||
## @toml2docs:none-default
|
|
||||||
cert_path = ""
|
|
||||||
|
|
||||||
## Private key file path.
|
|
||||||
## @toml2docs:none-default
|
|
||||||
key_path = ""
|
|
||||||
|
|
||||||
## Watch for Certificate and key file change and auto reload.
|
|
||||||
## For now, gRPC tls config does not support auto reload.
|
|
||||||
watch = false
|
|
||||||
|
|
||||||
## The runtime options.
|
|
||||||
#+ [runtime]
|
|
||||||
## The number of threads to execute the runtime for global read operations.
|
|
||||||
#+ global_rt_size = 8
|
|
||||||
## The number of threads to execute the runtime for global write operations.
|
|
||||||
#+ compact_rt_size = 4
|
|
||||||
|
|
||||||
## The heartbeat options.
|
## The heartbeat options.
|
||||||
[heartbeat]
|
[heartbeat]
|
||||||
@@ -99,6 +48,9 @@ metasrv_addrs = ["127.0.0.1:3002"]
|
|||||||
## Operation timeout.
|
## Operation timeout.
|
||||||
timeout = "3s"
|
timeout = "3s"
|
||||||
|
|
||||||
|
## Heartbeat timeout.
|
||||||
|
heartbeat_timeout = "500ms"
|
||||||
|
|
||||||
## DDL timeout.
|
## DDL timeout.
|
||||||
ddl_timeout = "10s"
|
ddl_timeout = "10s"
|
||||||
|
|
||||||
@@ -122,25 +74,24 @@ metadata_cache_tti = "5m"
|
|||||||
## The provider of the WAL.
|
## The provider of the WAL.
|
||||||
## - `raft_engine`: the wal is stored in the local file system by raft-engine.
|
## - `raft_engine`: the wal is stored in the local file system by raft-engine.
|
||||||
## - `kafka`: it's remote wal that data is stored in Kafka.
|
## - `kafka`: it's remote wal that data is stored in Kafka.
|
||||||
## - `noop`: a no-op WAL provider that does not store any WAL data.<br/>**Note: any unflushed data will be lost when the datanode is shut down.**
|
|
||||||
provider = "raft_engine"
|
provider = "raft_engine"
|
||||||
|
|
||||||
## The directory to store the WAL files.
|
## The directory to store the WAL files.
|
||||||
## **It's only used when the provider is `raft_engine`**.
|
## **It's only used when the provider is `raft_engine`**.
|
||||||
## @toml2docs:none-default
|
## +toml2docs:none-default
|
||||||
dir = "./greptimedb_data/wal"
|
dir = "/tmp/greptimedb/wal"
|
||||||
|
|
||||||
## The size of the WAL segment file.
|
## The size of the WAL segment file.
|
||||||
## **It's only used when the provider is `raft_engine`**.
|
## **It's only used when the provider is `raft_engine`**.
|
||||||
file_size = "128MB"
|
file_size = "256MB"
|
||||||
|
|
||||||
## The threshold of the WAL size to trigger a purge.
|
## The threshold of the WAL size to trigger a flush.
|
||||||
## **It's only used when the provider is `raft_engine`**.
|
## **It's only used when the provider is `raft_engine`**.
|
||||||
purge_threshold = "1GB"
|
purge_threshold = "4GB"
|
||||||
|
|
||||||
## The interval to trigger a purge.
|
## The interval to trigger a flush.
|
||||||
## **It's only used when the provider is `raft_engine`**.
|
## **It's only used when the provider is `raft_engine`**.
|
||||||
purge_interval = "1m"
|
purge_interval = "10m"
|
||||||
|
|
||||||
## The read batch size.
|
## The read batch size.
|
||||||
## **It's only used when the provider is `raft_engine`**.
|
## **It's only used when the provider is `raft_engine`**.
|
||||||
@@ -162,9 +113,6 @@ prefill_log_files = false
|
|||||||
## **It's only used when the provider is `raft_engine`**.
|
## **It's only used when the provider is `raft_engine`**.
|
||||||
sync_period = "10s"
|
sync_period = "10s"
|
||||||
|
|
||||||
## Parallelism during WAL recovery.
|
|
||||||
recovery_parallelism = 2
|
|
||||||
|
|
||||||
## The Kafka broker endpoints.
|
## The Kafka broker endpoints.
|
||||||
## **It's only used when the provider is `kafka`**.
|
## **It's only used when the provider is `kafka`**.
|
||||||
broker_endpoints = ["127.0.0.1:9092"]
|
broker_endpoints = ["127.0.0.1:9092"]
|
||||||
@@ -172,48 +120,31 @@ broker_endpoints = ["127.0.0.1:9092"]
|
|||||||
## The max size of a single producer batch.
|
## The max size of a single producer batch.
|
||||||
## Warning: Kafka has a default limit of 1MB per message in a topic.
|
## Warning: Kafka has a default limit of 1MB per message in a topic.
|
||||||
## **It's only used when the provider is `kafka`**.
|
## **It's only used when the provider is `kafka`**.
|
||||||
max_batch_bytes = "1MB"
|
max_batch_size = "1MB"
|
||||||
|
|
||||||
|
## The linger duration of a kafka batch producer.
|
||||||
|
## **It's only used when the provider is `kafka`**.
|
||||||
|
linger = "200ms"
|
||||||
|
|
||||||
## The consumer wait timeout.
|
## The consumer wait timeout.
|
||||||
## **It's only used when the provider is `kafka`**.
|
## **It's only used when the provider is `kafka`**.
|
||||||
consumer_wait_timeout = "100ms"
|
consumer_wait_timeout = "100ms"
|
||||||
|
|
||||||
## Whether to enable WAL index creation.
|
## The initial backoff delay.
|
||||||
## **It's only used when the provider is `kafka`**.
|
## **It's only used when the provider is `kafka`**.
|
||||||
create_index = true
|
backoff_init = "500ms"
|
||||||
|
|
||||||
## The interval for dumping WAL indexes.
|
## The maximum backoff delay.
|
||||||
## **It's only used when the provider is `kafka`**.
|
## **It's only used when the provider is `kafka`**.
|
||||||
dump_index_interval = "60s"
|
backoff_max = "10s"
|
||||||
|
|
||||||
## Ignore missing entries when reading the WAL.
|
## The exponential backoff rate, i.e. next backoff = base * current backoff.
|
||||||
## **It's only used when the provider is `kafka`**.
|
## **It's only used when the provider is `kafka`**.
|
||||||
##
|
backoff_base = 2
|
||||||
## This option ensures that when Kafka messages are deleted, the system
|
|
||||||
## can still successfully replay memtable data without throwing an
|
|
||||||
## out-of-range error.
|
|
||||||
## However, enabling this option might lead to unexpected data loss,
|
|
||||||
## as the system will skip over missing entries instead of treating
|
|
||||||
## them as critical errors.
|
|
||||||
overwrite_entry_start_id = false
|
|
||||||
|
|
||||||
# The Kafka SASL configuration.
|
## The deadline of retries.
|
||||||
# **It's only used when the provider is `kafka`**.
|
## **It's only used when the provider is `kafka`**.
|
||||||
# Available SASL mechanisms:
|
backoff_deadline = "5mins"
|
||||||
# - `PLAIN`
|
|
||||||
# - `SCRAM-SHA-256`
|
|
||||||
# - `SCRAM-SHA-512`
|
|
||||||
# [wal.sasl]
|
|
||||||
# type = "SCRAM-SHA-512"
|
|
||||||
# username = "user_kafka"
|
|
||||||
# password = "secret"
|
|
||||||
|
|
||||||
# The Kafka TLS configuration.
|
|
||||||
# **It's only used when the provider is `kafka`**.
|
|
||||||
# [wal.tls]
|
|
||||||
# server_ca_cert_path = "/path/to/server_cert"
|
|
||||||
# client_cert_path = "/path/to/client_cert"
|
|
||||||
# client_key_path = "/path/to/key"
|
|
||||||
|
|
||||||
# Example of using S3 as the storage.
|
# Example of using S3 as the storage.
|
||||||
# [storage]
|
# [storage]
|
||||||
@@ -224,7 +155,6 @@ overwrite_entry_start_id = false
|
|||||||
# secret_access_key = "123456"
|
# secret_access_key = "123456"
|
||||||
# endpoint = "https://s3.amazonaws.com"
|
# endpoint = "https://s3.amazonaws.com"
|
||||||
# region = "us-west-2"
|
# region = "us-west-2"
|
||||||
# enable_virtual_host_style = false
|
|
||||||
|
|
||||||
# Example of using Oss as the storage.
|
# Example of using Oss as the storage.
|
||||||
# [storage]
|
# [storage]
|
||||||
@@ -252,26 +182,12 @@ overwrite_entry_start_id = false
|
|||||||
# root = "data"
|
# root = "data"
|
||||||
# scope = "test"
|
# scope = "test"
|
||||||
# credential_path = "123456"
|
# credential_path = "123456"
|
||||||
# credential = "base64-credential"
|
|
||||||
# endpoint = "https://storage.googleapis.com"
|
# endpoint = "https://storage.googleapis.com"
|
||||||
|
|
||||||
## The query engine options.
|
|
||||||
[query]
|
|
||||||
## Parallelism of the query engine.
|
|
||||||
## Default to 0, which means the number of CPU cores.
|
|
||||||
parallelism = 0
|
|
||||||
|
|
||||||
## Memory pool size for query execution operators (aggregation, sorting, join).
|
|
||||||
## Supports absolute size (e.g., "2GB", "4GB") or percentage of system memory (e.g., "20%").
|
|
||||||
## Setting it to 0 disables the limit (unbounded, default behavior).
|
|
||||||
## When this limit is reached, queries will fail with ResourceExhausted error.
|
|
||||||
## NOTE: This does NOT limit memory used by table scans.
|
|
||||||
memory_pool_size = "50%"
|
|
||||||
|
|
||||||
## The data storage options.
|
## The data storage options.
|
||||||
[storage]
|
[storage]
|
||||||
## The working home directory.
|
## The working home directory.
|
||||||
data_home = "./greptimedb_data"
|
data_home = "/tmp/greptimedb/"
|
||||||
|
|
||||||
## The storage type used to store the data.
|
## The storage type used to store the data.
|
||||||
## - `File`: the data is stored in the local file system.
|
## - `File`: the data is stored in the local file system.
|
||||||
@@ -281,130 +197,87 @@ data_home = "./greptimedb_data"
|
|||||||
## - `Oss`: the data is stored in the Aliyun OSS.
|
## - `Oss`: the data is stored in the Aliyun OSS.
|
||||||
type = "File"
|
type = "File"
|
||||||
|
|
||||||
## Read cache configuration for object storage such as 'S3'. It's configured by default when using object storage, and configuring it is recommended for better performance.
|
## Cache configuration for object storage such as 'S3' etc.
|
||||||
## A local file directory, defaults to `{data_home}`. An empty string disables the read cache.
|
## The local file cache directory.
|
||||||
## @toml2docs:none-default
|
## +toml2docs:none-default
|
||||||
#+ cache_path = ""
|
cache_path = "/path/local_cache"
|
||||||
|
|
||||||
## Whether to enable read cache. If not set, the read cache will be enabled by default when using object storage.
|
## The local file cache capacity in bytes.
|
||||||
#+ enable_read_cache = true
|
## +toml2docs:none-default
|
||||||
|
cache_capacity = "256MB"
|
||||||
## The local file cache capacity in bytes. If your disk space is sufficient, it is recommended to set it larger.
|
|
||||||
## @toml2docs:none-default
|
|
||||||
cache_capacity = "5GiB"
|
|
||||||
|
|
||||||
## The S3 bucket name.
|
## The S3 bucket name.
|
||||||
## **It's only used when the storage type is `S3`, `Oss` and `Gcs`**.
|
## **It's only used when the storage type is `S3`, `Oss` and `Gcs`**.
|
||||||
## @toml2docs:none-default
|
## +toml2docs:none-default
|
||||||
bucket = "greptimedb"
|
bucket = "greptimedb"
|
||||||
|
|
||||||
## The S3 data will be stored in the specified prefix, for example, `s3://${bucket}/${root}`.
|
## The S3 data will be stored in the specified prefix, for example, `s3://${bucket}/${root}`.
|
||||||
## **It's only used when the storage type is `S3`, `Oss` and `Azblob`**.
|
## **It's only used when the storage type is `S3`, `Oss` and `Azblob`**.
|
||||||
## @toml2docs:none-default
|
## +toml2docs:none-default
|
||||||
root = "greptimedb"
|
root = "greptimedb"
|
||||||
|
|
||||||
## The access key id of the aws account.
|
## The access key id of the aws account.
|
||||||
## It's **highly recommended** to use AWS IAM roles instead of hardcoding the access key id and secret key.
|
## It's **highly recommended** to use AWS IAM roles instead of hardcoding the access key id and secret key.
|
||||||
## **It's only used when the storage type is `S3` and `Oss`**.
|
## **It's only used when the storage type is `S3` and `Oss`**.
|
||||||
## @toml2docs:none-default
|
## +toml2docs:none-default
|
||||||
access_key_id = "test"
|
access_key_id = "test"
|
||||||
|
|
||||||
## The secret access key of the aws account.
|
## The secret access key of the aws account.
|
||||||
## It's **highly recommended** to use AWS IAM roles instead of hardcoding the access key id and secret key.
|
## It's **highly recommended** to use AWS IAM roles instead of hardcoding the access key id and secret key.
|
||||||
## **It's only used when the storage type is `S3`**.
|
## **It's only used when the storage type is `S3`**.
|
||||||
## @toml2docs:none-default
|
## +toml2docs:none-default
|
||||||
secret_access_key = "test"
|
secret_access_key = "test"
|
||||||
|
|
||||||
## The secret access key of the aliyun account.
|
## The secret access key of the aliyun account.
|
||||||
## **It's only used when the storage type is `Oss`**.
|
## **It's only used when the storage type is `Oss`**.
|
||||||
## @toml2docs:none-default
|
## +toml2docs:none-default
|
||||||
access_key_secret = "test"
|
access_key_secret = "test"
|
||||||
|
|
||||||
## The account key of the azure account.
|
## The account key of the azure account.
|
||||||
## **It's only used when the storage type is `Azblob`**.
|
## **It's only used when the storage type is `Azblob`**.
|
||||||
## @toml2docs:none-default
|
## +toml2docs:none-default
|
||||||
account_name = "test"
|
account_name = "test"
|
||||||
|
|
||||||
## The account key of the azure account.
|
## The account key of the azure account.
|
||||||
## **It's only used when the storage type is `Azblob`**.
|
## **It's only used when the storage type is `Azblob`**.
|
||||||
## @toml2docs:none-default
|
## +toml2docs:none-default
|
||||||
account_key = "test"
|
account_key = "test"
|
||||||
|
|
||||||
## The scope of the google cloud storage.
|
## The scope of the google cloud storage.
|
||||||
## **It's only used when the storage type is `Gcs`**.
|
## **It's only used when the storage type is `Gcs`**.
|
||||||
## @toml2docs:none-default
|
## +toml2docs:none-default
|
||||||
scope = "test"
|
scope = "test"
|
||||||
|
|
||||||
## The credential path of the google cloud storage.
|
## The credential path of the google cloud storage.
|
||||||
## **It's only used when the storage type is `Gcs`**.
|
## **It's only used when the storage type is `Gcs`**.
|
||||||
## @toml2docs:none-default
|
## +toml2docs:none-default
|
||||||
credential_path = "test"
|
credential_path = "test"
|
||||||
|
|
||||||
## The credential of the google cloud storage.
|
|
||||||
## **It's only used when the storage type is `Gcs`**.
|
|
||||||
## @toml2docs:none-default
|
|
||||||
credential = "base64-credential"
|
|
||||||
|
|
||||||
## The container of the azure account.
|
## The container of the azure account.
|
||||||
## **It's only used when the storage type is `Azblob`**.
|
## **It's only used when the storage type is `Azblob`**.
|
||||||
## @toml2docs:none-default
|
## +toml2docs:none-default
|
||||||
container = "greptimedb"
|
container = "greptimedb"
|
||||||
|
|
||||||
## The sas token of the azure account.
|
## The sas token of the azure account.
|
||||||
## **It's only used when the storage type is `Azblob`**.
|
## **It's only used when the storage type is `Azblob`**.
|
||||||
## @toml2docs:none-default
|
## +toml2docs:none-default
|
||||||
sas_token = ""
|
sas_token = ""
|
||||||
|
|
||||||
## The endpoint of the S3 service.
|
## The endpoint of the S3 service.
|
||||||
## **It's only used when the storage type is `S3`, `Oss`, `Gcs` and `Azblob`**.
|
## **It's only used when the storage type is `S3`, `Oss`, `Gcs` and `Azblob`**.
|
||||||
## @toml2docs:none-default
|
## +toml2docs:none-default
|
||||||
endpoint = "https://s3.amazonaws.com"
|
endpoint = "https://s3.amazonaws.com"
|
||||||
|
|
||||||
## The region of the S3 service.
|
## The region of the S3 service.
|
||||||
## **It's only used when the storage type is `S3`, `Oss`, `Gcs` and `Azblob`**.
|
## **It's only used when the storage type is `S3`, `Oss`, `Gcs` and `Azblob`**.
|
||||||
## @toml2docs:none-default
|
## +toml2docs:none-default
|
||||||
region = "us-west-2"
|
region = "us-west-2"
|
||||||
|
|
||||||
## The http client options to the storage.
|
|
||||||
## **It's only used when the storage type is `S3`, `Oss`, `Gcs` and `Azblob`**.
|
|
||||||
[storage.http_client]
|
|
||||||
|
|
||||||
## The maximum idle connection per host allowed in the pool.
|
|
||||||
pool_max_idle_per_host = 1024
|
|
||||||
|
|
||||||
## The timeout for only the connect phase of a http client.
|
|
||||||
connect_timeout = "30s"
|
|
||||||
|
|
||||||
## The total request timeout, applied from when the request starts connecting until the response body has finished.
|
|
||||||
## Also considered a total deadline.
|
|
||||||
timeout = "30s"
|
|
||||||
|
|
||||||
## The timeout for idle sockets being kept-alive.
|
|
||||||
pool_idle_timeout = "90s"
|
|
||||||
|
|
||||||
## To skip the ssl verification
|
|
||||||
## **Security Notice**: Setting `skip_ssl_validation = true` disables certificate verification, making connections vulnerable to man-in-the-middle attacks. Only use this in development or trusted private networks.
|
|
||||||
skip_ssl_validation = false
|
|
||||||
|
|
||||||
# Custom storage options
|
# Custom storage options
|
||||||
# [[storage.providers]]
|
# [[storage.providers]]
|
||||||
# name = "S3"
|
|
||||||
# type = "S3"
|
# type = "S3"
|
||||||
# bucket = "greptimedb"
|
|
||||||
# root = "data"
|
|
||||||
# access_key_id = "test"
|
|
||||||
# secret_access_key = "123456"
|
|
||||||
# endpoint = "https://s3.amazonaws.com"
|
|
||||||
# region = "us-west-2"
|
|
||||||
# [[storage.providers]]
|
# [[storage.providers]]
|
||||||
# name = "Gcs"
|
|
||||||
# type = "Gcs"
|
# type = "Gcs"
|
||||||
# bucket = "greptimedb"
|
|
||||||
# root = "data"
|
|
||||||
# scope = "test"
|
|
||||||
# credential_path = "123456"
|
|
||||||
# credential = "base64-credential"
|
|
||||||
# endpoint = "https://storage.googleapis.com"
|
|
||||||
|
|
||||||
## The region engine options. You can configure multiple region engines.
|
## The region engine options. You can configure multiple region engines.
|
||||||
[[region_engine]]
|
[[region_engine]]
|
||||||
@@ -413,7 +286,7 @@ skip_ssl_validation = false
|
|||||||
[region_engine.mito]
|
[region_engine.mito]
|
||||||
|
|
||||||
## Number of region workers.
|
## Number of region workers.
|
||||||
#+ num_workers = 8
|
num_workers = 8
|
||||||
|
|
||||||
## Request channel size of each worker.
|
## Request channel size of each worker.
|
||||||
worker_channel_size = 128
|
worker_channel_size = 128
|
||||||
@@ -424,223 +297,85 @@ worker_request_batch_size = 64
|
|||||||
## Number of meta action updated to trigger a new checkpoint for the manifest.
|
## Number of meta action updated to trigger a new checkpoint for the manifest.
|
||||||
manifest_checkpoint_distance = 10
|
manifest_checkpoint_distance = 10
|
||||||
|
|
||||||
|
|
||||||
## Number of removed files to keep in manifest's `removed_files` field before also
|
|
||||||
## remove them from `removed_files`. Mostly for debugging purpose.
|
|
||||||
## If set to 0, it will only use `keep_removed_file_ttl` to decide when to remove files
|
|
||||||
## from `removed_files` field.
|
|
||||||
experimental_manifest_keep_removed_file_count = 256
|
|
||||||
|
|
||||||
## How long to keep removed files in the `removed_files` field of manifest
|
|
||||||
## after they are removed from manifest.
|
|
||||||
## files will only be removed from `removed_files` field
|
|
||||||
## if both `keep_removed_file_count` and `keep_removed_file_ttl` is reached.
|
|
||||||
experimental_manifest_keep_removed_file_ttl = "1h"
|
|
||||||
|
|
||||||
## Whether to compress manifest and checkpoint file by gzip (default false).
|
## Whether to compress manifest and checkpoint file by gzip (default false).
|
||||||
compress_manifest = false
|
compress_manifest = false
|
||||||
|
|
||||||
## Max number of running background flush jobs (default: 1/2 of cpu cores).
|
## Max number of running background jobs
|
||||||
## @toml2docs:none-default="Auto"
|
max_background_jobs = 4
|
||||||
#+ max_background_flushes = 4
|
|
||||||
|
|
||||||
## Max number of running background compaction jobs (default: 1/4 of cpu cores).
|
|
||||||
## @toml2docs:none-default="Auto"
|
|
||||||
#+ max_background_compactions = 2
|
|
||||||
|
|
||||||
## Max number of running background purge jobs (default: number of cpu cores).
|
|
||||||
## @toml2docs:none-default="Auto"
|
|
||||||
#+ max_background_purges = 8
|
|
||||||
|
|
||||||
## Interval to auto flush a region if it has not flushed yet.
|
## Interval to auto flush a region if it has not flushed yet.
|
||||||
auto_flush_interval = "1h"
|
auto_flush_interval = "1h"
|
||||||
|
|
||||||
## Global write buffer size for all regions. If not set, it's default to 1/8 of OS memory with a max limitation of 1GB.
|
## Global write buffer size for all regions. If not set, it's default to 1/8 of OS memory with a max limitation of 1GB.
|
||||||
## @toml2docs:none-default="Auto"
|
global_write_buffer_size = "1GB"
|
||||||
#+ global_write_buffer_size = "1GB"
|
|
||||||
|
|
||||||
## Global write buffer size threshold to reject write requests. If not set, it's default to 2 times of `global_write_buffer_size`
|
## Global write buffer size threshold to reject write requests. If not set, it's default to 2 times of `global_write_buffer_size`
|
||||||
## @toml2docs:none-default="Auto"
|
global_write_buffer_reject_size = "2GB"
|
||||||
#+ global_write_buffer_reject_size = "2GB"
|
|
||||||
|
|
||||||
## Cache size for SST metadata. Setting it to 0 to disable the cache.
|
## Cache size for SST metadata. Setting it to 0 to disable the cache.
|
||||||
## If not set, it's default to 1/32 of OS memory with a max limitation of 128MB.
|
## If not set, it's default to 1/32 of OS memory with a max limitation of 128MB.
|
||||||
## @toml2docs:none-default="Auto"
|
sst_meta_cache_size = "128MB"
|
||||||
#+ sst_meta_cache_size = "128MB"
|
|
||||||
|
|
||||||
## Cache size for vectors and arrow arrays. Setting it to 0 to disable the cache.
|
## Cache size for vectors and arrow arrays. Setting it to 0 to disable the cache.
|
||||||
## If not set, it's default to 1/16 of OS memory with a max limitation of 512MB.
|
## If not set, it's default to 1/16 of OS memory with a max limitation of 512MB.
|
||||||
## @toml2docs:none-default="Auto"
|
vector_cache_size = "512MB"
|
||||||
#+ vector_cache_size = "512MB"
|
|
||||||
|
|
||||||
## Cache size for pages of SST row groups. Setting it to 0 to disable the cache.
|
## Cache size for pages of SST row groups. Setting it to 0 to disable the cache.
|
||||||
## If not set, it's default to 1/8 of OS memory.
|
|
||||||
## @toml2docs:none-default="Auto"
|
|
||||||
#+ page_cache_size = "512MB"
|
|
||||||
|
|
||||||
## Cache size for time series selector (e.g. `last_value()`). Setting it to 0 to disable the cache.
|
|
||||||
## If not set, it's default to 1/16 of OS memory with a max limitation of 512MB.
|
## If not set, it's default to 1/16 of OS memory with a max limitation of 512MB.
|
||||||
## @toml2docs:none-default="Auto"
|
page_cache_size = "512MB"
|
||||||
#+ selector_result_cache_size = "512MB"
|
|
||||||
|
|
||||||
## Whether to enable the write cache. It's enabled by default when using object storage, and enabling it is recommended for better performance.
|
## Whether to enable the experimental write cache.
|
||||||
enable_write_cache = false
|
enable_experimental_write_cache = false
|
||||||
|
|
||||||
## File system path for write cache, defaults to `{data_home}`.
|
## File system path for write cache, defaults to `{data_home}/write_cache`.
|
||||||
write_cache_path = ""
|
experimental_write_cache_path = ""
|
||||||
|
|
||||||
## Capacity for write cache. If your disk space is sufficient, it is recommended to set it larger.
|
## Capacity for write cache.
|
||||||
write_cache_size = "5GiB"
|
experimental_write_cache_size = "512MB"
|
||||||
|
|
||||||
## TTL for write cache.
|
## TTL for write cache.
|
||||||
## @toml2docs:none-default
|
experimental_write_cache_ttl = "1h"
|
||||||
write_cache_ttl = "8h"
|
|
||||||
|
|
||||||
## Preload index (puffin) files into cache on region open (default: true).
|
|
||||||
## When enabled, index files are loaded into the write cache during region initialization,
|
|
||||||
## which can improve query performance at the cost of longer startup times.
|
|
||||||
preload_index_cache = true
|
|
||||||
|
|
||||||
## Percentage of write cache capacity allocated for index (puffin) files (default: 20).
|
|
||||||
## The remaining capacity is used for data (parquet) files.
|
|
||||||
## Must be between 0 and 100 (exclusive). For example, with a 5GiB write cache and 20% allocation,
|
|
||||||
## 1GiB is reserved for index files and 4GiB for data files.
|
|
||||||
index_cache_percent = 20
|
|
||||||
|
|
||||||
## Buffer size for SST writing.
|
## Buffer size for SST writing.
|
||||||
sst_write_buffer_size = "8MB"
|
sst_write_buffer_size = "8MB"
|
||||||
|
|
||||||
|
## Parallelism to scan a region (default: 1/4 of cpu cores).
|
||||||
|
## - `0`: using the default value (1/4 of cpu cores).
|
||||||
|
## - `1`: scan in current thread.
|
||||||
|
## - `n`: scan in parallelism n.
|
||||||
|
scan_parallelism = 0
|
||||||
|
|
||||||
## Capacity of the channel to send data from parallel scan tasks to the main task.
|
## Capacity of the channel to send data from parallel scan tasks to the main task.
|
||||||
parallel_scan_channel_size = 32
|
parallel_scan_channel_size = 32
|
||||||
|
|
||||||
## Maximum number of SST files to scan concurrently.
|
|
||||||
max_concurrent_scan_files = 384
|
|
||||||
|
|
||||||
## Whether to allow stale WAL entries read during replay.
|
## Whether to allow stale WAL entries read during replay.
|
||||||
allow_stale_entries = false
|
allow_stale_entries = false
|
||||||
|
|
||||||
## Memory limit for table scans across all queries.
|
|
||||||
## Supports absolute size (e.g., "2GB") or percentage of system memory (e.g., "20%").
|
|
||||||
## Setting it to 0 disables the limit.
|
|
||||||
## NOTE: Works with max_concurrent_queries for tiered memory allocation.
|
|
||||||
## - If max_concurrent_queries is set: 70% of queries get full access, 30% get 70% access.
|
|
||||||
## - If max_concurrent_queries is 0 (unlimited): first 20 queries get full access, rest get 70% access.
|
|
||||||
scan_memory_limit = "50%"
|
|
||||||
|
|
||||||
## Minimum time interval between two compactions.
|
|
||||||
## To align with the old behavior, the default value is 0 (no restrictions).
|
|
||||||
min_compaction_interval = "0m"
|
|
||||||
|
|
||||||
## Whether to enable experimental flat format as the default format.
|
|
||||||
default_experimental_flat_format = false
|
|
||||||
|
|
||||||
## The options for index in Mito engine.
|
|
||||||
[region_engine.mito.index]
|
|
||||||
|
|
||||||
## Auxiliary directory path for the index in filesystem, used to store intermediate files for
|
|
||||||
## creating the index and staging files for searching the index, defaults to `{data_home}/index_intermediate`.
|
|
||||||
## The default name for this directory is `index_intermediate` for backward compatibility.
|
|
||||||
##
|
|
||||||
## This path contains two subdirectories:
|
|
||||||
## - `__intm`: stores intermediate files used when creating the index.
|
|
||||||
## - `staging`: stores staging files used when searching the index.
|
|
||||||
aux_path = ""
|
|
||||||
|
|
||||||
## The max capacity of the staging directory.
|
|
||||||
staging_size = "2GB"
|
|
||||||
|
|
||||||
## The TTL of the staging directory.
|
|
||||||
## Defaults to 7 days.
|
|
||||||
## Setting it to "0s" to disable TTL.
|
|
||||||
staging_ttl = "7d"
|
|
||||||
|
|
||||||
## Cache size for inverted index metadata.
|
|
||||||
metadata_cache_size = "64MiB"
|
|
||||||
|
|
||||||
## Cache size for inverted index content.
|
|
||||||
content_cache_size = "128MiB"
|
|
||||||
|
|
||||||
## Page size for inverted index content cache.
|
|
||||||
content_cache_page_size = "64KiB"
|
|
||||||
|
|
||||||
## Cache size for index result.
|
|
||||||
result_cache_size = "128MiB"
|
|
||||||
|
|
||||||
## The options for inverted index in Mito engine.
|
## The options for inverted index in Mito engine.
|
||||||
[region_engine.mito.inverted_index]
|
[region_engine.mito.inverted_index]
|
||||||
|
|
||||||
## Whether to create the index on flush.
|
## Whether to create the index on flush.
|
||||||
## - `auto`: automatically (default)
|
## - `auto`: automatically
|
||||||
## - `disable`: never
|
## - `disable`: never
|
||||||
create_on_flush = "auto"
|
create_on_flush = "auto"
|
||||||
|
|
||||||
## Whether to create the index on compaction.
|
## Whether to create the index on compaction.
|
||||||
## - `auto`: automatically (default)
|
## - `auto`: automatically
|
||||||
## - `disable`: never
|
## - `disable`: never
|
||||||
create_on_compaction = "auto"
|
create_on_compaction = "auto"
|
||||||
|
|
||||||
## Whether to apply the index on query
|
## Whether to apply the index on query
|
||||||
## - `auto`: automatically (default)
|
## - `auto`: automatically
|
||||||
## - `disable`: never
|
## - `disable`: never
|
||||||
apply_on_query = "auto"
|
apply_on_query = "auto"
|
||||||
|
|
||||||
## Memory threshold for performing an external sort during index creation.
|
## Memory threshold for performing an external sort during index creation.
|
||||||
## - `auto`: automatically determine the threshold based on the system memory size (default)
|
## Setting to empty will disable external sorting, forcing all sorting operations to happen in memory.
|
||||||
## - `unlimited`: no memory limit
|
mem_threshold_on_create = "64M"
|
||||||
## - `[size]` e.g. `64MB`: fixed memory threshold
|
|
||||||
mem_threshold_on_create = "auto"
|
|
||||||
|
|
||||||
## Deprecated, use `region_engine.mito.index.aux_path` instead.
|
## File system path to store intermediate files for external sorting (default `{data_home}/index_intermediate`).
|
||||||
intermediate_path = ""
|
intermediate_path = ""
|
||||||
|
|
||||||
## The options for full-text index in Mito engine.
|
|
||||||
[region_engine.mito.fulltext_index]
|
|
||||||
|
|
||||||
## Whether to create the index on flush.
|
|
||||||
## - `auto`: automatically (default)
|
|
||||||
## - `disable`: never
|
|
||||||
create_on_flush = "auto"
|
|
||||||
|
|
||||||
## Whether to create the index on compaction.
|
|
||||||
## - `auto`: automatically (default)
|
|
||||||
## - `disable`: never
|
|
||||||
create_on_compaction = "auto"
|
|
||||||
|
|
||||||
## Whether to apply the index on query
|
|
||||||
## - `auto`: automatically (default)
|
|
||||||
## - `disable`: never
|
|
||||||
apply_on_query = "auto"
|
|
||||||
|
|
||||||
## Memory threshold for index creation.
|
|
||||||
## - `auto`: automatically determine the threshold based on the system memory size (default)
|
|
||||||
## - `unlimited`: no memory limit
|
|
||||||
## - `[size]` e.g. `64MB`: fixed memory threshold
|
|
||||||
mem_threshold_on_create = "auto"
|
|
||||||
|
|
||||||
## The options for bloom filter index in Mito engine.
|
|
||||||
[region_engine.mito.bloom_filter_index]
|
|
||||||
|
|
||||||
## Whether to create the index on flush.
|
|
||||||
## - `auto`: automatically (default)
|
|
||||||
## - `disable`: never
|
|
||||||
create_on_flush = "auto"
|
|
||||||
|
|
||||||
## Whether to create the index on compaction.
|
|
||||||
## - `auto`: automatically (default)
|
|
||||||
## - `disable`: never
|
|
||||||
create_on_compaction = "auto"
|
|
||||||
|
|
||||||
## Whether to apply the index on query
|
|
||||||
## - `auto`: automatically (default)
|
|
||||||
## - `disable`: never
|
|
||||||
apply_on_query = "auto"
|
|
||||||
|
|
||||||
## Memory threshold for the index creation.
|
|
||||||
## - `auto`: automatically determine the threshold based on the system memory size (default)
|
|
||||||
## - `unlimited`: no memory limit
|
|
||||||
## - `[size]` e.g. `64MB`: fixed memory threshold
|
|
||||||
mem_threshold_on_create = "auto"
|
|
||||||
|
|
||||||
[region_engine.mito.memtable]
|
[region_engine.mito.memtable]
|
||||||
## Memtable type.
|
## Memtable type.
|
||||||
## - `time_series`: time-series memtable
|
## - `time_series`: time-series memtable
|
||||||
@@ -659,66 +394,55 @@ data_freeze_threshold = 32768
|
|||||||
## Only available for `partition_tree` memtable.
|
## Only available for `partition_tree` memtable.
|
||||||
fork_dictionary_bytes = "1GiB"
|
fork_dictionary_bytes = "1GiB"
|
||||||
|
|
||||||
[[region_engine]]
|
|
||||||
## Enable the file engine.
|
|
||||||
[region_engine.file]
|
|
||||||
|
|
||||||
[[region_engine]]
|
|
||||||
## Metric engine options.
|
|
||||||
[region_engine.metric]
|
|
||||||
## Whether to use sparse primary key encoding.
|
|
||||||
sparse_primary_key_encoding = true
|
|
||||||
|
|
||||||
## The logging options.
|
## The logging options.
|
||||||
[logging]
|
[logging]
|
||||||
## The directory to store the log files. If set to empty, logs will not be written to files.
|
## The directory to store the log files.
|
||||||
dir = "./greptimedb_data/logs"
|
dir = "/tmp/greptimedb/logs"
|
||||||
|
|
||||||
## The log level. Can be `info`/`debug`/`warn`/`error`.
|
## The log level. Can be `info`/`debug`/`warn`/`error`.
|
||||||
## @toml2docs:none-default
|
## +toml2docs:none-default
|
||||||
level = "info"
|
level = "info"
|
||||||
|
|
||||||
## Enable OTLP tracing.
|
## Enable OTLP tracing.
|
||||||
enable_otlp_tracing = false
|
enable_otlp_tracing = false
|
||||||
|
|
||||||
## The OTLP tracing endpoint.
|
## The OTLP tracing endpoint.
|
||||||
otlp_endpoint = "http://localhost:4318/v1/traces"
|
## +toml2docs:none-default
|
||||||
|
otlp_endpoint = ""
|
||||||
|
|
||||||
## Whether to append logs to stdout.
|
## Whether to append logs to stdout.
|
||||||
append_stdout = true
|
append_stdout = true
|
||||||
|
|
||||||
## The log format. Can be `text`/`json`.
|
|
||||||
log_format = "text"
|
|
||||||
|
|
||||||
## The maximum amount of log files.
|
|
||||||
max_log_files = 720
|
|
||||||
|
|
||||||
## The OTLP tracing export protocol. Can be `grpc`/`http`.
|
|
||||||
otlp_export_protocol = "http"
|
|
||||||
|
|
||||||
## Additional OTLP headers, only valid when using OTLP http
|
|
||||||
[logging.otlp_headers]
|
|
||||||
## @toml2docs:none-default
|
|
||||||
#Authorization = "Bearer my-token"
|
|
||||||
## @toml2docs:none-default
|
|
||||||
#Database = "My database"
|
|
||||||
|
|
||||||
## The percentage of tracing that will be sampled and exported.
|
## The percentage of tracing that will be sampled and exported.
|
||||||
## Valid range `[0, 1]`: 1 means all traces are sampled, 0 means no traces are sampled; the default value is 1.
|
## Valid range `[0, 1]`: 1 means all traces are sampled, 0 means no traces are sampled; the default value is 1.
|
||||||
## Ratios > 1 are treated as 1; ratios < 0 are treated as 0.
|
## Ratios > 1 are treated as 1; ratios < 0 are treated as 0.
|
||||||
[logging.tracing_sample_ratio]
|
[logging.tracing_sample_ratio]
|
||||||
default_ratio = 1.0
|
default_ratio = 1.0
|
||||||
|
|
||||||
## The tracing options. Only effective when compiled with the `tokio-console` feature.
|
## The datanode can export its metrics and send them to a Prometheus-compatible service (e.g. `greptimedb` itself) via the remote-write API.
|
||||||
#+ [tracing]
|
## This is only used by `greptimedb` to export its own metrics internally. It's different from Prometheus scraping.
|
||||||
## The tokio console address.
|
[export_metrics]
|
||||||
## @toml2docs:none-default
|
|
||||||
#+ tokio_console_addr = "127.0.0.1"
|
|
||||||
|
|
||||||
## The memory options.
|
## Whether to enable export metrics.
|
||||||
[memory]
|
enable = false
|
||||||
## Whether to enable heap profiling activation during startup.
|
|
||||||
## When enabled, heap profiling will be activated if the `MALLOC_CONF` environment variable
|
## The interval of export metrics.
|
||||||
## is set to "prof:true,prof_active:false". The official image adds this env variable.
|
write_interval = "30s"
|
||||||
## Default is true.
|
|
||||||
enable_heap_profiling = true
|
## For `standalone` mode, `self_import` is recommended to collect metrics generated by the instance itself
|
||||||
|
[export_metrics.self_import]
|
||||||
|
## +toml2docs:none-default
|
||||||
|
db = "information_schema"
|
||||||
|
|
||||||
|
[export_metrics.remote_write]
|
||||||
|
## The URL to send the metrics to, for example: `http://127.0.0.1:4000/v1/prometheus/write?db=information_schema`.
|
||||||
|
url = ""
|
||||||
|
|
||||||
|
## HTTP headers to carry in Prometheus remote-write requests.
|
||||||
|
headers = { }
|
||||||
|
|
||||||
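## A hypothetical filled-in `remote_write` block, assuming metrics are pushed back into a local
## GreptimeDB instance; the URL comes from the example above and the header value is a placeholder.
#+ [export_metrics.remote_write]
#+ url = "http://127.0.0.1:4000/v1/prometheus/write?db=information_schema"
#+ headers = { Authorization = "Bearer my-token" }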
|
## The tracing options. Only effective when compiled with the `tokio-console` feature.
|
||||||
|
[tracing]
|
||||||
|
## The tokio console address.
|
||||||
|
## +toml2docs:none-default
|
||||||
|
tokio_console_addr = "127.0.0.1"
|
||||||
|
|||||||
@@ -1,171 +0,0 @@
|
|||||||
## The flownode identifier; it should be unique in the cluster.
|
|
||||||
## @toml2docs:none-default
|
|
||||||
node_id = 14
|
|
||||||
|
|
||||||
## flow engine options.
|
|
||||||
[flow]
|
|
||||||
## The number of flow workers in the flownode.
|
|
||||||
## If this value is not set (or set to 0), the number of CPU cores divided by 2 is used.
|
|
||||||
#+num_workers=0
|
|
||||||
[flow.batching_mode]
|
|
||||||
## The default batching engine query timeout is 10 minutes.
|
|
||||||
#+query_timeout="600s"
|
|
||||||
## Outputs a warning log for any query that runs longer than this threshold.
|
|
||||||
#+slow_query_threshold="60s"
|
|
||||||
## The minimum duration between two query executions by a batching mode task.
|
|
||||||
#+experimental_min_refresh_duration="5s"
|
|
||||||
## The gRPC connection timeout
|
|
||||||
#+grpc_conn_timeout="5s"
|
|
||||||
## The gRPC max retry number
|
|
||||||
#+experimental_grpc_max_retries=3
|
|
||||||
## Timeout for the flow to wait for an available frontend.
|
|
||||||
## If no available frontend is found after `frontend_scan_timeout` elapses, an error is returned,
|
|
||||||
## which prevents the flownode from starting.
|
|
||||||
#+experimental_frontend_scan_timeout="30s"
|
|
||||||
## Frontend activity timeout.
|
|
||||||
## If a frontend is down (not sending heartbeats) for more than `frontend_activity_timeout`,
|
|
||||||
## it will be removed from the list of frontends that the flownode connects to.
|
|
||||||
#+experimental_frontend_activity_timeout="60s"
|
|
||||||
## Maximum number of filters allowed in a single query
|
|
||||||
#+experimental_max_filter_num_per_query=20
|
|
||||||
## Time window merge distance
|
|
||||||
#+experimental_time_window_merge_threshold=3
|
|
||||||
## Read preference of the Frontend client.
|
|
||||||
#+read_preference="Leader"
|
|
||||||
[flow.batching_mode.frontend_tls]
|
|
||||||
## Whether to enable TLS for client.
|
|
||||||
#+enabled=false
|
|
||||||
## Server Certificate file path.
|
|
||||||
## @toml2docs:none-default
|
|
||||||
#+server_ca_cert_path=""
|
|
||||||
## Client Certificate file path.
|
|
||||||
## @toml2docs:none-default
|
|
||||||
#+client_cert_path=""
|
|
||||||
## Client Private key file path.
|
|
||||||
## @toml2docs:none-default
|
|
||||||
#+client_key_path=""
|
|
||||||
|
|
||||||
## The gRPC server options.
|
|
||||||
[grpc]
|
|
||||||
## The address to bind the gRPC server.
|
|
||||||
bind_addr = "127.0.0.1:6800"
|
|
||||||
## The address advertised to the metasrv,
|
|
||||||
## and used for connections from outside the host
|
|
||||||
server_addr = "127.0.0.1:6800"
|
|
||||||
## The number of server worker threads.
|
|
||||||
runtime_size = 2
|
|
||||||
## The maximum receive message size for gRPC server.
|
|
||||||
max_recv_message_size = "512MB"
|
|
||||||
## The maximum send message size for gRPC server.
|
|
||||||
max_send_message_size = "512MB"
|
|
||||||
|
|
||||||
## The HTTP server options.
|
|
||||||
[http]
|
|
||||||
## The address to bind the HTTP server.
|
|
||||||
addr = "127.0.0.1:4000"
|
|
||||||
## HTTP request timeout. Set to 0 to disable timeout.
|
|
||||||
timeout = "0s"
|
|
||||||
## HTTP request body limit.
|
|
||||||
## The following units are supported: `B`, `KB`, `KiB`, `MB`, `MiB`, `GB`, `GiB`, `TB`, `TiB`, `PB`, `PiB`.
|
|
||||||
## Set to 0 to disable limit.
|
|
||||||
body_limit = "64MB"
|
|
||||||
|
|
||||||
## The metasrv client options.
|
|
||||||
[meta_client]
|
|
||||||
## The addresses of the metasrv.
|
|
||||||
metasrv_addrs = ["127.0.0.1:3002"]
|
|
||||||
|
|
||||||
## Operation timeout.
|
|
||||||
timeout = "3s"
|
|
||||||
|
|
||||||
## DDL timeout.
|
|
||||||
ddl_timeout = "10s"
|
|
||||||
|
|
||||||
## Connect server timeout.
|
|
||||||
connect_timeout = "1s"
|
|
||||||
|
|
||||||
## `TCP_NODELAY` option for accepted connections.
|
|
||||||
tcp_nodelay = true
|
|
||||||
|
|
||||||
## The configuration about the cache of the metadata.
|
|
||||||
metadata_cache_max_capacity = 100000
|
|
||||||
|
|
||||||
## TTL of the metadata cache.
|
|
||||||
metadata_cache_ttl = "10m"
|
|
||||||
|
|
||||||
# TTI of the metadata cache.
|
|
||||||
metadata_cache_tti = "5m"
|
|
||||||
|
|
||||||
## The heartbeat options.
|
|
||||||
[heartbeat]
|
|
||||||
## Interval for sending heartbeat messages to the metasrv.
|
|
||||||
interval = "3s"
|
|
||||||
|
|
||||||
## Interval for retrying to send heartbeat messages to the metasrv.
|
|
||||||
retry_interval = "3s"
|
|
||||||
|
|
||||||
## The logging options.
|
|
||||||
[logging]
|
|
||||||
## The directory to store the log files. If set to empty, logs will not be written to files.
|
|
||||||
dir = "./greptimedb_data/logs"
|
|
||||||
|
|
||||||
## The log level. Can be `info`/`debug`/`warn`/`error`.
|
|
||||||
## @toml2docs:none-default
|
|
||||||
level = "info"
|
|
||||||
|
|
||||||
## Enable OTLP tracing.
|
|
||||||
enable_otlp_tracing = false
|
|
||||||
|
|
||||||
## The OTLP tracing endpoint.
|
|
||||||
otlp_endpoint = "http://localhost:4318/v1/traces"
|
|
||||||
|
|
||||||
## Whether to append logs to stdout.
|
|
||||||
append_stdout = true
|
|
||||||
|
|
||||||
## The log format. Can be `text`/`json`.
|
|
||||||
log_format = "text"
|
|
||||||
|
|
||||||
## The maximum amount of log files.
|
|
||||||
max_log_files = 720
|
|
||||||
|
|
||||||
## The OTLP tracing export protocol. Can be `grpc`/`http`.
|
|
||||||
otlp_export_protocol = "http"
|
|
||||||
|
|
||||||
## Additional OTLP headers, only valid when using OTLP http
|
|
||||||
[logging.otlp_headers]
|
|
||||||
## @toml2docs:none-default
|
|
||||||
#Authorization = "Bearer my-token"
|
|
||||||
## @toml2docs:none-default
|
|
||||||
#Database = "My database"
|
|
||||||
|
|
||||||
## The percentage of tracing that will be sampled and exported.
|
|
||||||
## Valid range `[0, 1]`: 1 means all traces are sampled, 0 means no traces are sampled; the default value is 1.
|
|
||||||
## Ratios > 1 are treated as 1; ratios < 0 are treated as 0.
|
|
||||||
[logging.tracing_sample_ratio]
|
|
||||||
default_ratio = 1.0
|
|
||||||
|
|
||||||
## The tracing options. Only effective when compiled with the `tokio-console` feature.
|
|
||||||
#+ [tracing]
|
|
||||||
## The tokio console address.
|
|
||||||
## @toml2docs:none-default
|
|
||||||
#+ tokio_console_addr = "127.0.0.1"
|
|
||||||
|
|
||||||
[query]
|
|
||||||
## Parallelism of the query engine for queries sent by the flownode.
|
|
||||||
## Defaults to 1 so that it won't use too much CPU or memory.
|
|
||||||
parallelism = 1
|
|
||||||
|
|
||||||
## Memory pool size for query execution operators (aggregation, sorting, join).
|
|
||||||
## Supports absolute size (e.g., "1GB", "2GB") or percentage of system memory (e.g., "20%").
|
|
||||||
## Setting it to 0 disables the limit (unbounded, default behavior).
|
|
||||||
## When this limit is reached, queries will fail with ResourceExhausted error.
|
|
||||||
## NOTE: This does NOT limit memory used by table scans.
|
|
||||||
memory_pool_size = "50%"
|
|
||||||
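## An illustrative sketch of the two accepted forms (the values below are assumptions, not recommendations):
#+ memory_pool_size = "1GB"   # absolute size
#+ memory_pool_size = "25%"   # percentage of system memory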
|
|
||||||
## The memory options.
|
|
||||||
[memory]
|
|
||||||
## Whether to enable heap profiling activation during startup.
|
|
||||||
## When enabled, heap profiling will be activated if the `MALLOC_CONF` environment variable
|
|
||||||
## is set to "prof:true,prof_active:false". The official image adds this env variable.
|
|
||||||
## Default is true.
|
|
||||||
enable_heap_profiling = true
|
|
||||||
@@ -1,22 +1,10 @@
|
|||||||
|
## The running mode of the datanode. It can be `standalone` or `distributed`.
|
||||||
|
mode = "standalone"
|
||||||
|
|
||||||
## The default timezone of the server.
|
## The default timezone of the server.
|
||||||
## @toml2docs:none-default
|
## +toml2docs:none-default
|
||||||
default_timezone = "UTC"
|
default_timezone = "UTC"
|
||||||
|
|
||||||
## The default column prefix for auto-created time index and value columns.
|
|
||||||
## @toml2docs:none-default
|
|
||||||
default_column_prefix = "greptime"
|
|
||||||
|
|
||||||
## The maximum in-flight write bytes.
|
|
||||||
## @toml2docs:none-default
|
|
||||||
#+ max_in_flight_write_bytes = "500MB"
|
|
||||||
|
|
||||||
## The runtime options.
|
|
||||||
#+ [runtime]
|
|
||||||
## The number of threads to execute the runtime for global read operations.
|
|
||||||
#+ global_rt_size = 8
|
|
||||||
## The number of threads to execute the runtime for global write operations.
|
|
||||||
#+ compact_rt_size = 4
|
|
||||||
|
|
||||||
## The heartbeat options.
|
## The heartbeat options.
|
||||||
[heartbeat]
|
[heartbeat]
|
||||||
## Interval for sending heartbeat messages to the metasrv.
|
## Interval for sending heartbeat messages to the metasrv.
|
||||||
@@ -29,55 +17,18 @@ retry_interval = "3s"
|
|||||||
[http]
|
[http]
|
||||||
## The address to bind the HTTP server.
|
## The address to bind the HTTP server.
|
||||||
addr = "127.0.0.1:4000"
|
addr = "127.0.0.1:4000"
|
||||||
## HTTP request timeout. Set to 0 to disable timeout.
|
## HTTP request timeout.
|
||||||
timeout = "0s"
|
timeout = "30s"
|
||||||
## HTTP request body limit.
|
## HTTP request body limit.
|
||||||
## The following units are supported: `B`, `KB`, `KiB`, `MB`, `MiB`, `GB`, `GiB`, `TB`, `TiB`, `PB`, `PiB`.
|
## The following units are supported: `B`, `KB`, `KiB`, `MB`, `MiB`, `GB`, `GiB`, `TB`, `TiB`, `PB`, `PiB`.
|
||||||
## Set to 0 to disable limit.
|
|
||||||
body_limit = "64MB"
|
body_limit = "64MB"
|
||||||
## Maximum total memory for all concurrent HTTP request bodies.
|
|
||||||
## Set to 0 to disable the limit. Default: "0" (unlimited)
|
|
||||||
## @toml2docs:none-default
|
|
||||||
#+ max_total_body_memory = "1GB"
|
|
||||||
## HTTP CORS support; it's turned on by default.
|
|
||||||
## This allows browsers to access HTTP APIs without CORS restrictions.
|
|
||||||
enable_cors = true
|
|
||||||
## Customize allowed origins for HTTP CORS.
|
|
||||||
## @toml2docs:none-default
|
|
||||||
cors_allowed_origins = ["https://example.com"]
|
|
||||||
## Whether to enable validation for Prometheus remote write requests.
|
|
||||||
## Available options:
|
|
||||||
## - strict: deny invalid UTF-8 strings (default).
|
|
||||||
## - lossy: allow invalid UTF-8 strings, replace invalid characters with REPLACEMENT_CHARACTER(U+FFFD).
|
|
||||||
## - unchecked: do not validate strings.
|
|
||||||
prom_validation_mode = "strict"
|
|
||||||
|
|
||||||
## The gRPC server options.
|
## The gRPC server options.
|
||||||
[grpc]
|
[grpc]
|
||||||
## The address to bind the gRPC server.
|
## The address to bind the gRPC server.
|
||||||
bind_addr = "127.0.0.1:4001"
|
addr = "127.0.0.1:4001"
|
||||||
## The address advertised to the metasrv, and used for connections from outside the host.
|
|
||||||
## If left empty or unset, the server will automatically use the IP address of the first network interface
|
|
||||||
## on the host, with the same port number as the one specified in `grpc.bind_addr`.
|
|
||||||
server_addr = "127.0.0.1:4001"
|
|
||||||
## The number of server worker threads.
|
## The number of server worker threads.
|
||||||
runtime_size = 8
|
runtime_size = 8
|
||||||
## Maximum total memory for all concurrent gRPC request messages.
|
|
||||||
## Set to 0 to disable the limit. Default: "0" (unlimited)
|
|
||||||
## @toml2docs:none-default
|
|
||||||
#+ max_total_message_memory = "1GB"
|
|
||||||
## Compression mode for frontend side Arrow IPC service. Available options:
|
|
||||||
## - `none`: disable all compression
|
|
||||||
## - `transport`: only enable gRPC transport compression (zstd)
|
|
||||||
## - `arrow_ipc`: only enable Arrow IPC compression (lz4)
|
|
||||||
## - `all`: enable all compression.
|
|
||||||
## Defaults to `none`.
|
|
||||||
flight_compression = "arrow_ipc"
|
|
||||||
## The maximum connection age for gRPC connection.
|
|
||||||
## The value can be a human-readable time string. For example: `10m` for ten minutes or `1h` for one hour.
|
|
||||||
## Refer to https://grpc.io/docs/guides/keepalive/ for more details.
|
|
||||||
## @toml2docs:none-default
|
|
||||||
#+ max_connection_age = "10m"
|
|
||||||
|
|
||||||
## gRPC server TLS options, see `mysql.tls` section.
|
## gRPC server TLS options, see `mysql.tls` section.
|
||||||
[grpc.tls]
|
[grpc.tls]
|
||||||
@@ -85,53 +36,17 @@ flight_compression = "arrow_ipc"
|
|||||||
mode = "disable"
|
mode = "disable"
|
||||||
|
|
||||||
## Certificate file path.
|
## Certificate file path.
|
||||||
## @toml2docs:none-default
|
## +toml2docs:none-default
|
||||||
cert_path = ""
|
cert_path = ""
|
||||||
|
|
||||||
## Private key file path.
|
## Private key file path.
|
||||||
## @toml2docs:none-default
|
## +toml2docs:none-default
|
||||||
key_path = ""
|
key_path = ""
|
||||||
|
|
||||||
## Watch for Certificate and key file change and auto reload.
|
## Watch for Certificate and key file change and auto reload.
|
||||||
## For now, gRPC tls config does not support auto reload.
|
## For now, gRPC tls config does not support auto reload.
|
||||||
watch = false
|
watch = false
|
||||||
|
|
||||||
## The internal gRPC server options. Internal gRPC port for nodes inside cluster to access frontend.
|
|
||||||
[internal_grpc]
|
|
||||||
## The address to bind the gRPC server.
|
|
||||||
bind_addr = "127.0.0.1:4010"
|
|
||||||
## The address advertised to the metasrv, and used for connections from outside the host.
|
|
||||||
## If left empty or unset, the server will automatically use the IP address of the first network interface
|
|
||||||
## on the host, with the same port number as the one specified in `grpc.bind_addr`.
|
|
||||||
server_addr = "127.0.0.1:4010"
|
|
||||||
## The number of server worker threads.
|
|
||||||
runtime_size = 8
|
|
||||||
## Compression mode for frontend side Arrow IPC service. Available options:
|
|
||||||
## - `none`: disable all compression
|
|
||||||
## - `transport`: only enable gRPC transport compression (zstd)
|
|
||||||
## - `arrow_ipc`: only enable Arrow IPC compression (lz4)
|
|
||||||
## - `all`: enable all compression.
|
|
||||||
## Defaults to `none`.
|
|
||||||
flight_compression = "arrow_ipc"
|
|
||||||
|
|
||||||
## internal gRPC server TLS options, see `mysql.tls` section.
|
|
||||||
[internal_grpc.tls]
|
|
||||||
## TLS mode.
|
|
||||||
mode = "disable"
|
|
||||||
|
|
||||||
## Certificate file path.
|
|
||||||
## @toml2docs:none-default
|
|
||||||
cert_path = ""
|
|
||||||
|
|
||||||
## Private key file path.
|
|
||||||
## @toml2docs:none-default
|
|
||||||
key_path = ""
|
|
||||||
|
|
||||||
## Watch for Certificate and key file change and auto reload.
|
|
||||||
## For now, gRPC tls config does not support auto reload.
|
|
||||||
watch = false
|
|
||||||
|
|
||||||
|
|
||||||
## MySQL server options.
|
## MySQL server options.
|
||||||
[mysql]
|
[mysql]
|
||||||
## Whether to enable.
|
## Whether to enable.
|
||||||
@@ -140,11 +55,6 @@ enable = true
|
|||||||
addr = "127.0.0.1:4002"
|
addr = "127.0.0.1:4002"
|
||||||
## The number of server worker threads.
|
## The number of server worker threads.
|
||||||
runtime_size = 2
|
runtime_size = 2
|
||||||
## Server-side keep-alive time.
|
|
||||||
## Set to 0 (default) to disable.
|
|
||||||
keep_alive = "0s"
|
|
||||||
## Maximum entries in the MySQL prepared statement cache; default is 10,000.
|
|
||||||
prepared_stmt_cache_size = 10000
|
|
||||||
|
|
||||||
# MySQL server TLS options.
|
# MySQL server TLS options.
|
||||||
[mysql.tls]
|
[mysql.tls]
|
||||||
@@ -158,11 +68,11 @@ prepared_stmt_cache_size = 10000
|
|||||||
mode = "disable"
|
mode = "disable"
|
||||||
|
|
||||||
## Certificate file path.
|
## Certificate file path.
|
||||||
## @toml2docs:none-default
|
## +toml2docs:none-default
|
||||||
cert_path = ""
|
cert_path = ""
|
||||||
|
|
||||||
## Private key file path.
|
## Private key file path.
|
||||||
## @toml2docs:none-default
|
## +toml2docs:none-default
|
||||||
key_path = ""
|
key_path = ""
|
||||||
|
|
||||||
## Watch for Certificate and key file change and auto reload
|
## Watch for Certificate and key file change and auto reload
|
||||||
@@ -176,9 +86,6 @@ enable = true
|
|||||||
addr = "127.0.0.1:4003"
|
addr = "127.0.0.1:4003"
|
||||||
## The number of server worker threads.
|
## The number of server worker threads.
|
||||||
runtime_size = 2
|
runtime_size = 2
|
||||||
## Server-side keep-alive time.
|
|
||||||
## Set to 0 (default) to disable.
|
|
||||||
keep_alive = "0s"
|
|
||||||
|
|
||||||
## PostgreSQL server TLS options, see the `mysql.tls` section.
|
## PostgreSQL server TLS options, see the `mysql.tls` section.
|
||||||
[postgres.tls]
|
[postgres.tls]
|
||||||
@@ -186,11 +93,11 @@ keep_alive = "0s"
|
|||||||
mode = "disable"
|
mode = "disable"
|
||||||
|
|
||||||
## Certificate file path.
|
## Certificate file path.
|
||||||
## @toml2docs:none-default
|
## +toml2docs:none-default
|
||||||
cert_path = ""
|
cert_path = ""
|
||||||
|
|
||||||
## Private key file path.
|
## Private key file path.
|
||||||
## @toml2docs:none-default
|
## +toml2docs:none-default
|
||||||
key_path = ""
|
key_path = ""
|
||||||
|
|
||||||
## Watch for Certificate and key file change and auto reload
|
## Watch for Certificate and key file change and auto reload
|
||||||
@@ -206,11 +113,6 @@ enable = true
|
|||||||
## Whether to enable InfluxDB protocol in HTTP API.
|
## Whether to enable InfluxDB protocol in HTTP API.
|
||||||
enable = true
|
enable = true
|
||||||
|
|
||||||
## Jaeger protocol options.
|
|
||||||
[jaeger]
|
|
||||||
## Whether to enable Jaeger protocol in HTTP API.
|
|
||||||
enable = true
|
|
||||||
|
|
||||||
## Prometheus remote storage options
|
## Prometheus remote storage options
|
||||||
[prom_store]
|
[prom_store]
|
||||||
## Whether to enable Prometheus remote write and read in HTTP API.
|
## Whether to enable Prometheus remote write and read in HTTP API.
|
||||||
@@ -226,6 +128,9 @@ metasrv_addrs = ["127.0.0.1:3002"]
|
|||||||
## Operation timeout.
|
## Operation timeout.
|
||||||
timeout = "3s"
|
timeout = "3s"
|
||||||
|
|
||||||
|
## Heartbeat timeout.
|
||||||
|
heartbeat_timeout = "500ms"
|
||||||
|
|
||||||
## DDL timeout.
|
## DDL timeout.
|
||||||
ddl_timeout = "10s"
|
ddl_timeout = "10s"
|
||||||
|
|
||||||
@@ -244,22 +149,6 @@ metadata_cache_ttl = "10m"
|
|||||||
# TTI of the metadata cache.
|
# TTI of the metadata cache.
|
||||||
metadata_cache_tti = "5m"
|
metadata_cache_tti = "5m"
|
||||||
|
|
||||||
## The query engine options.
|
|
||||||
[query]
|
|
||||||
## Parallelism of the query engine.
|
|
||||||
## Defaults to 0, which means the number of CPU cores.
|
|
||||||
parallelism = 0
|
|
||||||
## Whether to allow query fallback when push down optimize fails.
|
|
||||||
## Defaults to false, meaning that an error message is returned when push-down optimization fails.
|
|
||||||
allow_query_fallback = false
|
|
||||||
|
|
||||||
## Memory pool size for query execution operators (aggregation, sorting, join).
|
|
||||||
## Supports absolute size (e.g., "4GB", "8GB") or percentage of system memory (e.g., "30%").
|
|
||||||
## Setting it to 0 disables the limit (unbounded, default behavior).
|
|
||||||
## When this limit is reached, queries will fail with ResourceExhausted error.
|
|
||||||
## NOTE: This does NOT limit memory used by table scans (only applies to datanodes).
|
|
||||||
memory_pool_size = "50%"
|
|
||||||
|
|
||||||
## Datanode options.
|
## Datanode options.
|
||||||
[datanode]
|
[datanode]
|
||||||
## Datanode client options.
|
## Datanode client options.
|
||||||
@@ -269,78 +158,53 @@ tcp_nodelay = true
|
|||||||
|
|
||||||
## The logging options.
|
## The logging options.
|
||||||
[logging]
|
[logging]
|
||||||
## The directory to store the log files. If set to empty, logs will not be written to files.
|
## The directory to store the log files.
|
||||||
dir = "./greptimedb_data/logs"
|
dir = "/tmp/greptimedb/logs"
|
||||||
|
|
||||||
## The log level. Can be `info`/`debug`/`warn`/`error`.
|
## The log level. Can be `info`/`debug`/`warn`/`error`.
|
||||||
## @toml2docs:none-default
|
## +toml2docs:none-default
|
||||||
level = "info"
|
level = "info"
|
||||||
|
|
||||||
## Enable OTLP tracing.
|
## Enable OTLP tracing.
|
||||||
enable_otlp_tracing = false
|
enable_otlp_tracing = false
|
||||||
|
|
||||||
## The OTLP tracing endpoint.
|
## The OTLP tracing endpoint.
|
||||||
otlp_endpoint = "http://localhost:4318/v1/traces"
|
## +toml2docs:none-default
|
||||||
|
otlp_endpoint = ""
|
||||||
|
|
||||||
## Whether to append logs to stdout.
|
## Whether to append logs to stdout.
|
||||||
append_stdout = true
|
append_stdout = true
|
||||||
|
|
||||||
## The log format. Can be `text`/`json`.
|
|
||||||
log_format = "text"
|
|
||||||
|
|
||||||
## The maximum amount of log files.
|
|
||||||
max_log_files = 720
|
|
||||||
|
|
||||||
## The OTLP tracing export protocol. Can be `grpc`/`http`.
|
|
||||||
otlp_export_protocol = "http"
|
|
||||||
|
|
||||||
## Additional OTLP headers, only valid when using OTLP http
|
|
||||||
[logging.otlp_headers]
|
|
||||||
## @toml2docs:none-default
|
|
||||||
#Authorization = "Bearer my-token"
|
|
||||||
## @toml2docs:none-default
|
|
||||||
#Database = "My database"
|
|
||||||
|
|
||||||
## The percentage of tracing that will be sampled and exported.
|
## The percentage of tracing that will be sampled and exported.
|
||||||
## Valid range `[0, 1]`: 1 means all traces are sampled, 0 means no traces are sampled; the default value is 1.
|
## Valid range `[0, 1]`: 1 means all traces are sampled, 0 means no traces are sampled; the default value is 1.
|
||||||
## Ratios > 1 are treated as 1; ratios < 0 are treated as 0.
|
## Ratios > 1 are treated as 1; ratios < 0 are treated as 0.
|
||||||
[logging.tracing_sample_ratio]
|
[logging.tracing_sample_ratio]
|
||||||
default_ratio = 1.0
|
default_ratio = 1.0
|
||||||
|
|
||||||
## The slow query log options.
|
## The datanode can export its metrics and send them to a Prometheus-compatible service (e.g. `greptimedb` itself) via the remote-write API.
|
||||||
[slow_query]
|
## This is only used by `greptimedb` to export its own metrics internally. It's different from Prometheus scraping.
|
||||||
## Whether to enable slow query log.
|
[export_metrics]
|
||||||
enable = true
|
|
||||||
|
|
||||||
## The record type of slow queries. It can be `system_table` or `log`.
|
## Whether to enable export metrics.
|
||||||
## If `system_table` is selected, the slow queries will be recorded in a system table `greptime_private.slow_queries`.
|
enable = false
|
||||||
## If `log` is selected, the slow queries will be logged in a log file `greptimedb-slow-queries.*`.
|
|
||||||
record_type = "system_table"
|
|
||||||
|
|
||||||
## The threshold of slow query. It can be human readable time string, for example: `10s`, `100ms`, `1s`.
|
## The interval of export metrics.
|
||||||
threshold = "30s"
|
write_interval = "30s"
|
||||||
|
|
||||||
## The sampling ratio of slow query log. The value should be in the range of (0, 1]. For example, `0.1` means 10% of the slow queries will be logged and `1.0` means all slow queries will be logged.
|
## For `standalone` mode, `self_import` is recommended to collect metrics generated by the instance itself
|
||||||
sample_ratio = 1.0
|
[export_metrics.self_import]
|
||||||
|
## +toml2docs:none-default
|
||||||
|
db = "information_schema"
|
||||||
|
|
||||||
## The TTL of the `slow_queries` system table. Default is `90d` when `record_type` is `system_table`.
|
[export_metrics.remote_write]
|
||||||
ttl = "90d"
|
## The URL to send the metrics to, for example: `http://127.0.0.1:4000/v1/prometheus/write?db=information_schema`.
|
||||||
|
url = ""
|
||||||
|
|
||||||
|
## HTTP headers to carry in Prometheus remote-write requests.
|
||||||
|
headers = { }
|
||||||
|
|
||||||
## The tracing options. Only effective when compiled with the `tokio-console` feature.
|
## The tracing options. Only effective when compiled with the `tokio-console` feature.
|
||||||
#+ [tracing]
|
[tracing]
|
||||||
## The tokio console address.
|
## The tokio console address.
|
||||||
## @toml2docs:none-default
|
## +toml2docs:none-default
|
||||||
#+ tokio_console_addr = "127.0.0.1"
|
tokio_console_addr = "127.0.0.1"
|
||||||
|
|
||||||
## The memory options.
|
|
||||||
[memory]
|
|
||||||
## Whether to enable heap profiling activation during startup.
|
|
||||||
## When enabled, heap profiling will be activated if the `MALLOC_CONF` environment variable
|
|
||||||
## is set to "prof:true,prof_active:false". The official image adds this env variable.
|
|
||||||
## Default is true.
|
|
||||||
enable_heap_profiling = true
|
|
||||||
|
|
||||||
## Configuration options for the event recorder.
|
|
||||||
[event_recorder]
|
|
||||||
## TTL for the events table that will be used to store the events. Default is `90d`.
|
|
||||||
ttl = "90d"
|
|
||||||
|
|||||||
@@ -1,139 +1,29 @@
|
|||||||
## The working home directory.
|
## The working home directory.
|
||||||
data_home = "./greptimedb_data"
|
data_home = "/tmp/metasrv/"
|
||||||
|
|
||||||
## Store server address(es). The format depends on the selected backend.
|
## The bind address of metasrv.
|
||||||
##
|
bind_addr = "127.0.0.1:3002"
|
||||||
## For etcd: a list of "host:port" endpoints.
|
|
||||||
## e.g. ["192.168.1.1:2379", "192.168.1.2:2379"]
|
|
||||||
##
|
|
||||||
## For PostgreSQL: a connection string in libpq format or URI.
|
|
||||||
## e.g.
|
|
||||||
## - "host=localhost port=5432 user=postgres password=<PASSWORD> dbname=postgres"
|
|
||||||
## - "postgresql://user:password@localhost:5432/mydb?connect_timeout=10"
|
|
||||||
## The detail see: https://docs.rs/tokio-postgres/latest/tokio_postgres/config/struct.Config.html
|
|
||||||
##
|
|
||||||
## For MySQL: a connection URL.
|
|
||||||
## e.g. "mysql://user:password@localhost:3306/greptime_meta?ssl-mode=VERIFY_CA&ssl-ca=/path/to/ca.pem"
|
|
||||||
store_addrs = ["127.0.0.1:2379"]
|
|
||||||
|
|
||||||
## If it's not empty, the metasrv will store all data with this key prefix.
|
## The communication server address for frontend and datanode to connect to metasrv, "127.0.0.1:3002" by default for localhost.
|
||||||
store_key_prefix = ""
|
server_addr = "127.0.0.1:3002"
|
||||||
|
|
||||||
## The datastore for meta server.
|
## Etcd server address.
|
||||||
## Available values:
|
store_addr = "127.0.0.1:2379"
|
||||||
## - `etcd_store` (default value)
|
|
||||||
## - `memory_store`
|
|
||||||
## - `postgres_store`
|
|
||||||
## - `mysql_store`
|
|
||||||
backend = "etcd_store"
|
|
||||||
|
|
||||||
## Table name in the RDS to store metadata. Effective when using an RDS kv backend.
|
|
||||||
## **Only used when backend is `postgres_store`.**
|
|
||||||
meta_table_name = "greptime_metakv"
|
|
||||||
|
|
||||||
## Optional PostgreSQL schema for metadata table and election table name qualification.
|
|
||||||
## When PostgreSQL public schema is not writable (e.g., PostgreSQL 15+ with restricted public),
|
|
||||||
## set this to a writable schema. GreptimeDB will use `meta_schema_name`.`meta_table_name`.
|
|
||||||
## GreptimeDB will NOT create the schema automatically; please ensure it exists or the user has permission.
|
|
||||||
## **Only used when backend is `postgres_store`.**
|
|
||||||
|
|
||||||
meta_schema_name = "greptime_schema"
|
|
||||||
|
|
||||||
## Advisory lock ID in PostgreSQL for election. Effective when using PostgreSQL as the kv backend.
|
|
||||||
## Only used when backend is `postgres_store`.
|
|
||||||
meta_election_lock_id = 1
|
|
||||||
|
|
||||||
## Datanode selector type.
|
## Datanode selector type.
|
||||||
## - `round_robin` (default value)
|
## - `lease_based` (default value).
|
||||||
## - `lease_based`
|
|
||||||
## - `load_based`
|
## - `load_based`
|
||||||
## For details, please see "https://docs.greptime.com/developer-guide/metasrv/selector".
|
## For details, please see "https://docs.greptime.com/developer-guide/metasrv/selector".
|
||||||
selector = "round_robin"
|
selector = "lease_based"
|
||||||
|
|
||||||
## Store data in memory.
|
## Store data in memory.
|
||||||
use_memory_store = false
|
use_memory_store = false
|
||||||
|
|
||||||
## Whether to enable region failover.
|
## Whether to enable greptimedb telemetry.
|
||||||
## This feature is only available for GreptimeDB running in cluster mode and
|
enable_telemetry = true
|
||||||
## - Using Remote WAL
|
|
||||||
## - Using shared storage (e.g., s3).
|
|
||||||
enable_region_failover = false
|
|
||||||
|
|
||||||
## The delay before starting region failure detection.
|
## If it's not empty, the metasrv will store all data with this key prefix.
|
||||||
## This delay helps prevent Metasrv from triggering unnecessary region failovers before all Datanodes are fully started.
|
store_key_prefix = ""
|
||||||
## Especially useful when the cluster is not deployed with GreptimeDB Operator and maintenance mode is not enabled.
|
|
||||||
region_failure_detector_initialization_delay = '10m'
|
|
||||||
|
|
||||||
## Whether to allow region failover on local WAL.
|
|
||||||
## **This option is not recommended to be set to true, because it may lead to data loss during failover.**
|
|
||||||
allow_region_failover_on_local_wal = false
|
|
||||||
|
|
||||||
## Max allowed idle time before removing node info from metasrv memory.
|
|
||||||
node_max_idle_time = "24hours"
|
|
||||||
|
|
||||||
## Whether to enable greptimedb telemetry. Enabled by default.
|
|
||||||
#+ enable_telemetry = true
|
|
||||||
|
|
||||||
## The runtime options.
|
|
||||||
#+ [runtime]
|
|
||||||
## The number of threads to execute the runtime for global read operations.
|
|
||||||
#+ global_rt_size = 8
|
|
||||||
## The number of threads to execute the runtime for global write operations.
|
|
||||||
#+ compact_rt_size = 4
|
|
||||||
|
|
||||||
## TLS configuration for kv store backend (applicable for etcd, PostgreSQL, and MySQL backends)
|
|
||||||
## When using etcd, PostgreSQL, or MySQL as metadata store, you can configure TLS here
|
|
||||||
##
|
|
||||||
## Note: if TLS is configured in both this section and the `store_addrs` connection string, the
|
|
||||||
## settings here will override the TLS settings in `store_addrs`.
|
|
||||||
[backend_tls]
|
|
||||||
## TLS mode, refer to https://www.postgresql.org/docs/current/libpq-ssl.html
|
|
||||||
## - "disable" - No TLS
|
|
||||||
## - "prefer" (default) - Try TLS, fallback to plain
|
|
||||||
## - "require" - Require TLS
|
|
||||||
## - "verify_ca" - Require TLS and verify CA
|
|
||||||
## - "verify_full" - Require TLS and verify hostname
|
|
||||||
mode = "prefer"
|
|
||||||
|
|
||||||
## Path to client certificate file (for client authentication)
|
|
||||||
## Like "/path/to/client.crt"
|
|
||||||
cert_path = ""
|
|
||||||
|
|
||||||
## Path to client private key file (for client authentication)
|
|
||||||
## Like "/path/to/client.key"
|
|
||||||
key_path = ""
|
|
||||||
|
|
||||||
## Path to CA certificate file (for server certificate verification)
|
|
||||||
## Required when using custom CAs or self-signed certificates
|
|
||||||
## Leave empty to use system root certificates only
|
|
||||||
## Like "/path/to/ca.crt"
|
|
||||||
ca_cert_path = ""
|
|
||||||
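## A hypothetical stricter setup, assuming a custom CA and client certificates (paths are placeholders):
#+ [backend_tls]
#+ mode = "verify_full"
#+ cert_path = "/path/to/client.crt"
#+ key_path = "/path/to/client.key"
#+ ca_cert_path = "/path/to/ca.crt"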
|
|
||||||
## The gRPC server options.
|
|
||||||
[grpc]
|
|
||||||
## The address to bind the gRPC server.
|
|
||||||
bind_addr = "127.0.0.1:3002"
|
|
||||||
## The communication server address for the frontend and datanode to connect to metasrv.
|
|
||||||
## If left empty or unset, the server will automatically use the IP address of the first network interface
|
|
||||||
## on the host, with the same port number as the one specified in `bind_addr`.
|
|
||||||
server_addr = "127.0.0.1:3002"
|
|
||||||
## The number of server worker threads.
|
|
||||||
runtime_size = 8
|
|
||||||
## The maximum receive message size for gRPC server.
|
|
||||||
max_recv_message_size = "512MB"
|
|
||||||
## The maximum send message size for gRPC server.
|
|
||||||
max_send_message_size = "512MB"
|
|
||||||
|
|
||||||
## The HTTP server options.
|
|
||||||
[http]
|
|
||||||
## The address to bind the HTTP server.
|
|
||||||
addr = "127.0.0.1:4000"
|
|
||||||
## HTTP request timeout. Set to 0 to disable timeout.
|
|
||||||
timeout = "0s"
|
|
||||||
## HTTP request body limit.
|
|
||||||
## The following units are supported: `B`, `KB`, `KiB`, `MB`, `MiB`, `GB`, `GiB`, `TB`, `TiB`, `PB`, `PiB`.
|
|
||||||
## Set to 0 to disable limit.
|
|
||||||
body_limit = "64MB"
|
|
||||||
|
|
||||||
## Procedure storage options.
|
## Procedure storage options.
|
||||||
[procedure]
|
[procedure]
|
||||||
@@ -151,37 +41,19 @@ retry_delay = "500ms"
|
|||||||
## Comment out `max_metadata_value_size` to avoid splitting large values (no limit).
|
## Comment out `max_metadata_value_size` to avoid splitting large values (no limit).
|
||||||
max_metadata_value_size = "1500KiB"
|
max_metadata_value_size = "1500KiB"
|
||||||
|
|
||||||
## Max running procedures.
|
|
||||||
## The maximum number of procedures that can be running at the same time.
|
|
||||||
## If the number of running procedures exceeds this limit, the procedure will be rejected.
|
|
||||||
max_running_procedures = 128
|
|
||||||
|
|
||||||
# Failure detectors options.
|
# Failure detectors options.
|
||||||
# GreptimeDB uses the Phi Accrual Failure Detector algorithm to detect datanode failures.
|
|
||||||
[failure_detector]
|
[failure_detector]
|
||||||
## Maximum acceptable φ before the peer is treated as failed.
|
|
||||||
## Lower values react faster but yield more false positives.
|
|
||||||
threshold = 8.0
|
threshold = 8.0
|
||||||
## The minimum standard deviation of the heartbeat intervals.
|
|
||||||
## So tiny variations don’t make φ explode. Prevents hypersensitivity when heartbeat intervals barely vary.
|
|
||||||
min_std_deviation = "100ms"
|
min_std_deviation = "100ms"
|
||||||
## The acceptable pause duration between heartbeats.
|
acceptable_heartbeat_pause = "3000ms"
|
||||||
## Extra grace period added to the learned mean interval before φ rises, absorbing temporary network hiccups or GC pauses.
|
first_heartbeat_estimate = "1000ms"
|
||||||
acceptable_heartbeat_pause = "10000ms"
|
|
||||||
|
|
||||||
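## A hypothetical, more sensitive tuning (values are illustrative, not recommendations): a lower
## threshold and a shorter acceptable pause report failures sooner at the cost of more false positives.
#+ [failure_detector]
#+ threshold = 6.0
#+ min_std_deviation = "100ms"
#+ acceptable_heartbeat_pause = "5000ms"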
## Datanode options.
|
## Datanode options.
|
||||||
[datanode]
|
[datanode]
|
||||||
|
|
||||||
## Datanode client options.
|
## Datanode client options.
|
||||||
[datanode.client]
|
[datanode.client]
|
||||||
|
|
||||||
## Operation timeout.
|
|
||||||
timeout = "10s"
|
timeout = "10s"
|
||||||
|
|
||||||
## Connect server timeout.
|
|
||||||
connect_timeout = "10s"
|
connect_timeout = "10s"
|
||||||
|
|
||||||
## `TCP_NODELAY` option for accepted connections.
|
|
||||||
tcp_nodelay = true
|
tcp_nodelay = true
|
||||||
|
|
||||||
[wal]
|
[wal]
|
||||||
@@ -190,157 +62,88 @@ tcp_nodelay = true
|
|||||||
# - `kafka`: metasrv **have to be** configured with kafka wal config when using kafka wal provider in datanode.
|
# - `kafka`: metasrv **have to be** configured with kafka wal config when using kafka wal provider in datanode.
|
||||||
provider = "raft_engine"
|
provider = "raft_engine"
|
||||||
|
|
||||||
|
# Kafka wal config.
|
||||||
|
|
||||||
## The broker endpoints of the Kafka cluster.
|
## The broker endpoints of the Kafka cluster.
|
||||||
##
|
|
||||||
## **It's only used when the provider is `kafka`**.
|
|
||||||
broker_endpoints = ["127.0.0.1:9092"]
|
broker_endpoints = ["127.0.0.1:9092"]
|
||||||
|
|
||||||
## Automatically create topics for WAL.
|
## Number of topics to be created upon start.
|
||||||
## Set to `true` to automatically create topics for WAL.
|
|
||||||
## Otherwise, use topics named `topic_name_prefix_[0..num_topics)`
|
|
||||||
## **It's only used when the provider is `kafka`**.
|
|
||||||
auto_create_topics = true
|
|
||||||
|
|
||||||
## Interval of automatic WAL pruning.
|
|
||||||
## Set to `0s` to disable automatic WAL pruning, which periodically deletes unused remote WAL entries.
|
|
||||||
## **It's only used when the provider is `kafka`**.
|
|
||||||
auto_prune_interval = "30m"
|
|
||||||
|
|
||||||
|
|
||||||
## Estimated size threshold to trigger a flush when using Kafka remote WAL.
|
|
||||||
## Since multiple regions may share a Kafka topic, the estimated size is calculated as:
|
|
||||||
## (latest_entry_id - flushed_entry_id) * avg_record_size
|
|
||||||
## MetaSrv triggers a flush for a region when this estimated size exceeds `flush_trigger_size`.
|
|
||||||
## - `latest_entry_id`: The latest entry ID in the topic.
|
|
||||||
## - `flushed_entry_id`: The last flushed entry ID for the region.
|
|
||||||
## Set to "0" to let the system decide the flush trigger size.
|
|
||||||
## **It's only used when the provider is `kafka`**.
|
|
||||||
flush_trigger_size = "512MB"
|
|
||||||
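## A rough illustration of the estimate above (numbers are assumptions): with an average record size
## of about 1KiB and a region lagging 524288 entries behind (latest_entry_id - flushed_entry_id),
## the estimated size is roughly 512MB, which reaches `flush_trigger_size` and causes the metasrv
## to trigger a flush for that region.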
|
|
||||||
## Estimated size threshold to trigger a checkpoint when using Kafka remote WAL.
|
|
||||||
## The estimated size is calculated as:
|
|
||||||
## (latest_entry_id - last_checkpoint_entry_id) * avg_record_size
|
|
||||||
## MetaSrv triggers a checkpoint for a region when this estimated size exceeds `checkpoint_trigger_size`.
|
|
||||||
## Set to "0" to let the system decide the checkpoint trigger size.
|
|
||||||
## **It's only used when the provider is `kafka`**.
|
|
||||||
checkpoint_trigger_size = "128MB"
|
|
||||||
|
|
||||||
## Concurrent task limit for automatic WAL pruning.
|
|
||||||
## **It's only used when the provider is `kafka`**.
|
|
||||||
auto_prune_parallelism = 10
|
|
||||||
|
|
||||||
## Number of topics used for remote WAL.
|
|
||||||
## **It's only used when the provider is `kafka`**.
|
|
||||||
num_topics = 64
|
num_topics = 64
|
||||||
|
|
||||||
## Topic selector type.
|
## Topic selector type.
|
||||||
## Available selector types:
|
## Available selector types:
|
||||||
## - `round_robin` (default)
|
## - `round_robin` (default)
|
||||||
## **It's only used when the provider is `kafka`**.
|
|
||||||
selector_type = "round_robin"
|
selector_type = "round_robin"
|
||||||
|
|
||||||
|
|
||||||
## A Kafka topic is constructed by concatenating `topic_name_prefix` and `topic_id`.
|
## A Kafka topic is constructed by concatenating `topic_name_prefix` and `topic_id`.
|
||||||
## Only accepts strings that match the following regular expression pattern:
|
|
||||||
## [a-zA-Z_:-][a-zA-Z0-9_:\-\.@#]*
|
|
||||||
## e.g., greptimedb_wal_topic_0, greptimedb_wal_topic_1.
|
|
||||||
## **It's only used when the provider is `kafka`**.
|
|
||||||
topic_name_prefix = "greptimedb_wal_topic"
|
topic_name_prefix = "greptimedb_wal_topic"
|
||||||
|
|
||||||
## Expected number of replicas of each partition.
|
## Expected number of replicas of each partition.
|
||||||
## **It's only used when the provider is `kafka`**.
|
|
||||||
replication_factor = 1
|
replication_factor = 1
|
||||||
|
|
||||||
## The timeout for creating a Kafka topic.
|
## Above which a topic creation operation will be cancelled.
|
||||||
## **It's only used when the provider is `kafka`**.
|
|
||||||
create_topic_timeout = "30s"
|
create_topic_timeout = "30s"
|
||||||
|
## The initial backoff for kafka clients.
|
||||||
|
backoff_init = "500ms"
|
||||||
|
|
||||||
# The Kafka SASL configuration.
|
## The maximum backoff for kafka clients.
|
||||||
# **It's only used when the provider is `kafka`**.
|
backoff_max = "10s"
|
||||||
# Available SASL mechanisms:
|
|
||||||
# - `PLAIN`
|
|
||||||
# - `SCRAM-SHA-256`
|
|
||||||
# - `SCRAM-SHA-512`
|
|
||||||
# [wal.sasl]
|
|
||||||
# type = "SCRAM-SHA-512"
|
|
||||||
# username = "user_kafka"
|
|
||||||
# password = "secret"
|
|
||||||
|
|
||||||
# The Kafka TLS configuration.
|
## Exponential backoff rate, i.e. next backoff = base * current backoff.
|
||||||
# **It's only used when the provider is `kafka`**.
|
backoff_base = 2
|
||||||
# [wal.tls]
|
|
||||||
# server_ca_cert_path = "/path/to/server_cert"
|
|
||||||
# client_cert_path = "/path/to/client_cert"
|
|
||||||
# client_key_path = "/path/to/key"
|
|
||||||
|
|
||||||
## Configuration options for the event recorder.
|
## Stop reconnecting if the total wait time reaches the deadline. If this config is missing, reconnection attempts won't terminate.
|
||||||
[event_recorder]
|
backoff_deadline = "5mins"
|
||||||
## TTL for the events table that will be used to store the events. Default is `90d`.
|
|
||||||
ttl = "90d"
|
|
||||||
|
|
||||||
## Configuration options for the stats persistence.
|
|
||||||
[stats_persistence]
|
|
||||||
## TTL for the stats table that will be used to store the stats.
|
|
||||||
## Set to `0s` to disable stats persistence.
|
|
||||||
## Default is `0s`.
|
|
||||||
## If you want to enable stats persistence, set the TTL to a value greater than 0.
|
|
||||||
## It is recommended to set a small value, e.g., `3h`.
|
|
||||||
ttl = "0s"
|
|
||||||
## The interval to persist the stats. Default is `10m`.
|
|
||||||
## The minimum value is `10m`, if the value is less than `10m`, it will be overridden to `10m`.
|
|
||||||
interval = "10m"
|
|
||||||
|
|
||||||
## The logging options.
|
## The logging options.
|
||||||
[logging]
|
[logging]
|
||||||
## The directory to store the log files. If set to empty, logs will not be written to files.
|
## The directory to store the log files.
|
||||||
dir = "./greptimedb_data/logs"
|
dir = "/tmp/greptimedb/logs"
|
||||||
|
|
||||||
## The log level. Can be `info`/`debug`/`warn`/`error`.
|
## The log level. Can be `info`/`debug`/`warn`/`error`.
|
||||||
## @toml2docs:none-default
|
## +toml2docs:none-default
|
||||||
level = "info"
|
level = "info"
|
||||||
|
|
||||||
## Enable OTLP tracing.
|
## Enable OTLP tracing.
|
||||||
enable_otlp_tracing = false
|
enable_otlp_tracing = false
|
||||||
|
|
||||||
## The OTLP tracing endpoint.
|
## The OTLP tracing endpoint.
|
||||||
otlp_endpoint = "http://localhost:4318/v1/traces"
|
## +toml2docs:none-default
|
||||||
|
otlp_endpoint = ""
|
||||||
|
|
||||||
## Whether to append logs to stdout.
|
## Whether to append logs to stdout.
|
||||||
append_stdout = true
|
append_stdout = true
|
||||||
|
|
||||||
## The log format. Can be `text`/`json`.
|
|
||||||
log_format = "text"
|
|
||||||
|
|
||||||
## The maximum amount of log files.
|
|
||||||
max_log_files = 720
|
|
||||||
|
|
||||||
## The OTLP tracing export protocol. Can be `grpc`/`http`.
|
|
||||||
otlp_export_protocol = "http"
|
|
||||||
|
|
||||||
## Additional OTLP headers, only valid when using OTLP http
|
|
||||||
[logging.otlp_headers]
|
|
||||||
## @toml2docs:none-default
|
|
||||||
#Authorization = "Bearer my-token"
|
|
||||||
## @toml2docs:none-default
|
|
||||||
#Database = "My database"
|
|
||||||
|
|
||||||
|
|
||||||
## The percentage of tracing that will be sampled and exported.
|
## The percentage of tracing that will be sampled and exported.
|
||||||
## Valid range `[0, 1]`: 1 means all traces are sampled, 0 means no traces are sampled; the default value is 1.
|
## Valid range `[0, 1]`: 1 means all traces are sampled, 0 means no traces are sampled; the default value is 1.
|
||||||
## Ratios > 1 are treated as 1; ratios < 0 are treated as 0.
|
## Ratios > 1 are treated as 1; ratios < 0 are treated as 0.
|
||||||
[logging.tracing_sample_ratio]
|
[logging.tracing_sample_ratio]
|
||||||
default_ratio = 1.0
|
default_ratio = 1.0
|
||||||
|
|
||||||
## The tracing options. Only effective when compiled with the `tokio-console` feature.
|
## The datanode can export its metrics and send them to a Prometheus-compatible service (e.g. `greptimedb` itself) via the remote-write API.
|
||||||
#+ [tracing]
|
## This is only used by `greptimedb` to export its own metrics internally. It's different from Prometheus scraping.
|
||||||
## The tokio console address.
|
[export_metrics]
|
||||||
## @toml2docs:none-default
|
|
||||||
#+ tokio_console_addr = "127.0.0.1"
|
|
||||||
|
|
||||||
## The memory options.
|
## Whether to enable export metrics.
|
||||||
[memory]
|
enable = false
|
||||||
## Whether to enable heap profiling activation during startup.
|
|
||||||
## When enabled, heap profiling will be activated if the `MALLOC_CONF` environment variable
|
## The interval of export metrics.
|
||||||
## is set to "prof:true,prof_active:false". The official image adds this env variable.
|
write_interval = "30s"
|
||||||
## Default is true.
|
|
||||||
enable_heap_profiling = true
|
## For `standalone` mode, `self_import` is recommended to collect metrics generated by the instance itself
|
||||||
|
[export_metrics.self_import]
|
||||||
|
## +toml2docs:none-default
|
||||||
|
db = "information_schema"
|
||||||
|
|
||||||
|
[export_metrics.remote_write]
|
||||||
|
## The URL to send the metrics to, for example: `http://127.0.0.1:4000/v1/prometheus/write?db=information_schema`.
|
||||||
|
url = ""
|
||||||
|
|
||||||
|
## HTTP headers to carry in Prometheus remote-write requests.
|
||||||
|
headers = { }
|
||||||
|
|
||||||
|
## The tracing options. Only effective when compiled with the `tokio-console` feature.
|
||||||
|
[tracing]
|
||||||
|
## The tokio console address.
|
||||||
|
## +toml2docs:none-default
|
||||||
|
tokio_console_addr = "127.0.0.1"
|
||||||
|
|||||||
@@ -1,81 +1,29 @@
|
|||||||
|
## The running mode of the datanode. It can be `standalone` or `distributed`.
|
||||||
|
mode = "standalone"
|
||||||
|
|
||||||
|
## Enable telemetry to collect anonymous usage data.
|
||||||
|
enable_telemetry = true
|
||||||
|
|
||||||
## The default timezone of the server.
|
## The default timezone of the server.
|
||||||
## @toml2docs:none-default
|
## +toml2docs:none-default
|
||||||
default_timezone = "UTC"
|
default_timezone = "UTC"
|
||||||
|
|
||||||
## The default column prefix for auto-created time index and value columns.
|
|
||||||
## @toml2docs:none-default
|
|
||||||
default_column_prefix = "greptime"
|
|
||||||
|
|
||||||
## Initialize all regions in the background during the startup.
|
|
||||||
## By default, it provides services after all regions have been initialized.
|
|
||||||
init_regions_in_background = false
|
|
||||||
|
|
||||||
## Parallelism of initializing regions.
|
|
||||||
init_regions_parallelism = 16
|
|
||||||
|
|
||||||
## The maximum number of concurrent queries allowed to be executed. Zero means unlimited.
|
|
||||||
## NOTE: This setting affects scan_memory_limit's privileged tier allocation.
|
|
||||||
## When set, 70% of queries get privileged memory access (full scan_memory_limit).
|
|
||||||
## The remaining 30% get standard tier access (70% of scan_memory_limit).
|
|
||||||
max_concurrent_queries = 0
|
|
||||||
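## An illustrative (non-default) setting: with `max_concurrent_queries = 100`, roughly 70 concurrent
## queries may use the full `scan_memory_limit` budget while the remaining 30 are capped at about
## 70% of it, as described above.
#+ max_concurrent_queries = 100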
|
|
||||||
## Enable telemetry to collect anonymous usage data. Enabled by default.
|
|
||||||
#+ enable_telemetry = true
|
|
||||||
|
|
||||||
## The maximum in-flight write bytes.
|
|
||||||
## @toml2docs:none-default
|
|
||||||
#+ max_in_flight_write_bytes = "500MB"
|
|
||||||
|
|
||||||
## The runtime options.
|
|
||||||
#+ [runtime]
|
|
||||||
## The number of threads to execute the runtime for global read operations.
|
|
||||||
#+ global_rt_size = 8
|
|
||||||
## The number of threads to execute the runtime for global write operations.
|
|
||||||
#+ compact_rt_size = 4
|
|
||||||
|
|
||||||
## The HTTP server options.
|
## The HTTP server options.
|
||||||
[http]
|
[http]
|
||||||
## The address to bind the HTTP server.
|
## The address to bind the HTTP server.
|
||||||
addr = "127.0.0.1:4000"
|
addr = "127.0.0.1:4000"
|
||||||
## HTTP request timeout. Set to 0 to disable timeout.
|
## HTTP request timeout.
|
||||||
timeout = "0s"
|
timeout = "30s"
|
||||||
## HTTP request body limit.
|
## HTTP request body limit.
|
||||||
## The following units are supported: `B`, `KB`, `KiB`, `MB`, `MiB`, `GB`, `GiB`, `TB`, `TiB`, `PB`, `PiB`.
|
## Support the following units are supported: `B`, `KB`, `KiB`, `MB`, `MiB`, `GB`, `GiB`, `TB`, `TiB`, `PB`, `PiB`.
|
||||||
## Set to 0 to disable limit.
|
|
||||||
body_limit = "64MB"
|
body_limit = "64MB"
|
||||||
## Maximum total memory for all concurrent HTTP request bodies.
|
|
||||||
## Set to 0 to disable the limit. Default: "0" (unlimited)
|
|
||||||
## @toml2docs:none-default
|
|
||||||
#+ max_total_body_memory = "1GB"
|
|
||||||
## HTTP CORS support; it's turned on by default.
|
|
||||||
## This allows browsers to access HTTP APIs without CORS restrictions.
|
|
||||||
enable_cors = true
|
|
||||||
## Customize allowed origins for HTTP CORS.
|
|
||||||
## @toml2docs:none-default
|
|
||||||
cors_allowed_origins = ["https://example.com"]
|
|
||||||
|
|
||||||
## Whether to enable validation for Prometheus remote write requests.
|
|
||||||
## Available options:
|
|
||||||
## - strict: deny invalid UTF-8 strings (default).
|
|
||||||
## - lossy: allow invalid UTF-8 strings, replace invalid characters with REPLACEMENT_CHARACTER(U+FFFD).
|
|
||||||
## - unchecked: do not validate strings.
|
|
||||||
prom_validation_mode = "strict"
|
|
||||||
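As an illustration of the three validation modes listed above, a sketch that trades strictness for compatibility (the value is illustrative, not the default):

# Sketch only: accept invalid UTF-8 in Prometheus remote-write payloads,
# replacing bad characters with U+FFFD instead of rejecting the request.
prom_validation_mode = "lossy"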
|
|
||||||
## The gRPC server options.
|
## The gRPC server options.
|
||||||
[grpc]
|
[grpc]
|
||||||
## The address to bind the gRPC server.
|
## The address to bind the gRPC server.
|
||||||
bind_addr = "127.0.0.1:4001"
|
addr = "127.0.0.1:4001"
|
||||||
## The number of server worker threads.
|
## The number of server worker threads.
|
||||||
runtime_size = 8
|
runtime_size = 8
|
||||||
## Maximum total memory for all concurrent gRPC request messages.
|
|
||||||
## Set to 0 to disable the limit. Default: "0" (unlimited)
|
|
||||||
## @toml2docs:none-default
|
|
||||||
#+ max_total_message_memory = "1GB"
|
|
||||||
## The maximum connection age for gRPC connection.
|
|
||||||
## The value can be a human-readable time string. For example: `10m` for ten minutes or `1h` for one hour.
|
|
||||||
## Refer to https://grpc.io/docs/guides/keepalive/ for more details.
|
|
||||||
## @toml2docs:none-default
|
|
||||||
#+ max_connection_age = "10m"
|
|
||||||
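The commented gRPC options above are opt-in; a sketch with them enabled, using the example values from the comments:

# Sketch only: bounds per-connection lifetime and total in-flight message memory.
[grpc]
bind_addr = "127.0.0.1:4001"
runtime_size = 8
max_connection_age = "10m"
max_total_message_memory = "1GB"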
|
|
||||||
## gRPC server TLS options, see `mysql.tls` section.
|
## gRPC server TLS options, see `mysql.tls` section.
|
||||||
[grpc.tls]
|
[grpc.tls]
|
||||||
@@ -83,11 +31,11 @@ runtime_size = 8
|
|||||||
mode = "disable"
|
mode = "disable"
|
||||||
|
|
||||||
## Certificate file path.
|
## Certificate file path.
|
||||||
## @toml2docs:none-default
|
## +toml2docs:none-default
|
||||||
cert_path = ""
|
cert_path = ""
|
||||||
|
|
||||||
## Private key file path.
|
## Private key file path.
|
||||||
## @toml2docs:none-default
|
## +toml2docs:none-default
|
||||||
key_path = ""
|
key_path = ""
|
||||||
|
|
||||||
## Watch for Certificate and key file change and auto reload.
|
## Watch for Certificate and key file change and auto reload.
|
||||||
@@ -102,11 +50,7 @@ enable = true
|
|||||||
addr = "127.0.0.1:4002"
|
addr = "127.0.0.1:4002"
|
||||||
## The number of server worker threads.
|
## The number of server worker threads.
|
||||||
runtime_size = 2
|
runtime_size = 2
|
||||||
## Server-side keep-alive time.
|
|
||||||
## Set to 0 (default) to disable.
|
|
||||||
keep_alive = "0s"
|
|
||||||
## Maximum entries in the MySQL prepared statement cache; default is 10,000.
|
|
||||||
prepared_stmt_cache_size = 10000
|
|
||||||
# MySQL server TLS options.
|
# MySQL server TLS options.
|
||||||
[mysql.tls]
|
[mysql.tls]
|
||||||
|
|
||||||
@@ -119,11 +63,11 @@ prepared_stmt_cache_size= 10000
|
|||||||
mode = "disable"
|
mode = "disable"
|
||||||
|
|
||||||
## Certificate file path.
|
## Certificate file path.
|
||||||
## @toml2docs:none-default
|
## +toml2docs:none-default
|
||||||
cert_path = ""
|
cert_path = ""
|
||||||
|
|
||||||
## Private key file path.
|
## Private key file path.
|
||||||
## @toml2docs:none-default
|
## +toml2docs:none-default
|
||||||
key_path = ""
|
key_path = ""
|
||||||
|
|
||||||
## Watch for Certificate and key file change and auto reload
|
## Watch for Certificate and key file change and auto reload
|
||||||
@@ -137,9 +81,6 @@ enable = true
|
|||||||
addr = "127.0.0.1:4003"
|
addr = "127.0.0.1:4003"
|
||||||
## The number of server worker threads.
|
## The number of server worker threads.
|
||||||
runtime_size = 2
|
runtime_size = 2
|
||||||
## Server-side keep-alive time.
|
|
||||||
## Set to 0 (default) to disable.
|
|
||||||
keep_alive = "0s"
|
|
||||||
|
|
||||||
## PostgreSQL server TLS options, see `mysql.tls` section.
|
## PostgreSQL server TLS options, see `mysql.tls` section.
|
||||||
[postgres.tls]
|
[postgres.tls]
|
||||||
@@ -147,11 +88,11 @@ keep_alive = "0s"
|
|||||||
mode = "disable"
|
mode = "disable"
|
||||||
|
|
||||||
## Certificate file path.
|
## Certificate file path.
|
||||||
## @toml2docs:none-default
|
## +toml2docs:none-default
|
||||||
cert_path = ""
|
cert_path = ""
|
||||||
|
|
||||||
## Private key file path.
|
## Private key file path.
|
||||||
## @toml2docs:none-default
|
## +toml2docs:none-default
|
||||||
key_path = ""
|
key_path = ""
|
||||||
|
|
||||||
## Watch for Certificate and key file change and auto reload
|
## Watch for Certificate and key file change and auto reload
|
||||||
@@ -167,11 +108,6 @@ enable = true
|
|||||||
## Whether to enable InfluxDB protocol in HTTP API.
|
## Whether to enable InfluxDB protocol in HTTP API.
|
||||||
enable = true
|
enable = true
|
||||||
|
|
||||||
## Jaeger protocol options.
|
|
||||||
[jaeger]
|
|
||||||
## Whether to enable Jaeger protocol in HTTP API.
|
|
||||||
enable = true
|
|
||||||
|
|
||||||
## Prometheus remote storage options
|
## Prometheus remote storage options
|
||||||
[prom_store]
|
[prom_store]
|
||||||
## Whether to enable Prometheus remote write and read in HTTP API.
|
## Whether to enable Prometheus remote write and read in HTTP API.
|
||||||
@@ -188,20 +124,20 @@ provider = "raft_engine"
|
|||||||
|
|
||||||
## The directory to store the WAL files.
|
## The directory to store the WAL files.
|
||||||
## **It's only used when the provider is `raft_engine`**.
|
## **It's only used when the provider is `raft_engine`**.
|
||||||
## @toml2docs:none-default
|
## +toml2docs:none-default
|
||||||
dir = "./greptimedb_data/wal"
|
dir = "/tmp/greptimedb/wal"
|
||||||
|
|
||||||
## The size of the WAL segment file.
|
## The size of the WAL segment file.
|
||||||
## **It's only used when the provider is `raft_engine`**.
|
## **It's only used when the provider is `raft_engine`**.
|
||||||
file_size = "128MB"
|
file_size = "256MB"
|
||||||
|
|
||||||
## The threshold of the WAL size to trigger a purge.
|
## The threshold of the WAL size to trigger a flush.
|
||||||
## **It's only used when the provider is `raft_engine`**.
|
## **It's only used when the provider is `raft_engine`**.
|
||||||
purge_threshold = "1GB"
|
purge_threshold = "4GB"
|
||||||
|
|
||||||
## The interval to trigger a purge.
|
## The interval to trigger a flush.
|
||||||
## **It's only used when the provider is `raft_engine`**.
|
## **It's only used when the provider is `raft_engine`**.
|
||||||
purge_interval = "1m"
|
purge_interval = "10m"
|
||||||
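The raft_engine settings above work together: segment files of `file_size` accumulate under `dir`, and once their total size passes `purge_threshold` a purge is triggered, checked every `purge_interval`. A sketch with the defaults documented above:

# Sketch only: raft_engine WAL with the defaults shown above.
[wal]
provider = "raft_engine"
dir = "./greptimedb_data/wal"
file_size = "128MB"
purge_threshold = "1GB"
purge_interval = "1m"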
|
|
||||||
## The read batch size.
|
## The read batch size.
|
||||||
## **It's only used when the provider is `raft_engine`**.
|
## **It's only used when the provider is `raft_engine`**.
|
||||||
@@ -223,87 +159,45 @@ prefill_log_files = false
|
|||||||
## **It's only used when the provider is `raft_engine`**.
|
## **It's only used when the provider is `raft_engine`**.
|
||||||
sync_period = "10s"
|
sync_period = "10s"
|
||||||
|
|
||||||
## Parallelism during WAL recovery.
|
|
||||||
recovery_parallelism = 2
|
|
||||||
|
|
||||||
## The Kafka broker endpoints.
|
## The Kafka broker endpoints.
|
||||||
## **It's only used when the provider is `kafka`**.
|
## **It's only used when the provider is `kafka`**.
|
||||||
broker_endpoints = ["127.0.0.1:9092"]
|
broker_endpoints = ["127.0.0.1:9092"]
|
||||||
|
|
||||||
## Automatically create topics for WAL.
|
|
||||||
## Set to `true` to automatically create topics for WAL.
|
|
||||||
## Otherwise, use topics named `topic_name_prefix_[0..num_topics)`
|
|
||||||
auto_create_topics = true
|
|
||||||
|
|
||||||
## Number of topics.
|
|
||||||
## **It's only used when the provider is `kafka`**.
|
|
||||||
num_topics = 64
|
|
||||||
|
|
||||||
## Topic selector type.
|
|
||||||
## Available selector types:
|
|
||||||
## - `round_robin` (default)
|
|
||||||
## **It's only used when the provider is `kafka`**.
|
|
||||||
selector_type = "round_robin"
|
|
||||||
|
|
||||||
## A Kafka topic is constructed by concatenating `topic_name_prefix` and `topic_id`.
|
|
||||||
## e.g., greptimedb_wal_topic_0, greptimedb_wal_topic_1.
|
|
||||||
## **It's only used when the provider is `kafka`**.
|
|
||||||
topic_name_prefix = "greptimedb_wal_topic"
|
|
||||||
|
|
||||||
## Expected number of replicas of each partition.
|
|
||||||
## **It's only used when the provider is `kafka`**.
|
|
||||||
replication_factor = 1
|
|
||||||
|
|
||||||
## The timeout above which a topic creation operation will be cancelled.
|
|
||||||
## **It's only used when the provider is `kafka`**.
|
|
||||||
create_topic_timeout = "30s"
|
|
||||||
|
|
||||||
## The max size of a single producer batch.
|
## The max size of a single producer batch.
|
||||||
## Warning: Kafka has a default limit of 1MB per message in a topic.
|
## Warning: Kafka has a default limit of 1MB per message in a topic.
|
||||||
## **It's only used when the provider is `kafka`**.
|
## **It's only used when the provider is `kafka`**.
|
||||||
max_batch_bytes = "1MB"
|
max_batch_size = "1MB"
|
||||||
|
|
||||||
|
## The linger duration of a kafka batch producer.
|
||||||
|
## **It's only used when the provider is `kafka`**.
|
||||||
|
linger = "200ms"
|
||||||
|
|
||||||
## The consumer wait timeout.
|
## The consumer wait timeout.
|
||||||
## **It's only used when the provider is `kafka`**.
|
## **It's only used when the provider is `kafka`**.
|
||||||
consumer_wait_timeout = "100ms"
|
consumer_wait_timeout = "100ms"
|
||||||
|
|
||||||
## Ignore missing entries when reading the WAL.
|
## The initial backoff delay.
|
||||||
## **It's only used when the provider is `kafka`**.
|
## **It's only used when the provider is `kafka`**.
|
||||||
##
|
backoff_init = "500ms"
|
||||||
## This option ensures that when Kafka messages are deleted, the system
|
|
||||||
## can still successfully replay memtable data without throwing an
|
|
||||||
## out-of-range error.
|
|
||||||
## However, enabling this option might lead to unexpected data loss,
|
|
||||||
## as the system will skip over missing entries instead of treating
|
|
||||||
## them as critical errors.
|
|
||||||
overwrite_entry_start_id = false
|
|
||||||
|
|
||||||
# The Kafka SASL configuration.
|
## The maximum backoff delay.
|
||||||
# **It's only used when the provider is `kafka`**.
|
## **It's only used when the provider is `kafka`**.
|
||||||
# Available SASL mechanisms:
|
backoff_max = "10s"
|
||||||
# - `PLAIN`
|
|
||||||
# - `SCRAM-SHA-256`
|
|
||||||
# - `SCRAM-SHA-512`
|
|
||||||
# [wal.sasl]
|
|
||||||
# type = "SCRAM-SHA-512"
|
|
||||||
# username = "user_kafka"
|
|
||||||
# password = "secret"
|
|
||||||
|
|
||||||
# The Kafka TLS configuration.
|
## The exponential backoff rate, i.e. next backoff = base * current backoff.
|
||||||
# **It's only used when the provider is `kafka`**.
|
## **It's only used when the provider is `kafka`**.
|
||||||
# [wal.tls]
|
backoff_base = 2
|
||||||
# server_ca_cert_path = "/path/to/server_cert"
|
|
||||||
# client_cert_path = "/path/to/client_cert"
|
## The deadline of retries.
|
||||||
# client_key_path = "/path/to/key"
|
## **It's only used when the provider is `kafka`**.
|
||||||
|
backoff_deadline = "5mins"
|
||||||
|
|
||||||
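Pulling the Kafka-related rows above together, a sketch of a `kafka` WAL provider (values are the documented defaults; the SASL credentials are the placeholders from the commented example):

# Sketch only: Kafka-backed WAL combining the options documented above.
[wal]
provider = "kafka"
broker_endpoints = ["127.0.0.1:9092"]
auto_create_topics = true
num_topics = 64
selector_type = "round_robin"
topic_name_prefix = "greptimedb_wal_topic"
replication_factor = 1
create_topic_timeout = "30s"
max_batch_bytes = "1MB"
consumer_wait_timeout = "100ms"
overwrite_entry_start_id = false

# Optional SASL authentication, as in the commented example above.
# [wal.sasl]
# type = "SCRAM-SHA-512"
# username = "user_kafka"
# password = "secret"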
## Metadata storage options.
|
## Metadata storage options.
|
||||||
[metadata_store]
|
[metadata_store]
|
||||||
## The size of the metadata store log file.
|
## Kv file size in bytes.
|
||||||
file_size = "64MB"
|
file_size = "256MB"
|
||||||
## The threshold of the metadata store size to trigger a purge.
|
## Kv purge threshold.
|
||||||
purge_threshold = "256MB"
|
purge_threshold = "4GB"
|
||||||
## The interval of the metadata store to trigger a purge.
|
|
||||||
purge_interval = "1m"
|
|
||||||
|
|
||||||
## Procedure storage options.
|
## Procedure storage options.
|
||||||
[procedure]
|
[procedure]
|
||||||
@@ -311,16 +205,6 @@ purge_interval = "1m"
|
|||||||
max_retry_times = 3
|
max_retry_times = 3
|
||||||
## Initial retry delay of procedures, increases exponentially
|
## Initial retry delay of procedures, increases exponentially
|
||||||
retry_delay = "500ms"
|
retry_delay = "500ms"
|
||||||
## Max running procedures.
|
|
||||||
## The maximum number of procedures that can be running at the same time.
|
|
||||||
## If the number of running procedures exceeds this limit, the procedure will be rejected.
|
|
||||||
max_running_procedures = 128
|
|
||||||
|
|
||||||
## flow engine options.
|
|
||||||
[flow]
|
|
||||||
## The number of flow worker in flownode.
|
|
||||||
## Not setting(or set to 0) this value will use the number of CPU cores divided by 2.
|
|
||||||
#+ num_workers = 0
|
|
||||||
|
|
||||||
# Example of using S3 as the storage.
|
# Example of using S3 as the storage.
|
||||||
# [storage]
|
# [storage]
|
||||||
@@ -331,7 +215,6 @@ max_running_procedures = 128
|
|||||||
# secret_access_key = "123456"
|
# secret_access_key = "123456"
|
||||||
# endpoint = "https://s3.amazonaws.com"
|
# endpoint = "https://s3.amazonaws.com"
|
||||||
# region = "us-west-2"
|
# region = "us-west-2"
|
||||||
# enable_virtual_host_style = false
|
|
||||||
|
|
||||||
# Example of using Oss as the storage.
|
# Example of using Oss as the storage.
|
||||||
# [storage]
|
# [storage]
|
||||||
@@ -359,26 +242,12 @@ max_running_procedures = 128
|
|||||||
# root = "data"
|
# root = "data"
|
||||||
# scope = "test"
|
# scope = "test"
|
||||||
# credential_path = "123456"
|
# credential_path = "123456"
|
||||||
# credential = "base64-credential"
|
|
||||||
# endpoint = "https://storage.googleapis.com"
|
# endpoint = "https://storage.googleapis.com"
|
||||||
|
|
||||||
## The query engine options.
|
|
||||||
[query]
|
|
||||||
## Parallelism of the query engine.
|
|
||||||
## Default to 0, which means the number of CPU cores.
|
|
||||||
parallelism = 0
|
|
||||||
|
|
||||||
## Memory pool size for query execution operators (aggregation, sorting, join).
|
|
||||||
## Supports absolute size (e.g., "2GB", "4GB") or percentage of system memory (e.g., "20%").
|
|
||||||
## Setting it to 0 disables the limit (unbounded, default behavior).
|
|
||||||
## When this limit is reached, queries will fail with ResourceExhausted error.
|
|
||||||
## NOTE: This does NOT limit memory used by table scans.
|
|
||||||
memory_pool_size = "50%"
|
|
||||||
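A sketch of the `[query]` block above with an absolute pool size instead of a percentage; the "2GB" value is one of the examples from the comment, not a recommendation:

# Sketch only. Operators (aggregation, sorting, join) that exceed the pool fail
# with a ResourceExhausted error; table scans are limited separately by
# `scan_memory_limit` under [region_engine.mito].
[query]
parallelism = 0
memory_pool_size = "2GB"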
|
|
||||||
## The data storage options.
|
## The data storage options.
|
||||||
[storage]
|
[storage]
|
||||||
## The working home directory.
|
## The working home directory.
|
||||||
data_home = "./greptimedb_data"
|
data_home = "/tmp/greptimedb/"
|
||||||
|
|
||||||
## The storage type used to store the data.
|
## The storage type used to store the data.
|
||||||
## - `File`: the data is stored in the local file system.
|
## - `File`: the data is stored in the local file system.
|
||||||
@@ -388,130 +257,87 @@ data_home = "./greptimedb_data"
|
|||||||
## - `Oss`: the data is stored in the Aliyun OSS.
|
## - `Oss`: the data is stored in the Aliyun OSS.
|
||||||
type = "File"
|
type = "File"
|
||||||
|
|
||||||
## Whether to enable read cache. If not set, the read cache will be enabled by default when using object storage.
|
## Cache configuration for object storage such as 'S3' etc.
|
||||||
#+ enable_read_cache = true
|
## The local file cache directory.
|
||||||
|
## +toml2docs:none-default
|
||||||
|
cache_path = "/path/local_cache"
|
||||||
|
|
||||||
## Read cache configuration for object storage such as 'S3'. It's configured by default when using object storage, and configuring it is recommended for better performance.
|
## The local file cache capacity in bytes.
|
||||||
## A local file directory, defaults to `{data_home}`. An empty string disables the cache.
|
## +toml2docs:none-default
|
||||||
## @toml2docs:none-default
|
cache_capacity = "256MB"
|
||||||
#+ cache_path = ""
|
|
||||||
|
|
||||||
## The local file cache capacity in bytes. If your disk space is sufficient, it is recommended to set it larger.
|
|
||||||
## @toml2docs:none-default
|
|
||||||
cache_capacity = "5GiB"
|
|
||||||
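The read-cache keys above only matter when object storage is in use. A sketch that ties them to the S3 settings documented below (credentials are placeholders):

# Sketch only: S3 storage with the read cache described above.
[storage]
type = "S3"
bucket = "greptimedb"
root = "greptimedb"
access_key_id = "test"
secret_access_key = "test"
endpoint = "https://s3.amazonaws.com"
region = "us-west-2"
# Read cache is enabled by default on object storage; an empty cache_path
# falls back to `{data_home}`.
enable_read_cache = true
cache_capacity = "5GiB"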
|
|
||||||
## The S3 bucket name.
|
## The S3 bucket name.
|
||||||
## **It's only used when the storage type is `S3`, `Oss` and `Gcs`**.
|
## **It's only used when the storage type is `S3`, `Oss` and `Gcs`**.
|
||||||
## @toml2docs:none-default
|
## +toml2docs:none-default
|
||||||
bucket = "greptimedb"
|
bucket = "greptimedb"
|
||||||
|
|
||||||
## The S3 data will be stored in the specified prefix, for example, `s3://${bucket}/${root}`.
|
## The S3 data will be stored in the specified prefix, for example, `s3://${bucket}/${root}`.
|
||||||
## **It's only used when the storage type is `S3`, `Oss` and `Azblob`**.
|
## **It's only used when the storage type is `S3`, `Oss` and `Azblob`**.
|
||||||
## @toml2docs:none-default
|
## +toml2docs:none-default
|
||||||
root = "greptimedb"
|
root = "greptimedb"
|
||||||
|
|
||||||
## The access key id of the aws account.
|
## The access key id of the aws account.
|
||||||
## It's **highly recommended** to use AWS IAM roles instead of hardcoding the access key id and secret key.
|
## It's **highly recommended** to use AWS IAM roles instead of hardcoding the access key id and secret key.
|
||||||
## **It's only used when the storage type is `S3` and `Oss`**.
|
## **It's only used when the storage type is `S3` and `Oss`**.
|
||||||
## @toml2docs:none-default
|
## +toml2docs:none-default
|
||||||
access_key_id = "test"
|
access_key_id = "test"
|
||||||
|
|
||||||
## The secret access key of the aws account.
|
## The secret access key of the aws account.
|
||||||
## It's **highly recommended** to use AWS IAM roles instead of hardcoding the access key id and secret key.
|
## It's **highly recommended** to use AWS IAM roles instead of hardcoding the access key id and secret key.
|
||||||
## **It's only used when the storage type is `S3`**.
|
## **It's only used when the storage type is `S3`**.
|
||||||
## @toml2docs:none-default
|
## +toml2docs:none-default
|
||||||
secret_access_key = "test"
|
secret_access_key = "test"
|
||||||
|
|
||||||
## The secret access key of the aliyun account.
|
## The secret access key of the aliyun account.
|
||||||
## **It's only used when the storage type is `Oss`**.
|
## **It's only used when the storage type is `Oss`**.
|
||||||
## @toml2docs:none-default
|
## +toml2docs:none-default
|
||||||
access_key_secret = "test"
|
access_key_secret = "test"
|
||||||
|
|
||||||
## The account key of the azure account.
|
## The account key of the azure account.
|
||||||
## **It's only used when the storage type is `Azblob`**.
|
## **It's only used when the storage type is `Azblob`**.
|
||||||
## @toml2docs:none-default
|
## +toml2docs:none-default
|
||||||
account_name = "test"
|
account_name = "test"
|
||||||
|
|
||||||
## The account key of the azure account.
|
## The account key of the azure account.
|
||||||
## **It's only used when the storage type is `Azblob`**.
|
## **It's only used when the storage type is `Azblob`**.
|
||||||
## @toml2docs:none-default
|
## +toml2docs:none-default
|
||||||
account_key = "test"
|
account_key = "test"
|
||||||
|
|
||||||
## The scope of the google cloud storage.
|
## The scope of the google cloud storage.
|
||||||
## **It's only used when the storage type is `Gcs`**.
|
## **It's only used when the storage type is `Gcs`**.
|
||||||
## @toml2docs:none-default
|
## +toml2docs:none-default
|
||||||
scope = "test"
|
scope = "test"
|
||||||
|
|
||||||
## The credential path of the google cloud storage.
|
## The credential path of the google cloud storage.
|
||||||
## **It's only used when the storage type is `Gcs`**.
|
## **It's only used when the storage type is `Gcs`**.
|
||||||
## @toml2docs:none-default
|
## +toml2docs:none-default
|
||||||
credential_path = "test"
|
credential_path = "test"
|
||||||
|
|
||||||
## The credential of the google cloud storage.
|
|
||||||
## **It's only used when the storage type is `Gcs`**.
|
|
||||||
## @toml2docs:none-default
|
|
||||||
credential = "base64-credential"
|
|
||||||
|
|
||||||
## The container of the azure account.
|
## The container of the azure account.
|
||||||
## **It's only used when the storage type is `Azblob`**.
|
## **It's only used when the storage type is `Azblob`**.
|
||||||
## @toml2docs:none-default
|
## +toml2docs:none-default
|
||||||
container = "greptimedb"
|
container = "greptimedb"
|
||||||
|
|
||||||
## The sas token of the azure account.
|
## The sas token of the azure account.
|
||||||
## **It's only used when the storage type is `Azblob`**.
|
## **It's only used when the storage type is `Azblob`**.
|
||||||
## @toml2docs:none-default
|
## +toml2docs:none-default
|
||||||
sas_token = ""
|
sas_token = ""
|
||||||
|
|
||||||
## The endpoint of the S3 service.
|
## The endpoint of the S3 service.
|
||||||
## **It's only used when the storage type is `S3`, `Oss`, `Gcs` and `Azblob`**.
|
## **It's only used when the storage type is `S3`, `Oss`, `Gcs` and `Azblob`**.
|
||||||
## @toml2docs:none-default
|
## +toml2docs:none-default
|
||||||
endpoint = "https://s3.amazonaws.com"
|
endpoint = "https://s3.amazonaws.com"
|
||||||
|
|
||||||
## The region of the S3 service.
|
## The region of the S3 service.
|
||||||
## **It's only used when the storage type is `S3`, `Oss`, `Gcs` and `Azblob`**.
|
## **It's only used when the storage type is `S3`, `Oss`, `Gcs` and `Azblob`**.
|
||||||
## @toml2docs:none-default
|
## +toml2docs:none-default
|
||||||
region = "us-west-2"
|
region = "us-west-2"
|
||||||
|
|
||||||
## The http client options to the storage.
|
|
||||||
## **It's only used when the storage type is `S3`, `Oss`, `Gcs` and `Azblob`**.
|
|
||||||
[storage.http_client]
|
|
||||||
|
|
||||||
## The maximum idle connection per host allowed in the pool.
|
|
||||||
pool_max_idle_per_host = 1024
|
|
||||||
|
|
||||||
## The timeout for only the connect phase of a http client.
|
|
||||||
connect_timeout = "30s"
|
|
||||||
|
|
||||||
## The total request timeout, applied from when the request starts connecting until the response body has finished.
|
|
||||||
## Also considered a total deadline.
|
|
||||||
timeout = "30s"
|
|
||||||
|
|
||||||
## The timeout for idle sockets being kept-alive.
|
|
||||||
pool_idle_timeout = "90s"
|
|
||||||
|
|
||||||
## Whether to skip SSL verification.
|
|
||||||
## **Security Notice**: Setting `skip_ssl_validation = true` disables certificate verification, making connections vulnerable to man-in-the-middle attacks. Only use this in development or trusted private networks.
|
|
||||||
skip_ssl_validation = false
|
|
||||||
|
|
||||||
# Custom storage options
|
# Custom storage options
|
||||||
# [[storage.providers]]
|
# [[storage.providers]]
|
||||||
# name = "S3"
|
|
||||||
# type = "S3"
|
# type = "S3"
|
||||||
# bucket = "greptimedb"
|
|
||||||
# root = "data"
|
|
||||||
# access_key_id = "test"
|
|
||||||
# secret_access_key = "123456"
|
|
||||||
# endpoint = "https://s3.amazonaws.com"
|
|
||||||
# region = "us-west-2"
|
|
||||||
# [[storage.providers]]
|
# [[storage.providers]]
|
||||||
# name = "Gcs"
|
|
||||||
# type = "Gcs"
|
# type = "Gcs"
|
||||||
# bucket = "greptimedb"
|
|
||||||
# root = "data"
|
|
||||||
# scope = "test"
|
|
||||||
# credential_path = "123456"
|
|
||||||
# credential = "base64-credential"
|
|
||||||
# endpoint = "https://storage.googleapis.com"
|
|
||||||
|
|
||||||
## The region engine options. You can configure multiple region engines.
|
## The region engine options. You can configure multiple region engines.
|
||||||
[[region_engine]]
|
[[region_engine]]
|
||||||
@@ -520,7 +346,7 @@ skip_ssl_validation = false
|
|||||||
[region_engine.mito]
|
[region_engine.mito]
|
||||||
|
|
||||||
## Number of region workers.
|
## Number of region workers.
|
||||||
#+ num_workers = 8
|
num_workers = 8
|
||||||
|
|
||||||
## Request channel size of each worker.
|
## Request channel size of each worker.
|
||||||
worker_channel_size = 128
|
worker_channel_size = 128
|
||||||
@@ -534,207 +360,82 @@ manifest_checkpoint_distance = 10
|
|||||||
## Whether to compress manifest and checkpoint file by gzip (default false).
|
## Whether to compress manifest and checkpoint file by gzip (default false).
|
||||||
compress_manifest = false
|
compress_manifest = false
|
||||||
|
|
||||||
## Max number of running background flush jobs (default: 1/2 of cpu cores).
|
## Max number of running background jobs
|
||||||
## @toml2docs:none-default="Auto"
|
max_background_jobs = 4
|
||||||
#+ max_background_flushes = 4
|
|
||||||
|
|
||||||
## Max number of running background compaction jobs (default: 1/4 of cpu cores).
|
|
||||||
## @toml2docs:none-default="Auto"
|
|
||||||
#+ max_background_compactions = 2
|
|
||||||
|
|
||||||
## Max number of running background purge jobs (default: number of cpu cores).
|
|
||||||
## @toml2docs:none-default="Auto"
|
|
||||||
#+ max_background_purges = 8
|
|
||||||
|
|
||||||
## Interval to auto flush a region if it has not flushed yet.
|
## Interval to auto flush a region if it has not flushed yet.
|
||||||
auto_flush_interval = "1h"
|
auto_flush_interval = "1h"
|
||||||
|
|
||||||
## Global write buffer size for all regions. If not set, it defaults to 1/8 of OS memory with a maximum of 1GB.
|
## Global write buffer size for all regions. If not set, it defaults to 1/8 of OS memory with a maximum of 1GB.
|
||||||
## @toml2docs:none-default="Auto"
|
global_write_buffer_size = "1GB"
|
||||||
#+ global_write_buffer_size = "1GB"
|
|
||||||
|
|
||||||
## Global write buffer size threshold to reject write requests. If not set, it defaults to 2 times `global_write_buffer_size`.
|
## Global write buffer size threshold to reject write requests. If not set, it defaults to 2 times `global_write_buffer_size`.
|
||||||
## @toml2docs:none-default="Auto"
|
global_write_buffer_reject_size = "2GB"
|
||||||
#+ global_write_buffer_reject_size = "2GB"
|
|
||||||
|
|
||||||
## Cache size for SST metadata. Set it to 0 to disable the cache.
|
## Cache size for SST metadata. Set it to 0 to disable the cache.
|
||||||
## If not set, it defaults to 1/32 of OS memory with a maximum of 128MB.
|
## If not set, it defaults to 1/32 of OS memory with a maximum of 128MB.
|
||||||
## @toml2docs:none-default="Auto"
|
sst_meta_cache_size = "128MB"
|
||||||
#+ sst_meta_cache_size = "128MB"
|
|
||||||
|
|
||||||
## Cache size for vectors and arrow arrays. Set it to 0 to disable the cache.
|
## Cache size for vectors and arrow arrays. Set it to 0 to disable the cache.
|
||||||
## If not set, it defaults to 1/16 of OS memory with a maximum of 512MB.
|
## If not set, it defaults to 1/16 of OS memory with a maximum of 512MB.
|
||||||
## @toml2docs:none-default="Auto"
|
vector_cache_size = "512MB"
|
||||||
#+ vector_cache_size = "512MB"
|
|
||||||
|
|
||||||
## Cache size for pages of SST row groups. Set it to 0 to disable the cache.
|
## Cache size for pages of SST row groups. Set it to 0 to disable the cache.
|
||||||
## If not set, it defaults to 1/8 of OS memory.
|
|
||||||
## @toml2docs:none-default="Auto"
|
|
||||||
#+ page_cache_size = "512MB"
|
|
||||||
|
|
||||||
## Cache size for time series selector (e.g. `last_value()`). Set it to 0 to disable the cache.
|
|
||||||
## If not set, it defaults to 1/16 of OS memory with a maximum of 512MB.
|
## If not set, it defaults to 1/16 of OS memory with a maximum of 512MB.
|
||||||
## @toml2docs:none-default="Auto"
|
page_cache_size = "512MB"
|
||||||
#+ selector_result_cache_size = "512MB"
|
|
||||||
|
|
||||||
## Whether to enable the write cache. It's enabled by default when using object storage, and enabling it is recommended for better performance.
|
## Whether to enable the experimental write cache.
|
||||||
enable_write_cache = false
|
enable_experimental_write_cache = false
|
||||||
|
|
||||||
## File system path for write cache, defaults to `{data_home}`.
|
## File system path for write cache, defaults to `{data_home}/write_cache`.
|
||||||
write_cache_path = ""
|
experimental_write_cache_path = ""
|
||||||
|
|
||||||
## Capacity for write cache. If your disk space is sufficient, it is recommended to set it larger.
|
## Capacity for write cache.
|
||||||
write_cache_size = "5GiB"
|
experimental_write_cache_size = "512MB"
|
||||||
|
|
||||||
## TTL for write cache.
|
## TTL for write cache.
|
||||||
## @toml2docs:none-default
|
experimental_write_cache_ttl = "1h"
|
||||||
write_cache_ttl = "8h"
|
|
||||||
|
|
||||||
## Preload index (puffin) files into cache on region open (default: true).
|
|
||||||
## When enabled, index files are loaded into the write cache during region initialization,
|
|
||||||
## which can improve query performance at the cost of longer startup times.
|
|
||||||
preload_index_cache = true
|
|
||||||
|
|
||||||
## Percentage of write cache capacity allocated for index (puffin) files (default: 20).
|
|
||||||
## The remaining capacity is used for data (parquet) files.
|
|
||||||
## Must be between 0 and 100 (exclusive). For example, with a 5GiB write cache and 20% allocation,
|
|
||||||
## 1GiB is reserved for index files and 4GiB for data files.
|
|
||||||
index_cache_percent = 20
|
|
||||||
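A sketch of the write-cache block above with the cache enabled and the 5GiB / 20% split from the comment spelled out (these keys live under `[region_engine.mito]`):

# Sketch only: 5GiB write cache, 20% (1GiB) reserved for index (puffin) files
# and the remaining 4GiB for data (parquet) files.
enable_write_cache = true
write_cache_path = ""        # empty means `{data_home}`
write_cache_size = "5GiB"
write_cache_ttl = "8h"
preload_index_cache = true
index_cache_percent = 20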
|
|
||||||
## Buffer size for SST writing.
|
## Buffer size for SST writing.
|
||||||
sst_write_buffer_size = "8MB"
|
sst_write_buffer_size = "8MB"
|
||||||
|
|
||||||
|
## Parallelism to scan a region (default: 1/4 of cpu cores).
|
||||||
|
## - `0`: using the default value (1/4 of cpu cores).
|
||||||
|
## - `1`: scan in current thread.
|
||||||
|
## - `n`: scan in parallelism n.
|
||||||
|
scan_parallelism = 0
|
||||||
|
|
||||||
## Capacity of the channel to send data from parallel scan tasks to the main task.
|
## Capacity of the channel to send data from parallel scan tasks to the main task.
|
||||||
parallel_scan_channel_size = 32
|
parallel_scan_channel_size = 32
|
||||||
|
|
||||||
## Maximum number of SST files to scan concurrently.
|
|
||||||
max_concurrent_scan_files = 384
|
|
||||||
|
|
||||||
## Whether to allow stale WAL entries read during replay.
|
## Whether to allow stale WAL entries read during replay.
|
||||||
allow_stale_entries = false
|
allow_stale_entries = false
|
||||||
|
|
||||||
## Memory limit for table scans across all queries.
|
|
||||||
## Supports absolute size (e.g., "2GB") or percentage of system memory (e.g., "20%").
|
|
||||||
## Setting it to 0 disables the limit.
|
|
||||||
## NOTE: Works with max_concurrent_queries for tiered memory allocation.
|
|
||||||
## - If max_concurrent_queries is set: 70% of queries get full access, 30% get 70% access.
|
|
||||||
## - If max_concurrent_queries is 0 (unlimited): first 20 queries get full access, rest get 70% access.
|
|
||||||
scan_memory_limit = "50%"
|
|
||||||
|
|
||||||
## Minimum time interval between two compactions.
|
|
||||||
## To align with the old behavior, the default value is 0 (no restrictions).
|
|
||||||
min_compaction_interval = "0m"
|
|
||||||
|
|
||||||
## Whether to enable experimental flat format as the default format.
|
|
||||||
default_experimental_flat_format = false
|
|
||||||
|
|
||||||
## The options for index in Mito engine.
|
|
||||||
[region_engine.mito.index]
|
|
||||||
|
|
||||||
## Auxiliary directory path for the index in filesystem, used to store intermediate files for
|
|
||||||
## creating the index and staging files for searching the index, defaults to `{data_home}/index_intermediate`.
|
|
||||||
## The default name for this directory is `index_intermediate` for backward compatibility.
|
|
||||||
##
|
|
||||||
## This path contains two subdirectories:
|
|
||||||
## - `__intm`: for storing intermediate files used during creating index.
|
|
||||||
## - `staging`: for storing staging files used during searching index.
|
|
||||||
aux_path = ""
|
|
||||||
|
|
||||||
## The max capacity of the staging directory.
|
|
||||||
staging_size = "2GB"
|
|
||||||
|
|
||||||
## The TTL of the staging directory.
|
|
||||||
## Defaults to 7 days.
|
|
||||||
## Setting it to "0s" to disable TTL.
|
|
||||||
staging_ttl = "7d"
|
|
||||||
|
|
||||||
## Cache size for inverted index metadata.
|
|
||||||
metadata_cache_size = "64MiB"
|
|
||||||
|
|
||||||
## Cache size for inverted index content.
|
|
||||||
content_cache_size = "128MiB"
|
|
||||||
|
|
||||||
## Page size for inverted index content cache.
|
|
||||||
content_cache_page_size = "64KiB"
|
|
||||||
|
|
||||||
## Cache size for index result.
|
|
||||||
result_cache_size = "128MiB"
|
|
||||||
|
|
||||||
## The options for inverted index in Mito engine.
|
## The options for inverted index in Mito engine.
|
||||||
[region_engine.mito.inverted_index]
|
[region_engine.mito.inverted_index]
|
||||||
|
|
||||||
## Whether to create the index on flush.
|
## Whether to create the index on flush.
|
||||||
## - `auto`: automatically (default)
|
## - `auto`: automatically
|
||||||
## - `disable`: never
|
## - `disable`: never
|
||||||
create_on_flush = "auto"
|
create_on_flush = "auto"
|
||||||
|
|
||||||
## Whether to create the index on compaction.
|
## Whether to create the index on compaction.
|
||||||
## - `auto`: automatically (default)
|
## - `auto`: automatically
|
||||||
## - `disable`: never
|
## - `disable`: never
|
||||||
create_on_compaction = "auto"
|
create_on_compaction = "auto"
|
||||||
|
|
||||||
## Whether to apply the index on query
|
## Whether to apply the index on query
|
||||||
## - `auto`: automatically (default)
|
## - `auto`: automatically
|
||||||
## - `disable`: never
|
## - `disable`: never
|
||||||
apply_on_query = "auto"
|
apply_on_query = "auto"
|
||||||
|
|
||||||
## Memory threshold for performing an external sort during index creation.
|
## Memory threshold for performing an external sort during index creation.
|
||||||
## - `auto`: automatically determine the threshold based on the system memory size (default)
|
## Setting to empty will disable external sorting, forcing all sorting operations to happen in memory.
|
||||||
## - `unlimited`: no memory limit
|
mem_threshold_on_create = "64M"
|
||||||
## - `[size]` e.g. `64MB`: fixed memory threshold
|
|
||||||
mem_threshold_on_create = "auto"
|
|
||||||
|
|
||||||
## Deprecated, use `region_engine.mito.index.aux_path` instead.
|
## File system path to store intermediate files for external sorting (default `{data_home}/index_intermediate`).
|
||||||
intermediate_path = ""
|
intermediate_path = ""
|
||||||
|
|
||||||
## The options for full-text index in Mito engine.
|
|
||||||
[region_engine.mito.fulltext_index]
|
|
||||||
|
|
||||||
## Whether to create the index on flush.
|
|
||||||
## - `auto`: automatically (default)
|
|
||||||
## - `disable`: never
|
|
||||||
create_on_flush = "auto"
|
|
||||||
|
|
||||||
## Whether to create the index on compaction.
|
|
||||||
## - `auto`: automatically (default)
|
|
||||||
## - `disable`: never
|
|
||||||
create_on_compaction = "auto"
|
|
||||||
|
|
||||||
## Whether to apply the index on query
|
|
||||||
## - `auto`: automatically (default)
|
|
||||||
## - `disable`: never
|
|
||||||
apply_on_query = "auto"
|
|
||||||
|
|
||||||
## Memory threshold for index creation.
|
|
||||||
## - `auto`: automatically determine the threshold based on the system memory size (default)
|
|
||||||
## - `unlimited`: no memory limit
|
|
||||||
## - `[size]` e.g. `64MB`: fixed memory threshold
|
|
||||||
mem_threshold_on_create = "auto"
|
|
||||||
|
|
||||||
## The options for bloom filter in Mito engine.
|
|
||||||
[region_engine.mito.bloom_filter_index]
|
|
||||||
|
|
||||||
## Whether to create the bloom filter on flush.
|
|
||||||
## - `auto`: automatically (default)
|
|
||||||
## - `disable`: never
|
|
||||||
create_on_flush = "auto"
|
|
||||||
|
|
||||||
## Whether to create the bloom filter on compaction.
|
|
||||||
## - `auto`: automatically (default)
|
|
||||||
## - `disable`: never
|
|
||||||
create_on_compaction = "auto"
|
|
||||||
|
|
||||||
## Whether to apply the bloom filter on query
|
|
||||||
## - `auto`: automatically (default)
|
|
||||||
## - `disable`: never
|
|
||||||
apply_on_query = "auto"
|
|
||||||
|
|
||||||
## Memory threshold for bloom filter creation.
|
|
||||||
## - `auto`: automatically determine the threshold based on the system memory size (default)
|
|
||||||
## - `unlimited`: no memory limit
|
|
||||||
## - `[size]` e.g. `64MB`: fixed memory threshold
|
|
||||||
mem_threshold_on_create = "auto"
|
|
||||||
|
|
||||||
[region_engine.mito.memtable]
|
[region_engine.mito.memtable]
|
||||||
## Memtable type.
|
## Memtable type.
|
||||||
## - `time_series`: time-series memtable
|
## - `time_series`: time-series memtable
|
||||||
@@ -753,83 +454,55 @@ data_freeze_threshold = 32768
|
|||||||
## Only available for `partition_tree` memtable.
|
## Only available for `partition_tree` memtable.
|
||||||
fork_dictionary_bytes = "1GiB"
|
fork_dictionary_bytes = "1GiB"
|
||||||
|
|
||||||
[[region_engine]]
|
|
||||||
## Enable the file engine.
|
|
||||||
[region_engine.file]
|
|
||||||
|
|
||||||
[[region_engine]]
|
|
||||||
## Metric engine options.
|
|
||||||
[region_engine.metric]
|
|
||||||
## Whether to use sparse primary key encoding.
|
|
||||||
sparse_primary_key_encoding = true
|
|
||||||
|
|
||||||
## The logging options.
|
## The logging options.
|
||||||
[logging]
|
[logging]
|
||||||
## The directory to store the log files. If set to empty, logs will not be written to files.
|
## The directory to store the log files.
|
||||||
dir = "./greptimedb_data/logs"
|
dir = "/tmp/greptimedb/logs"
|
||||||
|
|
||||||
## The log level. Can be `info`/`debug`/`warn`/`error`.
|
## The log level. Can be `info`/`debug`/`warn`/`error`.
|
||||||
## @toml2docs:none-default
|
## +toml2docs:none-default
|
||||||
level = "info"
|
level = "info"
|
||||||
|
|
||||||
## Enable OTLP tracing.
|
## Enable OTLP tracing.
|
||||||
enable_otlp_tracing = false
|
enable_otlp_tracing = false
|
||||||
|
|
||||||
## The OTLP tracing endpoint.
|
## The OTLP tracing endpoint.
|
||||||
otlp_endpoint = "http://localhost:4318/v1/traces"
|
## +toml2docs:none-default
|
||||||
|
otlp_endpoint = ""
|
||||||
|
|
||||||
## Whether to append logs to stdout.
|
## Whether to append logs to stdout.
|
||||||
append_stdout = true
|
append_stdout = true
|
||||||
|
|
||||||
## The log format. Can be `text`/`json`.
|
|
||||||
log_format = "text"
|
|
||||||
|
|
||||||
## The maximum amount of log files.
|
|
||||||
max_log_files = 720
|
|
||||||
|
|
||||||
## The OTLP tracing export protocol. Can be `grpc`/`http`.
|
|
||||||
otlp_export_protocol = "http"
|
|
||||||
|
|
||||||
## Additional OTLP headers, only valid when using OTLP http
|
|
||||||
[logging.otlp_headers]
|
|
||||||
## @toml2docs:none-default
|
|
||||||
#Authorization = "Bearer my-token"
|
|
||||||
## @toml2docs:none-default
|
|
||||||
#Database = "My database"
|
|
||||||
|
|
||||||
## The percentage of tracing that will be sampled and exported.
|
## The percentage of tracing that will be sampled and exported.
|
||||||
## Valid range `[0, 1]`: 1 means all traces are sampled, 0 means none are sampled; the default value is 1.
|
## Valid range `[0, 1]`: 1 means all traces are sampled, 0 means none are sampled; the default value is 1.
|
||||||
## Ratios > 1 are treated as 1; fractions < 0 are treated as 0.
|
## Ratios > 1 are treated as 1; fractions < 0 are treated as 0.
|
||||||
[logging.tracing_sample_ratio]
|
[logging.tracing_sample_ratio]
|
||||||
default_ratio = 1.0
|
default_ratio = 1.0
|
||||||
|
|
||||||
## The slow query log options.
|
## The datanode can export its metrics and send to Prometheus compatible service (e.g. send to `greptimedb` itself) from remote-write API.
|
||||||
[slow_query]
|
## This is only used for `greptimedb` to export its own metrics internally. It's different from prometheus scrape.
|
||||||
## Whether to enable slow query log.
|
[export_metrics]
|
||||||
#+ enable = false
|
|
||||||
|
|
||||||
## The record type of slow queries. It can be `system_table` or `log`.
|
## Whether to enable export metrics.
|
||||||
## @toml2docs:none-default
|
enable = false
|
||||||
#+ record_type = "system_table"
|
|
||||||
|
|
||||||
## The threshold of slow query.
|
## The interval of export metrics.
|
||||||
## @toml2docs:none-default
|
write_interval = "30s"
|
||||||
#+ threshold = "10s"
|
|
||||||
|
|
||||||
## The sampling ratio of slow query log. The value should be in the range of (0, 1].
|
## For `standalone` mode, `self_import` is recommended to collect metrics generated by itself.
|
||||||
## @toml2docs:none-default
|
[export_metrics.self_import]
|
||||||
#+ sample_ratio = 1.0
|
## +toml2docs:none-default
|
||||||
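The `[slow_query]` rows above are all commented defaults; a sketch with the feature switched on, using the example values from the comments:

# Sketch only: log every query slower than 10s to the system table.
[slow_query]
enable = true
record_type = "system_table"
threshold = "10s"
sample_ratio = 1.0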
|
db = "information_schema"
|
||||||
|
|
||||||
|
[export_metrics.remote_write]
|
||||||
|
## The url the metrics send to. The url example can be: `http://127.0.0.1:4000/v1/prometheus/write?db=information_schema`.
|
||||||
|
url = ""
|
||||||
|
|
||||||
|
## HTTP headers to carry in Prometheus remote-write requests.
|
||||||
|
headers = { }
|
||||||
|
|
||||||
## The tracing options. Only effective when compiled with the `tokio-console` feature.
|
## The tracing options. Only effective when compiled with the `tokio-console` feature.
|
||||||
#+ [tracing]
|
[tracing]
|
||||||
## The tokio console address.
|
## The tokio console address.
|
||||||
## @toml2docs:none-default
|
## +toml2docs:none-default
|
||||||
#+ tokio_console_addr = "127.0.0.1"
|
tokio_console_addr = "127.0.0.1"
|
||||||
|
|
||||||
## The memory options.
|
|
||||||
[memory]
|
|
||||||
## Whether to enable heap profiling activation during startup.
|
|
||||||
## When enabled, heap profiling will be activated if the `MALLOC_CONF` environment variable
|
|
||||||
## is set to "prof:true,prof_active:false". The official image adds this env variable.
|
|
||||||
## Default is true.
|
|
||||||
enable_heap_profiling = true
|
|
||||||
|
|||||||
@@ -1,156 +0,0 @@
|
|||||||
/*
|
|
||||||
* Copyright 2023 Greptime Team
|
|
||||||
*
|
|
||||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
|
||||||
* you may not use this file except in compliance with the License.
|
|
||||||
* You may obtain a copy of the License at
|
|
||||||
*
|
|
||||||
* http://www.apache.org/licenses/LICENSE-2.0
|
|
||||||
*
|
|
||||||
* Unless required by applicable law or agreed to in writing, software
|
|
||||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
|
||||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
||||||
* See the License for the specific language governing permissions and
|
|
||||||
* limitations under the License.
|
|
||||||
*/
|
|
||||||
|
|
||||||
import * as core from "@actions/core";
|
|
||||||
import {obtainClient} from "@/common";
|
|
||||||
|
|
||||||
interface RepoConfig {
|
|
||||||
tokenEnv: string;
|
|
||||||
repo: string;
|
|
||||||
workflowLogic: (version: string) => [string, string] | null;
|
|
||||||
}
|
|
||||||
|
|
||||||
const REPO_CONFIGS: Record<string, RepoConfig> = {
|
|
||||||
website: {
|
|
||||||
tokenEnv: "WEBSITE_REPO_TOKEN",
|
|
||||||
repo: "website",
|
|
||||||
workflowLogic: (version: string) => {
|
|
||||||
// Skip nightly versions for website
|
|
||||||
if (version.includes('nightly')) {
|
|
||||||
console.log('Nightly version detected for website, skipping workflow trigger.');
|
|
||||||
return null;
|
|
||||||
}
|
|
||||||
return ['bump-patch-version.yml', version];
|
|
||||||
}
|
|
||||||
},
|
|
||||||
demo: {
|
|
||||||
tokenEnv: "DEMO_REPO_TOKEN",
|
|
||||||
repo: "demo-scene",
|
|
||||||
workflowLogic: (version: string) => {
|
|
||||||
// Skip nightly versions for demo
|
|
||||||
if (version.includes('nightly')) {
|
|
||||||
console.log('Nightly version detected for demo, skipping workflow trigger.');
|
|
||||||
return null;
|
|
||||||
}
|
|
||||||
return ['bump-patch-version.yml', version];
|
|
||||||
}
|
|
||||||
},
|
|
||||||
docs: {
|
|
||||||
tokenEnv: "DOCS_REPO_TOKEN",
|
|
||||||
repo: "docs",
|
|
||||||
workflowLogic: (version: string) => {
|
|
||||||
// Check if it's a nightly version
|
|
||||||
if (version.includes('nightly')) {
|
|
||||||
return ['bump-nightly-version.yml', version];
|
|
||||||
}
|
|
||||||
|
|
||||||
const parts = version.split('.');
|
|
||||||
if (parts.length !== 3) {
|
|
||||||
throw new Error('Invalid version format');
|
|
||||||
}
|
|
||||||
|
|
||||||
// If patch version (last number) is 0, it's a major version
|
|
||||||
// Return only major.minor version
|
|
||||||
if (parts[2] === '0') {
|
|
||||||
return ['bump-version.yml', `${parts[0]}.${parts[1]}`];
|
|
||||||
}
|
|
||||||
|
|
||||||
// Otherwise it's a patch version, use full version
|
|
||||||
return ['bump-patch-version.yml', version];
|
|
||||||
}
|
|
||||||
}
|
|
||||||
};
|
|
||||||
|
|
||||||
async function triggerWorkflow(repoConfig: RepoConfig, workflowId: string, version: string) {
|
|
||||||
const client = obtainClient(repoConfig.tokenEnv);
|
|
||||||
try {
|
|
||||||
await client.rest.actions.createWorkflowDispatch({
|
|
||||||
owner: "GreptimeTeam",
|
|
||||||
repo: repoConfig.repo,
|
|
||||||
workflow_id: workflowId,
|
|
||||||
ref: "main",
|
|
||||||
inputs: {
|
|
||||||
version,
|
|
||||||
},
|
|
||||||
});
|
|
||||||
console.log(`Successfully triggered ${workflowId} workflow for ${repoConfig.repo} with version ${version}`);
|
|
||||||
} catch (error) {
|
|
||||||
core.setFailed(`Failed to trigger workflow for ${repoConfig.repo}: ${error.message}`);
|
|
||||||
throw error;
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
async function processRepo(repoName: string, version: string) {
|
|
||||||
const repoConfig = REPO_CONFIGS[repoName];
|
|
||||||
if (!repoConfig) {
|
|
||||||
throw new Error(`Unknown repository: ${repoName}`);
|
|
||||||
}
|
|
||||||
|
|
||||||
try {
|
|
||||||
const workflowResult = repoConfig.workflowLogic(version);
|
|
||||||
if (workflowResult === null) {
|
|
||||||
// Skip this repo (e.g., nightly version for website)
|
|
||||||
return;
|
|
||||||
}
|
|
||||||
|
|
||||||
const [workflowId, apiVersion] = workflowResult;
|
|
||||||
await triggerWorkflow(repoConfig, workflowId, apiVersion);
|
|
||||||
} catch (error) {
|
|
||||||
core.setFailed(`Error processing ${repoName} with version ${version}: ${error.message}`);
|
|
||||||
throw error;
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
async function main() {
|
|
||||||
const version = process.env.VERSION;
|
|
||||||
if (!version) {
|
|
||||||
core.setFailed("VERSION environment variable is required");
|
|
||||||
process.exit(1);
|
|
||||||
}
|
|
||||||
|
|
||||||
// Remove 'v' prefix if exists
|
|
||||||
const cleanVersion = version.startsWith('v') ? version.slice(1) : version;
|
|
||||||
|
|
||||||
// Get target repositories from environment variable
|
|
||||||
// Default to both if not specified
|
|
||||||
const targetRepos = process.env.TARGET_REPOS?.split(',').map(repo => repo.trim()) || ['website', 'docs'];
|
|
||||||
|
|
||||||
console.log(`Processing version ${cleanVersion} for repositories: ${targetRepos.join(', ')}`);
|
|
||||||
|
|
||||||
const errors: string[] = [];
|
|
||||||
|
|
||||||
// Process each repository
|
|
||||||
for (const repo of targetRepos) {
|
|
||||||
try {
|
|
||||||
await processRepo(repo, cleanVersion);
|
|
||||||
} catch (error) {
|
|
||||||
errors.push(`${repo}: ${error.message}`);
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
if (errors.length > 0) {
|
|
||||||
core.setFailed(`Failed to process some repositories: ${errors.join('; ')}`);
|
|
||||||
process.exit(1);
|
|
||||||
}
|
|
||||||
|
|
||||||
console.log('All repositories processed successfully');
|
|
||||||
}
|
|
||||||
|
|
||||||
// Execute main function
|
|
||||||
main().catch((error) => {
|
|
||||||
core.setFailed(`Unexpected error: ${error.message}`);
|
|
||||||
process.exit(1);
|
|
||||||
});
|
|
||||||
@@ -55,25 +55,12 @@ async function main() {
|
|||||||
await client.rest.issues.addLabels({
|
await client.rest.issues.addLabels({
|
||||||
owner, repo, issue_number: number, labels: [labelDocsRequired],
|
owner, repo, issue_number: number, labels: [labelDocsRequired],
|
||||||
})
|
})
|
||||||
|
|
||||||
// Get available assignees for the docs repo
|
|
||||||
const assigneesResponse = await docsClient.rest.issues.listAssignees({
|
|
||||||
owner: 'GreptimeTeam',
|
|
||||||
repo: 'docs',
|
|
||||||
})
|
|
||||||
const validAssignees = assigneesResponse.data.map(assignee => assignee.login)
|
|
||||||
core.info(`Available assignees: ${validAssignees.join(', ')}`)
|
|
||||||
|
|
||||||
// Check if the actor is a valid assignee, otherwise fallback to fengjiachun
|
|
||||||
const assignee = validAssignees.includes(actor) ? actor : 'fengjiachun'
|
|
||||||
core.info(`Assigning issue to: ${assignee}`)
|
|
||||||
|
|
||||||
await docsClient.rest.issues.create({
|
await docsClient.rest.issues.create({
|
||||||
owner: 'GreptimeTeam',
|
owner: 'GreptimeTeam',
|
||||||
repo: 'docs',
|
repo: 'docs',
|
||||||
title: `Update docs for ${title}`,
|
title: `Update docs for ${title}`,
|
||||||
body: `A document change request is generated from ${html_url}`,
|
body: `A document change request is generated from ${html_url}`,
|
||||||
assignee: assignee,
|
assignee: actor,
|
||||||
}).then((res) => {
|
}).then((res) => {
|
||||||
core.info(`Created issue ${res.data}`)
|
core.info(`Created issue ${res.data}`)
|
||||||
})
|
})
|
||||||
|
|||||||
@@ -1,10 +1,10 @@
|
|||||||
FROM centos:7 AS builder
|
FROM centos:7 as builder
|
||||||
|
|
||||||
ARG CARGO_PROFILE
|
ARG CARGO_PROFILE
|
||||||
ARG FEATURES
|
ARG FEATURES
|
||||||
ARG OUTPUT_DIR
|
ARG OUTPUT_DIR
|
||||||
|
|
||||||
ENV LANG=en_US.utf8
|
ENV LANG en_US.utf8
|
||||||
WORKDIR /greptimedb
|
WORKDIR /greptimedb
|
||||||
|
|
||||||
# Install dependencies
|
# Install dependencies
|
||||||
@@ -13,6 +13,8 @@ RUN yum install -y epel-release \
|
|||||||
openssl \
|
openssl \
|
||||||
openssl-devel \
|
openssl-devel \
|
||||||
centos-release-scl \
|
centos-release-scl \
|
||||||
|
rh-python38 \
|
||||||
|
rh-python38-python-devel \
|
||||||
which
|
which
|
||||||
|
|
||||||
# Install protoc
|
# Install protoc
|
||||||
@@ -22,7 +24,7 @@ RUN unzip protoc-3.15.8-linux-x86_64.zip -d /usr/local/
|
|||||||
# Install Rust
|
# Install Rust
|
||||||
SHELL ["/bin/bash", "-c"]
|
SHELL ["/bin/bash", "-c"]
|
||||||
RUN curl --proto '=https' --tlsv1.2 -sSf https://sh.rustup.rs | sh -s -- --no-modify-path --default-toolchain none -y
|
RUN curl --proto '=https' --tlsv1.2 -sSf https://sh.rustup.rs | sh -s -- --no-modify-path --default-toolchain none -y
|
||||||
ENV PATH=/usr/local/bin:/root/.cargo/bin/:$PATH
|
ENV PATH /opt/rh/rh-python38/root/usr/bin:/usr/local/bin:/root/.cargo/bin/:$PATH
|
||||||
|
|
||||||
# Build the project in release mode.
|
# Build the project in release mode.
|
||||||
RUN --mount=target=.,rw \
|
RUN --mount=target=.,rw \
|
||||||
@@ -33,7 +35,7 @@ RUN --mount=target=.,rw \
|
|||||||
TARGET_DIR=/out/target
|
TARGET_DIR=/out/target
|
||||||
|
|
||||||
# Export the binary to the clean image.
|
# Export the binary to the clean image.
|
||||||
FROM centos:7 AS base
|
FROM centos:7 as base
|
||||||
|
|
||||||
ARG OUTPUT_DIR
|
ARG OUTPUT_DIR
|
||||||
|
|
||||||
@@ -41,12 +43,12 @@ RUN yum install -y epel-release \
|
|||||||
openssl \
|
openssl \
|
||||||
openssl-devel \
|
openssl-devel \
|
||||||
centos-release-scl \
|
centos-release-scl \
|
||||||
|
rh-python38 \
|
||||||
|
rh-python38-python-devel \
|
||||||
which
|
which
|
||||||
|
|
||||||
WORKDIR /greptime
|
WORKDIR /greptime
|
||||||
COPY --from=builder /out/target/${OUTPUT_DIR}/greptime /greptime/bin/
|
COPY --from=builder /out/target/${OUTPUT_DIR}/greptime /greptime/bin/
|
||||||
ENV PATH=/greptime/bin/:$PATH
|
ENV PATH /greptime/bin/:$PATH
|
||||||
|
|
||||||
ENV MALLOC_CONF="prof:true,prof_active:false"
|
|
||||||
|
|
||||||
ENTRYPOINT ["greptime"]
|
ENTRYPOINT ["greptime"]
|
||||||
|
|||||||
@@ -1,65 +0,0 @@
|
|||||||
FROM ubuntu:22.04 AS builder
|
|
||||||
|
|
||||||
ARG CARGO_PROFILE
|
|
||||||
ARG FEATURES
|
|
||||||
ARG OUTPUT_DIR
|
|
||||||
|
|
||||||
ENV LANG=en_US.utf8
|
|
||||||
WORKDIR /greptimedb
|
|
||||||
|
|
||||||
RUN apt-get update && \
|
|
||||||
DEBIAN_FRONTEND=noninteractive apt-get install -y software-properties-common
|
|
||||||
|
|
||||||
# Install dependencies.
|
|
||||||
RUN --mount=type=cache,target=/var/cache/apt \
|
|
||||||
apt-get update && apt-get install -y \
|
|
||||||
libssl-dev \
|
|
||||||
protobuf-compiler \
|
|
||||||
curl \
|
|
||||||
git \
|
|
||||||
build-essential \
|
|
||||||
pkg-config
|
|
||||||
|
|
||||||
# Install Rust.
|
|
||||||
SHELL ["/bin/bash", "-c"]
|
|
||||||
RUN curl --proto '=https' --tlsv1.2 -sSf https://sh.rustup.rs | sh -s -- --no-modify-path --default-toolchain none -y
|
|
||||||
ENV PATH=/root/.cargo/bin/:$PATH
|
|
||||||
|
|
||||||
# Build the project in release mode.
|
|
||||||
RUN --mount=target=. \
|
|
||||||
--mount=type=cache,target=/root/.cargo/registry \
|
|
||||||
make build \
|
|
||||||
CARGO_PROFILE=${CARGO_PROFILE} \
|
|
||||||
FEATURES=${FEATURES} \
|
|
||||||
TARGET_DIR=/out/target
|
|
||||||
|
|
||||||
FROM ubuntu:22.04 AS libs
|
|
||||||
|
|
||||||
ARG TARGETARCH
|
|
||||||
|
|
||||||
# Copy required library dependencies based on architecture
|
|
||||||
RUN if [ "$TARGETARCH" = "amd64" ]; then \
|
|
||||||
cp /lib/x86_64-linux-gnu/libz.so.1.2.11 /lib/x86_64-linux-gnu/libz.so.1; \
|
|
||||||
elif [ "$TARGETARCH" = "arm64" ]; then \
|
|
||||||
cp /lib/aarch64-linux-gnu/libz.so.1.2.11 /lib/aarch64-linux-gnu/libz.so.1; \
|
|
||||||
else \
|
|
||||||
echo "Unsupported architecture: $TARGETARCH" && exit 1; \
|
|
||||||
fi
|
|
||||||
|
|
||||||
# Export the binary to the clean distroless image.
|
|
||||||
FROM gcr.io/distroless/cc-debian12:latest AS base
|
|
||||||
|
|
||||||
ARG OUTPUT_DIR
|
|
||||||
ARG TARGETARCH
|
|
||||||
|
|
||||||
# Copy required library dependencies
|
|
||||||
COPY --from=libs /lib /lib
|
|
||||||
COPY --from=busybox:stable /bin/busybox /bin/busybox
|
|
||||||
|
|
||||||
WORKDIR /greptime
|
|
||||||
COPY --from=builder /out/target/${OUTPUT_DIR}/greptime /greptime/bin/greptime
|
|
||||||
ENV PATH=/greptime/bin/:$PATH
|
|
||||||
|
|
||||||
ENV MALLOC_CONF="prof:true,prof_active:false"
|
|
||||||
|
|
||||||
ENTRYPOINT ["greptime"]
|
|
||||||
@@ -1,14 +1,16 @@
-FROM ubuntu:22.04 AS builder
+FROM ubuntu:20.04 as builder

ARG CARGO_PROFILE
ARG FEATURES
ARG OUTPUT_DIR

-ENV LANG=en_US.utf8
+ENV LANG en_US.utf8
WORKDIR /greptimedb

+# Add PPA for Python 3.10.
RUN apt-get update && \
-    DEBIAN_FRONTEND=noninteractive apt-get install -y software-properties-common
+    DEBIAN_FRONTEND=noninteractive apt-get install -y software-properties-common && \
+    add-apt-repository ppa:deadsnakes/ppa -y

# Install dependencies.
RUN --mount=type=cache,target=/var/cache/apt \
@@ -18,12 +20,15 @@ RUN --mount=type=cache,target=/var/cache/apt \
    curl \
    git \
    build-essential \
-    pkg-config
+    pkg-config \
+    python3.10 \
+    python3.10-dev \
+    python3-pip

# Install Rust.
SHELL ["/bin/bash", "-c"]
RUN curl --proto '=https' --tlsv1.2 -sSf https://sh.rustup.rs | sh -s -- --no-modify-path --default-toolchain none -y
-ENV PATH=/root/.cargo/bin/:$PATH
+ENV PATH /root/.cargo/bin/:$PATH

# Build the project in release mode.
RUN --mount=target=. \
@@ -35,18 +40,23 @@ RUN --mount=target=. \

# Export the binary to the clean image.
# TODO(zyy17): Maybe should use the more secure container image.
-FROM ubuntu:22.04 AS base
+FROM ubuntu:22.04 as base

ARG OUTPUT_DIR

RUN apt-get update && DEBIAN_FRONTEND=noninteractive apt-get \
    -y install ca-certificates \
+    python3.10 \
+    python3.10-dev \
+    python3-pip \
    curl

+COPY ./docker/python/requirements.txt /etc/greptime/requirements.txt
+
+RUN python3 -m pip install -r /etc/greptime/requirements.txt
+
WORKDIR /greptime
COPY --from=builder /out/target/${OUTPUT_DIR}/greptime /greptime/bin/
-ENV PATH=/greptime/bin/:$PATH
+ENV PATH /greptime/bin/:$PATH

-ENV MALLOC_CONF="prof:true,prof_active:false"
-
ENTRYPOINT ["greptime"]
@@ -1,20 +1,16 @@
FROM centos:7

-# Note: CentOS 7 has reached EOL since 2024-07-01 thus `mirror.centos.org` is no longer available and we need to use `vault.centos.org` instead.
-RUN sed -i s/mirror.centos.org/vault.centos.org/g /etc/yum.repos.d/*.repo
-RUN sed -i s/^#.*baseurl=http/baseurl=http/g /etc/yum.repos.d/*.repo
-
RUN yum install -y epel-release \
    openssl \
    openssl-devel \
-    centos-release-scl
+    centos-release-scl \
+    rh-python38 \
+    rh-python38-python-devel

ARG TARGETARCH

ADD $TARGETARCH/greptime /greptime/bin/

-ENV PATH=/greptime/bin/:$PATH
+ENV PATH /greptime/bin/:$PATH

-ENV MALLOC_CONF="prof:true,prof_active:false"
-
ENTRYPOINT ["greptime"]
@@ -1,40 +0,0 @@
FROM ubuntu:22.04 AS libs

ARG TARGETARCH

# Copy required library dependencies based on architecture
# TARGETARCH values: amd64, arm64
# Ubuntu library paths: x86_64-linux-gnu, aarch64-linux-gnu
RUN if [ "$TARGETARCH" = "amd64" ]; then \
    mkdir -p /output/x86_64-linux-gnu && \
    cp /lib/x86_64-linux-gnu/libz.so.1.2.11 /output/x86_64-linux-gnu/libz.so.1; \
    elif [ "$TARGETARCH" = "arm64" ]; then \
    mkdir -p /output/aarch64-linux-gnu && \
    cp /lib/aarch64-linux-gnu/libz.so.1.2.11 /output/aarch64-linux-gnu/libz.so.1; \
    else \
    echo "Unsupported architecture: $TARGETARCH" && exit 1; \
    fi

FROM gcr.io/distroless/cc-debian12:latest

# The root path under which contains all the dependencies to build this Dockerfile.
ARG DOCKER_BUILD_ROOT=.
# The binary name of GreptimeDB executable.
# Defaults to "greptime", but sometimes in other projects it might be different.
ARG TARGET_BIN=greptime

ARG TARGETARCH

# Copy required library dependencies
COPY --from=libs /output /lib
COPY --from=busybox:stable /bin/busybox /bin/busybox

ADD $TARGETARCH/$TARGET_BIN /greptime/bin/

ENV PATH=/greptime/bin/:$PATH

ENV TARGET_BIN=$TARGET_BIN

ENV MALLOC_CONF="prof:true,prof_active:false"

ENTRYPOINT ["greptime"]
@@ -8,16 +8,21 @@ ARG TARGET_BIN=greptime

RUN apt-get update && DEBIAN_FRONTEND=noninteractive apt-get install -y \
    ca-certificates \
+    python3.10 \
+    python3.10-dev \
+    python3-pip \
    curl

+COPY $DOCKER_BUILD_ROOT/docker/python/requirements.txt /etc/greptime/requirements.txt
+
+RUN python3 -m pip install -r /etc/greptime/requirements.txt
+
ARG TARGETARCH

ADD $TARGETARCH/$TARGET_BIN /greptime/bin/

-ENV PATH=/greptime/bin/:$PATH
+ENV PATH /greptime/bin/:$PATH

ENV TARGET_BIN=$TARGET_BIN

-ENV MALLOC_CONF="prof:true,prof_active:false"
-
ENTRYPOINT ["sh", "-c", "exec $TARGET_BIN \"$@\"", "--"]
@@ -1,4 +1,4 @@
-FROM ubuntu:latest
+FROM ubuntu:22.04

# The binary name of GreptimeDB executable.
# Defaults to "greptime", but sometimes in other projects it might be different.
@@ -9,21 +9,16 @@ RUN cp ${NDK_ROOT}/toolchains/llvm/prebuilt/linux-x86_64/lib64/clang/14.0.7/lib/
# Install dependencies.
RUN apt-get update && apt-get install -y \
    libssl-dev \
+    protobuf-compiler \
    curl \
    git \
-    unzip \
    build-essential \
    pkg-config \
-    openssh-client
-
-# Install protoc
-ARG PROTOBUF_VERSION=29.3
-
-RUN curl -OL https://github.com/protocolbuffers/protobuf/releases/download/v${PROTOBUF_VERSION}/protoc-${PROTOBUF_VERSION}-linux-x86_64.zip && \
-    unzip protoc-${PROTOBUF_VERSION}-linux-x86_64.zip -d protoc3;
-
-RUN mv protoc3/bin/* /usr/local/bin/
-RUN mv protoc3/include/* /usr/local/include/
+    python3 \
+    python3-dev \
+    python3-pip \
+    && pip3 install --upgrade pip \
+    && pip3 install pyarrow

# Trust workdir
RUN git config --global --add safe.directory /greptimedb
@@ -1,50 +0,0 @@
#!/bin/bash

set -euxo pipefail

cd "$(mktemp -d)"
# Fix version to v1.6.6, this is different than the latest version in original install script in
# https://raw.githubusercontent.com/cargo-bins/cargo-binstall/main/install-from-binstall-release.sh
base_url="https://github.com/cargo-bins/cargo-binstall/releases/download/v1.6.6/cargo-binstall-"

os="$(uname -s)"
if [ "$os" == "Darwin" ]; then
    url="${base_url}universal-apple-darwin.zip"
    curl -LO --proto '=https' --tlsv1.2 -sSf "$url"
    unzip cargo-binstall-universal-apple-darwin.zip
elif [ "$os" == "Linux" ]; then
    machine="$(uname -m)"
    if [ "$machine" == "armv7l" ]; then
        machine="armv7"
    fi
    target="${machine}-unknown-linux-musl"
    if [ "$machine" == "armv7" ]; then
        target="${target}eabihf"
    fi

    url="${base_url}${target}.tgz"
    curl -L --proto '=https' --tlsv1.2 -sSf "$url" | tar -xvzf -
elif [ "${OS-}" = "Windows_NT" ]; then
    machine="$(uname -m)"
    target="${machine}-pc-windows-msvc"
    url="${base_url}${target}.zip"
    curl -LO --proto '=https' --tlsv1.2 -sSf "$url"
    unzip "cargo-binstall-${target}.zip"
else
    echo "Unsupported OS ${os}"
    exit 1
fi

./cargo-binstall -y --force cargo-binstall

CARGO_HOME="${CARGO_HOME:-$HOME/.cargo}"

if ! [[ ":$PATH:" == *":$CARGO_HOME/bin:"* ]]; then
    if [ -n "${CI:-}" ] && [ -n "${GITHUB_PATH:-}" ]; then
        echo "$CARGO_HOME/bin" >> "$GITHUB_PATH"
    else
        echo
        printf "\033[0;31mYour path is missing %s, you might want to add it.\033[0m\n" "$CARGO_HOME/bin"
        echo
    fi
fi
@@ -2,42 +2,29 @@ FROM centos:7 as builder

ENV LANG en_US.utf8

-# Note: CentOS 7 has reached EOL since 2024-07-01 thus `mirror.centos.org` is no longer available and we need to use `vault.centos.org` instead.
-RUN sed -i s/mirror.centos.org/vault.centos.org/g /etc/yum.repos.d/*.repo
-RUN sed -i s/^#.*baseurl=http/baseurl=http/g /etc/yum.repos.d/*.repo
-
# Install dependencies
RUN ulimit -n 1024000 && yum groupinstall -y 'Development Tools'
RUN yum install -y epel-release \
    openssl \
    openssl-devel \
    centos-release-scl \
+    rh-python38 \
+    rh-python38-python-devel \
    which

# Install protoc
-ARG PROTOBUF_VERSION=29.3
-
-RUN curl -OL https://github.com/protocolbuffers/protobuf/releases/download/v${PROTOBUF_VERSION}/protoc-${PROTOBUF_VERSION}-linux-x86_64.zip && \
-    unzip protoc-${PROTOBUF_VERSION}-linux-x86_64.zip -d protoc3;
-
-RUN mv protoc3/bin/* /usr/local/bin/
-RUN mv protoc3/include/* /usr/local/include/
+RUN curl -LO https://github.com/protocolbuffers/protobuf/releases/download/v3.15.8/protoc-3.15.8-linux-x86_64.zip
+RUN unzip protoc-3.15.8-linux-x86_64.zip -d /usr/local/

# Install Rust
SHELL ["/bin/bash", "-c"]
RUN curl --proto '=https' --tlsv1.2 -sSf https://sh.rustup.rs | sh -s -- --no-modify-path --default-toolchain none -y
-ENV PATH /usr/local/bin:/root/.cargo/bin/:$PATH
+ENV PATH /opt/rh/rh-python38/root/usr/bin:/usr/local/bin:/root/.cargo/bin/:$PATH

# Install Rust toolchains.
ARG RUST_TOOLCHAIN
RUN rustup toolchain install ${RUST_TOOLCHAIN}

-# Install cargo-binstall with a specific version to adapt the current rust toolchain.
-# Note: if we use the latest version, we may encounter the following `use of unstable library feature 'io_error_downcast'` error.
-# compile from source take too long, so we use the precompiled binary instead
-COPY $DOCKER_BUILD_ROOT/docker/dev-builder/binstall/pull_binstall.sh /usr/local/bin/pull_binstall.sh
-RUN chmod +x /usr/local/bin/pull_binstall.sh && /usr/local/bin/pull_binstall.sh
-
# Install nextest.
+RUN cargo install cargo-binstall --locked
RUN cargo binstall cargo-nextest --no-confirm
@@ -1,4 +1,4 @@
-FROM ubuntu:22.04
+FROM ubuntu:20.04

# The root path under which contains all the dependencies to build this Dockerfile.
ARG DOCKER_BUILD_ROOT=.
@@ -6,34 +6,29 @@ ARG DOCKER_BUILD_ROOT=.
ENV LANG en_US.utf8
WORKDIR /greptimedb

+# Add PPA for Python 3.10.
RUN apt-get update && \
-    DEBIAN_FRONTEND=noninteractive apt-get install -y software-properties-common
+    DEBIAN_FRONTEND=noninteractive apt-get install -y software-properties-common && \
+    add-apt-repository ppa:deadsnakes/ppa -y

# Install dependencies.
RUN apt-get update && DEBIAN_FRONTEND=noninteractive apt-get install -y \
    libssl-dev \
    tzdata \
+    protobuf-compiler \
    curl \
-    unzip \
    ca-certificates \
    git \
    build-essential \
-    pkg-config
+    pkg-config \
+    python3.10 \
+    python3.10-dev

-ARG TARGETPLATFORM
-RUN echo "target platform: $TARGETPLATFORM"
-
-ARG PROTOBUF_VERSION=29.3
-
-# Install protobuf, because the one in the apt is too old (v3.12).
-RUN if [ "$TARGETPLATFORM" = "linux/arm64" ]; then \
-    curl -OL https://github.com/protocolbuffers/protobuf/releases/download/v${PROTOBUF_VERSION}/protoc-${PROTOBUF_VERSION}-linux-aarch_64.zip && \
-    unzip protoc-${PROTOBUF_VERSION}-linux-aarch_64.zip -d protoc3; \
-    elif [ "$TARGETPLATFORM" = "linux/amd64" ]; then \
-    curl -OL https://github.com/protocolbuffers/protobuf/releases/download/v${PROTOBUF_VERSION}/protoc-${PROTOBUF_VERSION}-linux-x86_64.zip && \
-    unzip protoc-${PROTOBUF_VERSION}-linux-x86_64.zip -d protoc3; \
-    fi
-RUN mv protoc3/bin/* /usr/local/bin/
-RUN mv protoc3/include/* /usr/local/include/
+# Remove Python 3.8 and install pip.
+RUN apt-get -y purge python3.8 && \
+    apt-get -y autoremove && \
+    ln -s /usr/bin/python3.10 /usr/bin/python3 && \
+    curl -sS https://bootstrap.pypa.io/get-pip.py | python3.10

# Silence all `safe.directory` warnings, to avoid the "detect dubious repository" error when building with submodules.
# Disabling the safe directory check here won't pose extra security issues, because in our usage for this dev build
@@ -45,7 +40,11 @@ RUN mv protoc3/include/* /usr/local/include/
# wildcard here. However, that requires the git's config files and the submodules all owned by the very same user.
# It's troublesome to do this since the dev build runs in Docker, which is under user "root"; while outside the Docker,
# it can be a different user that have prepared the submodules.
-RUN git config --global --add safe.directory '*'
+RUN git config --global --add safe.directory *

+# Install Python dependencies.
+COPY $DOCKER_BUILD_ROOT/docker/python/requirements.txt /etc/greptime/requirements.txt
+RUN python3 -m pip install -r /etc/greptime/requirements.txt
+
# Install Rust.
SHELL ["/bin/bash", "-c"]
@@ -56,11 +55,6 @@ ENV PATH /root/.cargo/bin/:$PATH
ARG RUST_TOOLCHAIN
RUN rustup toolchain install ${RUST_TOOLCHAIN}

-# Install cargo-binstall with a specific version to adapt the current rust toolchain.
-# Note: if we use the latest version, we may encounter the following `use of unstable library feature 'io_error_downcast'` error.
-# compile from source take too long, so we use the precompiled binary instead
-COPY $DOCKER_BUILD_ROOT/docker/dev-builder/binstall/pull_binstall.sh /usr/local/bin/pull_binstall.sh
-RUN chmod +x /usr/local/bin/pull_binstall.sh && /usr/local/bin/pull_binstall.sh
-
# Install nextest.
+RUN cargo install cargo-binstall --locked
RUN cargo binstall cargo-nextest --no-confirm
docker/dev-builder/ubuntu/Dockerfile-18.10 (new file, 48 lines)
@@ -0,0 +1,48 @@
# Use the legacy glibc 2.28.
FROM ubuntu:18.10

ENV LANG en_US.utf8
WORKDIR /greptimedb

# Use old-releases.ubuntu.com to avoid 404s: https://help.ubuntu.com/community/EOLUpgrades.
RUN echo "deb http://old-releases.ubuntu.com/ubuntu/ cosmic main restricted universe multiverse\n\
deb http://old-releases.ubuntu.com/ubuntu/ cosmic-updates main restricted universe multiverse\n\
deb http://old-releases.ubuntu.com/ubuntu/ cosmic-security main restricted universe multiverse" > /etc/apt/sources.list

# Install dependencies.
RUN apt-get update && DEBIAN_FRONTEND=noninteractive apt-get install -y \
    libssl-dev \
    tzdata \
    curl \
    ca-certificates \
    git \
    build-essential \
    unzip \
    pkg-config

# Install protoc.
ENV PROTOC_VERSION=25.1
RUN if [ "$(uname -m)" = "x86_64" ]; then \
    PROTOC_ZIP=protoc-${PROTOC_VERSION}-linux-x86_64.zip; \
    elif [ "$(uname -m)" = "aarch64" ]; then \
    PROTOC_ZIP=protoc-${PROTOC_VERSION}-linux-aarch_64.zip; \
    else \
    echo "Unsupported architecture"; exit 1; \
    fi && \
    curl -OL https://github.com/protocolbuffers/protobuf/releases/download/v${PROTOC_VERSION}/${PROTOC_ZIP} && \
    unzip -o ${PROTOC_ZIP} -d /usr/local bin/protoc && \
    unzip -o ${PROTOC_ZIP} -d /usr/local 'include/*' && \
    rm -f ${PROTOC_ZIP}

# Install Rust.
SHELL ["/bin/bash", "-c"]
RUN curl --proto '=https' --tlsv1.2 -sSf https://sh.rustup.rs | sh -s -- --no-modify-path --default-toolchain none -y
ENV PATH /root/.cargo/bin/:$PATH

# Install Rust toolchains.
ARG RUST_TOOLCHAIN
RUN rustup toolchain install ${RUST_TOOLCHAIN}

# Install nextest.
RUN cargo install cargo-binstall --locked
RUN cargo binstall cargo-nextest --no-confirm
@@ -1,66 +0,0 @@
FROM ubuntu:20.04

# The root path under which contains all the dependencies to build this Dockerfile.
ARG DOCKER_BUILD_ROOT=.

ENV LANG en_US.utf8
WORKDIR /greptimedb

RUN apt-get update && \
    DEBIAN_FRONTEND=noninteractive apt-get install -y software-properties-common
# Install dependencies.
RUN apt-get update && DEBIAN_FRONTEND=noninteractive apt-get install -y \
    libssl-dev \
    tzdata \
    curl \
    unzip \
    ca-certificates \
    git \
    build-essential \
    pkg-config

ARG TARGETPLATFORM
RUN echo "target platform: $TARGETPLATFORM"

ARG PROTOBUF_VERSION=29.3

# Install protobuf, because the one in the apt is too old (v3.12).
RUN if [ "$TARGETPLATFORM" = "linux/arm64" ]; then \
    curl -OL https://github.com/protocolbuffers/protobuf/releases/download/v${PROTOBUF_VERSION}/protoc-${PROTOBUF_VERSION}-linux-aarch_64.zip && \
    unzip protoc-${PROTOBUF_VERSION}-linux-aarch_64.zip -d protoc3; \
    elif [ "$TARGETPLATFORM" = "linux/amd64" ]; then \
    curl -OL https://github.com/protocolbuffers/protobuf/releases/download/v${PROTOBUF_VERSION}/protoc-${PROTOBUF_VERSION}-linux-x86_64.zip && \
    unzip protoc-${PROTOBUF_VERSION}-linux-x86_64.zip -d protoc3; \
    fi
RUN mv protoc3/bin/* /usr/local/bin/
RUN mv protoc3/include/* /usr/local/include/

# Silence all `safe.directory` warnings, to avoid the "detect dubious repository" error when building with submodules.
# Disabling the safe directory check here won't pose extra security issues, because in our usage for this dev build
# image, we use it solely on our own environment (that github action's VM, or ECS created dynamically by ourselves),
# and the repositories are pulled from trusted sources (still us, of course). Doing so does not violate the intention
# of the Git's addition to the "safe.directory" at the first place (see the commit message here:
# https://github.com/git/git/commit/8959555cee7ec045958f9b6dd62e541affb7e7d9).
# There's also another solution to this, that we add the desired submodules to the safe directory, instead of using
# wildcard here. However, that requires the git's config files and the submodules all owned by the very same user.
# It's troublesome to do this since the dev build runs in Docker, which is under user "root"; while outside the Docker,
# it can be a different user that have prepared the submodules.
RUN git config --global --add safe.directory '*'

# Install Rust.
SHELL ["/bin/bash", "-c"]
RUN curl --proto '=https' --tlsv1.2 -sSf https://sh.rustup.rs | sh -s -- --no-modify-path --default-toolchain none -y
ENV PATH /root/.cargo/bin/:$PATH

# Install Rust toolchains.
ARG RUST_TOOLCHAIN
RUN rustup toolchain install ${RUST_TOOLCHAIN}

# Install cargo-binstall with a specific version to adapt the current rust toolchain.
# Note: if we use the latest version, we may encounter the following `use of unstable library feature 'io_error_downcast'` error.
# compile from source take too long, so we use the precompiled binary instead
COPY $DOCKER_BUILD_ROOT/docker/dev-builder/binstall/pull_binstall.sh /usr/local/bin/pull_binstall.sh
RUN chmod +x /usr/local/bin/pull_binstall.sh && /usr/local/bin/pull_binstall.sh

# Install nextest.
RUN cargo binstall cargo-nextest --no-confirm
@@ -1,185 +0,0 @@
x-custom:
  etcd_initial_cluster_token: &etcd_initial_cluster_token "--initial-cluster-token=etcd-cluster"
  etcd_common_settings: &etcd_common_settings
    image: "${ETCD_REGISTRY:-quay.io}/${ETCD_NAMESPACE:-coreos}/etcd:${ETCD_VERSION:-v3.5.10}"
    entrypoint: /usr/local/bin/etcd
  greptimedb_image: &greptimedb_image "${GREPTIMEDB_REGISTRY:-docker.io}/${GREPTIMEDB_NAMESPACE:-greptime}/greptimedb:${GREPTIMEDB_VERSION:-latest}"

services:
  etcd0:
    <<: *etcd_common_settings
    container_name: etcd0
    ports:
      - 2379:2379
      - 2380:2380
    command:
      - --name=etcd0
      - --data-dir=/var/lib/etcd
      - --initial-advertise-peer-urls=http://etcd0:2380
      - --listen-peer-urls=http://0.0.0.0:2380
      - --listen-client-urls=http://0.0.0.0:2379
      - --advertise-client-urls=http://etcd0:2379
      - --heartbeat-interval=250
      - --election-timeout=1250
      - --initial-cluster=etcd0=http://etcd0:2380
      - --initial-cluster-state=new
      - *etcd_initial_cluster_token
    volumes:
      - ./greptimedb-cluster-docker-compose/etcd0:/var/lib/etcd
    healthcheck:
      test: [ "CMD", "etcdctl", "--endpoints=http://etcd0:2379", "endpoint", "health" ]
      interval: 5s
      timeout: 3s
      retries: 5
    networks:
      - greptimedb

  etcd-tls:
    <<: *etcd_common_settings
    container_name: etcd-tls
    ports:
      - 2378:2378
      - 2381:2381
    command:
      - --name=etcd-tls
      - --data-dir=/var/lib/etcd
      - --initial-advertise-peer-urls=https://etcd-tls:2381
      - --listen-peer-urls=https://0.0.0.0:2381
      - --listen-client-urls=https://0.0.0.0:2378
      - --advertise-client-urls=https://etcd-tls:2378
      - --heartbeat-interval=250
      - --election-timeout=1250
      - --initial-cluster=etcd-tls=https://etcd-tls:2381
      - --initial-cluster-state=new
      - --initial-cluster-token=etcd-tls-cluster
      - --cert-file=/certs/server.crt
      - --key-file=/certs/server-key.pem
      - --peer-cert-file=/certs/server.crt
      - --peer-key-file=/certs/server-key.pem
      - --trusted-ca-file=/certs/ca.crt
      - --peer-trusted-ca-file=/certs/ca.crt
      - --client-cert-auth
      - --peer-client-cert-auth
    volumes:
      - ./greptimedb-cluster-docker-compose/etcd-tls:/var/lib/etcd
      - ./greptimedb-cluster-docker-compose/certs:/certs:ro
    environment:
      - ETCDCTL_API=3
      - ETCDCTL_CACERT=/certs/ca.crt
      - ETCDCTL_CERT=/certs/server.crt
      - ETCDCTL_KEY=/certs/server-key.pem
    healthcheck:
      test: [ "CMD", "etcdctl", "--endpoints=https://etcd-tls:2378", "--cacert=/certs/ca.crt", "--cert=/certs/server.crt", "--key=/certs/server-key.pem", "endpoint", "health" ]
      interval: 10s
      timeout: 5s
      retries: 5
    networks:
      - greptimedb

  metasrv:
    image: *greptimedb_image
    container_name: metasrv
    ports:
      - 3002:3002
      - 3000:3000
    command:
      - metasrv
      - start
      - --rpc-bind-addr=0.0.0.0:3002
      - --rpc-server-addr=metasrv:3002
      - --store-addrs=etcd0:2379
      - --http-addr=0.0.0.0:3000
    healthcheck:
      test: [ "CMD", "curl", "-f", "http://metasrv:3000/health" ]
      interval: 5s
      timeout: 3s
      retries: 5
    depends_on:
      etcd0:
        condition: service_healthy
    networks:
      - greptimedb

  datanode0:
    image: *greptimedb_image
    container_name: datanode0
    ports:
      - 3001:3001
      - 5000:5000
    command:
      - datanode
      - start
      - --node-id=0
      - --data-home=/greptimedb_data
      - --rpc-bind-addr=0.0.0.0:3001
      - --rpc-server-addr=datanode0:3001
      - --metasrv-addrs=metasrv:3002
      - --http-addr=0.0.0.0:5000
    volumes:
      - ./greptimedb-cluster-docker-compose/datanode0:/greptimedb_data
    healthcheck:
      test: [ "CMD", "curl", "-fv", "http://datanode0:5000/health" ]
      interval: 5s
      timeout: 3s
      retries: 10
    depends_on:
      metasrv:
        condition: service_healthy
    networks:
      - greptimedb

  frontend0:
    image: *greptimedb_image
    container_name: frontend0
    ports:
      - 4000:4000
      - 4001:4001
      - 4002:4002
      - 4003:4003
    command:
      - frontend
      - start
      - --metasrv-addrs=metasrv:3002
      - --http-addr=0.0.0.0:4000
      - --rpc-bind-addr=0.0.0.0:4001
      - --mysql-addr=0.0.0.0:4002
      - --postgres-addr=0.0.0.0:4003
    healthcheck:
      test: [ "CMD", "curl", "-f", "http://frontend0:4000/health" ]
      interval: 5s
      timeout: 3s
      retries: 5
    depends_on:
      datanode0:
        condition: service_healthy
    networks:
      - greptimedb

  flownode0:
    image: *greptimedb_image
    container_name: flownode0
    ports:
      - 4004:4004
      - 4005:4005
    command:
      - flownode
      - start
      - --node-id=0
      - --metasrv-addrs=metasrv:3002
      - --rpc-bind-addr=0.0.0.0:4004
      - --rpc-server-addr=flownode0:4004
      - --http-addr=0.0.0.0:4005
    depends_on:
      frontend0:
        condition: service_healthy
    healthcheck:
      test: [ "CMD", "curl", "-f", "http://flownode0:4005/health" ]
      interval: 5s
      timeout: 3s
      retries: 5
    networks:
      - greptimedb

networks:
  greptimedb:
    name: greptimedb
docker/python/requirements.txt (new file, 5 lines)
@@ -0,0 +1,5 @@
numpy>=1.24.2
pandas>=1.5.3
pyarrow>=11.0.0
requests>=2.28.2
scipy>=1.10.1
Binary file not shown. (Before: 173 KiB)
@@ -1,51 +0,0 @@
# Log benchmark configuration

This repo holds the configuration we used to benchmark GreptimeDB, Clickhouse and Elasticsearch.

Here are the versions of the databases we used in the benchmark:

| name          | version    |
| :------------ | :--------- |
| GreptimeDB    | v0.9.2     |
| Clickhouse    | 24.9.1.219 |
| Elasticsearch | 8.15.0     |

## Structured model vs Unstructured model

We divide the test into two parts, using the structured model and the unstructured model respectively. You can also see the difference in the create table clauses.

__Structured model__

The log data is pre-processed into columns by vector. For example, an insert request looks like the following:
```SQL
INSERT INTO test_table (bytes, http_version, ip, method, path, status, user, timestamp) VALUES ()
```
The goal is to test string/text support for each database. In real scenarios this means the data source (or log data producer) has separate fields defined, or has already processed the raw input.

__Unstructured model__

The log data is inserted as a long string, and then we build a fulltext index upon these strings. For example, an insert request looks like the following:
```SQL
INSERT INTO test_table (message, timestamp) VALUES ()
```
The goal is to test fuzzy search performance for each database. In real scenarios this means the log is produced by some kind of middleware and inserted directly into the database.
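To make the two insert shapes above concrete, here is a minimal illustrative sketch with invented values; the real payloads are generated and sent by vector, and the column layout follows the create table clauses referenced in the next section:

```SQL
-- Structured model: one column per pre-parsed field (the values below are made up).
INSERT INTO test_table (bytes, http_version, ip, method, path, status, user, timestamp)
VALUES (1024, 'HTTP/1.1', '192.168.0.1', 'GET', '/api/v1/users', 200, 'alice', '2024-09-01 00:00:00.000');

-- Unstructured model: the whole raw log line goes into a single fulltext-indexed column.
INSERT INTO test_table (message, timestamp)
VALUES ('192.168.0.1 - alice [01/Sep/2024:00:00:00 +0000] "GET /api/v1/users HTTP/1.1" 200 1024', '2024-09-01 00:00:00.000');
```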
## Creating tables
See [here](./create_table.sql) for GreptimeDB and Clickhouse's create table clauses.
The Elasticsearch mapping is created automatically.

## Vector Configuration
We use vector to generate random log data and send inserts to the databases.
Please refer to [structured config](./structured_vector.toml) and [unstructured config](./unstructured_vector.toml) for detailed configuration.

## SQLs and payloads
Please refer to [SQL query](./query.sql) for GreptimeDB and Clickhouse, and [query payload](./query.md) for Elasticsearch.

## Steps to reproduce
0. Decide whether to run the structured model test or the unstructured model test.
1. Build the vector binary (see vector's config file for the specific branch) and the database binaries accordingly.
2. Create the tables in GreptimeDB and Clickhouse in advance.
3. Run vector to insert data.
4. When data insertion is finished, run the queries against each database. Note: you'll need to update the time range values after data insertion.

## Additional notes
- You can tune GreptimeDB's configuration to get better performance.
- You can set up GreptimeDB to use S3 as storage; see [here](https://docs.greptime.com/user-guide/deployments-administration/configuration#storage-options).
@@ -1,56 +0,0 @@
-- GreptimeDB create table clause
-- structured test, use vector to pre-process log data into fields
CREATE TABLE IF NOT EXISTS `test_table` (
  `bytes` Int64 NULL,
  `http_version` STRING NULL,
  `ip` STRING NULL,
  `method` STRING NULL,
  `path` STRING NULL,
  `status` SMALLINT UNSIGNED NULL,
  `user` STRING NULL,
  `timestamp` TIMESTAMP(3) NOT NULL,
  PRIMARY KEY (`user`, `path`, `status`),
  TIME INDEX (`timestamp`)
)
ENGINE=mito
WITH(
  append_mode = 'true'
);

-- unstructured test, build fulltext index on message column
CREATE TABLE IF NOT EXISTS `test_table` (
  `message` STRING NULL FULLTEXT WITH(analyzer = 'English', case_sensitive = 'false'),
  `timestamp` TIMESTAMP(3) NOT NULL,
  TIME INDEX (`timestamp`)
)
ENGINE=mito
WITH(
  append_mode = 'true'
);

-- Clickhouse create table clause
-- structured test
CREATE TABLE IF NOT EXISTS test_table
(
  bytes UInt64 NOT NULL,
  http_version String NOT NULL,
  ip String NOT NULL,
  method String NOT NULL,
  path String NOT NULL,
  status UInt8 NOT NULL,
  user String NOT NULL,
  timestamp String NOT NULL,
)
ENGINE = MergeTree()
ORDER BY (user, path, status);

-- unstructured test
SET allow_experimental_full_text_index = true;
CREATE TABLE IF NOT EXISTS test_table
(
  message String,
  timestamp String,
  INDEX inv_idx(message) TYPE full_text(0) GRANULARITY 1
)
ENGINE = MergeTree()
ORDER BY tuple();
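The benchmark's actual queries live in query.sql and query.md, which are not shown in this diff. Purely as an illustrative sketch of the kind of statements run against the tables above, a structured-model aggregation and an unstructured-model fuzzy search might look roughly like this; it assumes GreptimeDB's MATCHES fulltext function and ClickHouse's hasToken, and the search terms and time range are made up:

```SQL
-- Structured model (GreptimeDB): request count per status over a fixed time range.
SELECT status, count(*) AS cnt
FROM test_table
WHERE timestamp BETWEEN '2024-09-01 00:00:00' AND '2024-09-02 00:00:00'
GROUP BY status
ORDER BY cnt DESC;

-- Unstructured model (GreptimeDB): fuzzy search over the fulltext-indexed message column.
SELECT count(*) FROM test_table WHERE MATCHES(message, 'GET AND 404');

-- Unstructured model (Clickhouse): token search over the full_text-indexed message column.
SELECT count(*) FROM test_table WHERE hasToken(message, '404');
```

Remember to adjust the time range to match the window in which vector actually inserted data, as noted in the reproduction steps above.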
Some files were not shown because too many files have changed in this diff.