Mirror of https://github.com/GreptimeTeam/greptimedb.git
Synced 2025-12-23 06:30:05 +00:00

Compare commits: docs/vecto...develop (1 commit, SHA 710a68d2d6)

@@ -2,16 +2,14 @@
 linker = "aarch64-linux-gnu-gcc"

 [alias]
-sqlness = "run --bin sqlness-runner --target-dir target/sqlness --"
+sqlness = "run --bin sqlness-runner --"

-[unstable.git]
-shallow_index = true
-shallow_deps = true
-[unstable.gitoxide]
-fetch = true
-checkout = true
-list_files = true
-internal_use_git2 = false

-[env]
-CARGO_WORKSPACE_DIR = { value = "", relative = true }
+[build]
+rustflags = [
+    # lints
+    # TODO: use lint configuration in cargo https://github.com/rust-lang/cargo/issues/5034
+    "-Wclippy::print_stdout",
+    "-Wclippy::print_stderr",
+    "-Wclippy::implicit_clone",
+]
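
Note: the [alias] entry above makes the sqlness integration-test runner invocable through cargo, with any extra arguments passed through after the trailing "--". A minimal sketch of how the alias expands (the --help flag is just an illustrative pass-through argument):

    # "cargo sqlness <args>" expands to "cargo run --bin sqlness-runner -- <args>".
    # The "--target-dir target/sqlness" variant keeps sqlness builds out of the
    # main target/ cache so they do not invalidate normal build artifacts.
    cargo sqlness --help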

@@ -1,10 +0,0 @@
-root = true
-
-[*]
-end_of_line = lf
-indent_style = space
-insert_final_newline = true
-trim_trailing_whitespace = true
-
-[{Makefile,**.mk}]
-indent_style = tab

21  .env.example
@@ -14,23 +14,8 @@ GT_AZBLOB_CONTAINER=AZBLOB container
 GT_AZBLOB_ACCOUNT_NAME=AZBLOB account name
 GT_AZBLOB_ACCOUNT_KEY=AZBLOB account key
 GT_AZBLOB_ENDPOINT=AZBLOB endpoint
 # Settings for gcs test
 GT_GCS_BUCKET = GCS bucket
 GT_GCS_SCOPE = GCS scope
 GT_GCS_CREDENTIAL_PATH = GCS credential path
-GT_GCS_CREDENTIAL = GCS credential
 GT_GCS_ENDPOINT = GCS end point
-# Settings for kafka wal test
-GT_KAFKA_ENDPOINTS = localhost:9092
-
-# Setting for fuzz tests
-GT_MYSQL_ADDR = localhost:4002
-
-# Setting for unstable fuzz tests
-GT_FUZZ_BINARY_PATH=/path/to/
-GT_FUZZ_INSTANCE_ROOT_DIR=/tmp/unstable_greptime
-GT_FUZZ_INPUT_MAX_ROWS=2048
-GT_FUZZ_INPUT_MAX_TABLES=32
-GT_FUZZ_INPUT_MAX_COLUMNS=32
-GT_FUZZ_INPUT_MAX_ALTER_ACTIONS=256
-GT_FUZZ_INPUT_MAX_INSERT_ACTIONS=8
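
These are plain KEY=value settings. A common way to use such a file locally (an assumed workflow, not something this diff prescribes) is to copy it, fill in real values, and export the assignments before running tests:

    # Hypothetical local setup; the variable names come from .env.example above.
    cp .env.example .env   # then edit .env with real KEY=value lines
    set -a                 # auto-export every assignment that follows
    . ./.env
    set +a
    echo "$GT_MYSQL_ADDR"  # e.g. localhost:4002 for the fuzz tests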

27  .github/CODEOWNERS
@@ -1,27 +0,0 @@
-# GreptimeDB CODEOWNERS
-
-# These owners will be the default owners for everything in the repo.
-
-* @GreptimeTeam/db-approver
-
-## [Module] Database Engine
-/src/index @evenyag @discord9 @WenyXu
-/src/mito2 @evenyag @v0y4g3r @waynexia
-/src/query @evenyag @waynexia @discord9
-
-## [Module] Distributed
-/src/common/meta @MichaelScofield @WenyXu
-/src/common/procedure @MichaelScofield @WenyXu
-/src/meta-client @MichaelScofield @WenyXu
-/src/meta-srv @MichaelScofield @WenyXu
-
-## [Module] Write Ahead Log
-/src/log-store @v0y4g3r @WenyXu
-/src/store-api @v0y4g3r @evenyag
-
-## [Module] Metrics Engine
-/src/metric-engine @waynexia @WenyXu
-/src/promql @waynexia @evenyag @discord9
-
-## [Module] Flow
-/src/flow @discord9 @waynexia

@@ -1,7 +1,7 @@
 ---
 name: Bug report
 description: Is something not working? Help us fix it!
-labels: [ "C-bug" ]
+labels: [ "bug" ]
 body:
   - type: markdown
     attributes:
@@ -21,7 +21,6 @@ body:
         - Locking issue
        - Performance issue
         - Unexpected error
-        - User Experience
         - Other
     validations:
       required: true
@@ -34,14 +33,9 @@ body:
       multiple: true
       options:
         - Standalone mode
-        - Distributed Cluster
-        - Storage Engine
-        - Query Engine
-        - Table Engine
-        - Write Protocols
-        - Metasrv
         - Frontend
         - Datanode
+        - Meta
         - Other
     validations:
       required: true
@@ -83,17 +77,6 @@ body:
     validations:
       required: true

-  - type: input
-    id: greptimedb
-    attributes:
-      label: What version of GreptimeDB did you use?
-      description: |
-        Please provide the version of GreptimeDB. For example:
-        0.5.1 etc. You can get it by executing command line `greptime --version`.
-      placeholder: "0.5.1"
-    validations:
-      required: true
-
   - type: textarea
     id: logs
     attributes:

2  .github/ISSUE_TEMPLATE/config.yml
@@ -4,5 +4,5 @@ contact_links:
     url: https://greptime.com/slack
     about: Get free help from the Greptime community
   - name: Greptime Community Discussion
-    url: https://github.com/greptimeTeam/discussions
+    url: https://github.com/greptimeTeam/greptimedb/discussions
     about: Get free help from the Greptime community

2  .github/ISSUE_TEMPLATE/enhancement.yml
@@ -1,7 +1,7 @@
 ---
 name: Enhancement
 description: Suggest an enhancement to existing functionality
-labels: [ "C-enhancement" ]
+labels: [ "enhancement" ]
 body:
   - type: dropdown
     id: type

@@ -1,7 +1,7 @@
 ---
-name: New Feature
+name: Feature request
 description: Suggest a new feature for GreptimeDB
-labels: [ "C-feature" ]
+labels: [ "feature request" ]
 body:
   - type: markdown
     id: info

@@ -1,18 +0,0 @@
-name: Build and push CI Docker image
-description: Build and push CI Docker image to local registry
-inputs:
-  binary_path:
-    default: "./bin"
-    description: "Binary path"
-runs:
-  using: composite
-  steps:
-    - name: Build and push to local registry
-      uses: docker/build-push-action@v5
-      with:
-        context: .
-        file: ./docker/ci/ubuntu/Dockerfile.fuzztests
-        push: true
-        tags: localhost:5001/greptime/greptimedb:latest
-        build-args: |
-          BINARY_PATH=${{ inputs.binary_path }}

@@ -22,15 +22,15 @@ inputs:
   build-dev-builder-ubuntu:
     description: Build dev-builder-ubuntu image
     required: false
-    default: "true"
+    default: 'true'
   build-dev-builder-centos:
     description: Build dev-builder-centos image
     required: false
-    default: "true"
+    default: 'true'
   build-dev-builder-android:
     description: Build dev-builder-android image
     required: false
-    default: "true"
+    default: 'true'
 runs:
   using: composite
   steps:
@@ -41,34 +41,27 @@ runs:
          username: ${{ inputs.dockerhub-image-registry-username }}
          password: ${{ inputs.dockerhub-image-registry-token }}

-    - name: Set up qemu for multi-platform builds
-      uses: docker/setup-qemu-action@v3
-      with:
-        platforms: linux/amd64,linux/arm64
-        # The latest version will lead to segmentation fault.
-        image: tonistiigi/binfmt:qemu-v7.0.0-28
-
-    - name: Build and push dev-builder-ubuntu image # Build image for amd64 and arm64 platform.
+    - name: Build and push dev-builder-ubuntu image
       shell: bash
       if: ${{ inputs.build-dev-builder-ubuntu == 'true' }}
       run: |
         make dev-builder \
           BASE_IMAGE=ubuntu \
-          BUILDX_MULTI_PLATFORM_BUILD=all \
+          BUILDX_MULTI_PLATFORM_BUILD=true \
           IMAGE_REGISTRY=${{ inputs.dockerhub-image-registry }} \
           IMAGE_NAMESPACE=${{ inputs.dockerhub-image-namespace }} \
-          DEV_BUILDER_IMAGE_TAG=${{ inputs.version }}
+          IMAGE_TAG=${{ inputs.version }}

-    - name: Build and push dev-builder-centos image # Only build image for amd64 platform.
+    - name: Build and push dev-builder-centos image
       shell: bash
       if: ${{ inputs.build-dev-builder-centos == 'true' }}
       run: |
         make dev-builder \
           BASE_IMAGE=centos \
-          BUILDX_MULTI_PLATFORM_BUILD=amd64 \
+          BUILDX_MULTI_PLATFORM_BUILD=true \
           IMAGE_REGISTRY=${{ inputs.dockerhub-image-registry }} \
           IMAGE_NAMESPACE=${{ inputs.dockerhub-image-namespace }} \
-          DEV_BUILDER_IMAGE_TAG=${{ inputs.version }}
+          IMAGE_TAG=${{ inputs.version }}

     - name: Build and push dev-builder-android image # Only build image for amd64 platform.
       shell: bash
@@ -76,7 +69,8 @@ runs:
       run: |
         make dev-builder \
           BASE_IMAGE=android \
-          BUILDX_MULTI_PLATFORM_BUILD=amd64 \
           IMAGE_REGISTRY=${{ inputs.dockerhub-image-registry }} \
           IMAGE_NAMESPACE=${{ inputs.dockerhub-image-namespace }} \
-          DEV_BUILDER_IMAGE_TAG=${{ inputs.version }}
+          IMAGE_TAG=${{ inputs.version }} && \
+
+        docker push ${{ inputs.dockerhub-image-registry }}/${{ inputs.dockerhub-image-namespace }}/dev-builder-android:${{ inputs.version }}

35  .github/actions/build-greptime-binary/action.yml
@@ -24,31 +24,9 @@ inputs:
     description: Build android artifacts
     required: false
     default: 'false'
-  image-namespace:
-    description: Image Namespace
-    required: false
-    default: 'greptime'
-  image-registry:
-    description: Image Registry
-    required: false
-    default: 'docker.io'
-  large-page-size:
-    description: Build GreptimeDB with large page size (65536).
-    required: false
-    default: 'false'
-
 runs:
   using: composite
   steps:
-    - name: Set extra build environment variables
-      shell: bash
-      run: |
-        if [[ '${{ inputs.large-page-size }}' == 'true' ]]; then
-          echo 'EXTRA_BUILD_ENVS="JEMALLOC_SYS_WITH_LG_PAGE=16"' >> $GITHUB_ENV
-        else
-          echo 'EXTRA_BUILD_ENVS=' >> $GITHUB_ENV
-        fi
-
     - name: Build greptime binary
       shell: bash
       if: ${{ inputs.build-android-artifacts == 'false' }}
@@ -57,10 +35,7 @@ runs:
         make build-by-dev-builder \
           CARGO_PROFILE=${{ inputs.cargo-profile }} \
           FEATURES=${{ inputs.features }} \
-          BASE_IMAGE=${{ inputs.base-image }} \
-          IMAGE_NAMESPACE=${{ inputs.image-namespace }} \
-          IMAGE_REGISTRY=${{ inputs.image-registry }} \
-          EXTRA_BUILD_ENVS=$EXTRA_BUILD_ENVS
+          BASE_IMAGE=${{ inputs.base-image }}

     - name: Upload artifacts
       uses: ./.github/actions/upload-artifacts
@@ -69,7 +44,7 @@ runs:
         PROFILE_TARGET: ${{ inputs.cargo-profile == 'dev' && 'debug' || inputs.cargo-profile }}
       with:
         artifacts-dir: ${{ inputs.artifacts-dir }}
-        target-files: ./target/$PROFILE_TARGET/greptime
+        target-file: ./target/$PROFILE_TARGET/greptime
         version: ${{ inputs.version }}
         working-dir: ${{ inputs.working-dir }}

@@ -78,15 +53,13 @@ runs:
       shell: bash
       if: ${{ inputs.build-android-artifacts == 'true' }}
       run: |
-        cd ${{ inputs.working-dir }} && make strip-android-bin \
-          IMAGE_NAMESPACE=${{ inputs.image-namespace }} \
-          IMAGE_REGISTRY=${{ inputs.image-registry }}
+        cd ${{ inputs.working-dir }} && make strip-android-bin

     - name: Upload android artifacts
       uses: ./.github/actions/upload-artifacts
       if: ${{ inputs.build-android-artifacts == 'true' }}
       with:
         artifacts-dir: ${{ inputs.artifacts-dir }}
-        target-files: ./target/aarch64-linux-android/release/greptime
+        target-file: ./target/aarch64-linux-android/release/greptime
         version: ${{ inputs.version }}
         working-dir: ${{ inputs.working-dir }}

14  .github/actions/build-greptime-images/action.yml
@@ -34,8 +34,8 @@ inputs:
     required: true
   push-latest-tag:
     description: Whether to push the latest tag
-    required: true
-    default: 'false'
+    required: false
+    default: 'true'
 runs:
   using: composite
   steps:
@@ -47,17 +47,13 @@ runs:
           password: ${{ inputs.image-registry-password }}

     - name: Set up qemu for multi-platform builds
-      uses: docker/setup-qemu-action@v3
-      with:
-        platforms: linux/amd64,linux/arm64
-        # The latest version will lead to segmentation fault.
-        image: tonistiigi/binfmt:qemu-v7.0.0-28
+      uses: docker/setup-qemu-action@v2

     - name: Set up buildx
       uses: docker/setup-buildx-action@v2

     - name: Download amd64 artifacts
-      uses: actions/download-artifact@v4
+      uses: actions/download-artifact@v3
       with:
         name: ${{ inputs.amd64-artifact-name }}

@@ -70,7 +66,7 @@ runs:
         mv ${{ inputs.amd64-artifact-name }} amd64

     - name: Download arm64 artifacts
-      uses: actions/download-artifact@v4
+      uses: actions/download-artifact@v3
       if: ${{ inputs.arm64-artifact-name }}
       with:
         name: ${{ inputs.arm64-artifact-name }}

8  .github/actions/build-images/action.yml
@@ -22,8 +22,8 @@ inputs:
     required: true
   push-latest-tag:
     description: Whether to push the latest tag
-    required: true
-    default: 'false'
+    required: false
+    default: 'true'
   dev-mode:
     description: Enable dev mode, only build standard greptime
     required: false
@@ -41,8 +41,8 @@ runs:
       image-name: ${{ inputs.image-name }}
       image-tag: ${{ inputs.version }}
       docker-file: docker/ci/ubuntu/Dockerfile
-      amd64-artifact-name: greptime-linux-amd64-${{ inputs.version }}
-      arm64-artifact-name: greptime-linux-arm64-${{ inputs.version }}
+      amd64-artifact-name: greptime-linux-amd64-pyo3-${{ inputs.version }}
+      arm64-artifact-name: greptime-linux-arm64-pyo3-${{ inputs.version }}
       platforms: linux/amd64,linux/arm64
       push-latest-tag: ${{ inputs.push-latest-tag }}

44  .github/actions/build-linux-artifacts/action.yml
@@ -16,21 +16,11 @@ inputs:
   dev-mode:
     description: Enable dev mode, only build standard greptime
     required: false
-    default: "false"
-  image-namespace:
-    description: Image Namespace
-    required: true
-  image-registry:
-    description: Image Registry
-    required: true
+    default: 'false'
   working-dir:
     description: Working directory to build the artifacts
     required: false
     default: .
-  large-page-size:
-    description: Build GreptimeDB with large page size (65536).
-    required: false
-    default: 'false'
 runs:
   using: composite
   steps:
@@ -40,19 +30,28 @@ runs:
       # NOTE: If the BUILD_JOBS > 4, it's always OOM in EC2 instance.
       run: |
         cd ${{ inputs.working-dir }} && \
-        make run-it-in-container BUILD_JOBS=4 \
-          IMAGE_NAMESPACE=${{ inputs.image-namespace }} \
-          IMAGE_REGISTRY=${{ inputs.image-registry }}
+        make run-it-in-container BUILD_JOBS=4

     - name: Upload sqlness logs
       if: ${{ failure() && inputs.disable-run-tests == 'false' }} # Only upload logs when the integration tests failed.
-      uses: actions/upload-artifact@v4
+      uses: actions/upload-artifact@v3
       with:
         name: sqlness-logs
         path: /tmp/greptime-*.log
         retention-days: 3

-    - name: Build greptime # Builds standard greptime binary
+    - name: Build standard greptime
+      uses: ./.github/actions/build-greptime-binary
+      with:
+        base-image: ubuntu
+        features: pyo3_backend,servers/dashboard
+        cargo-profile: ${{ inputs.cargo-profile }}
+        artifacts-dir: greptime-linux-${{ inputs.arch }}-pyo3-${{ inputs.version }}
+        version: ${{ inputs.version }}
+        working-dir: ${{ inputs.working-dir }}
+
+    - name: Build greptime without pyo3
+      if: ${{ inputs.dev-mode == 'false' }}
       uses: ./.github/actions/build-greptime-binary
       with:
         base-image: ubuntu
@@ -61,9 +60,6 @@ runs:
         artifacts-dir: greptime-linux-${{ inputs.arch }}-${{ inputs.version }}
         version: ${{ inputs.version }}
         working-dir: ${{ inputs.working-dir }}
-        image-registry: ${{ inputs.image-registry }}
-        image-namespace: ${{ inputs.image-namespace }}
-        large-page-size: ${{ inputs.large-page-size }}

     - name: Clean up the target directory # Clean up the target directory for the centos7 base image, or it will still use the objects of last build.
       shell: bash
@@ -72,7 +68,7 @@ runs:

     - name: Build greptime on centos base image
       uses: ./.github/actions/build-greptime-binary
-      if: ${{ inputs.arch == 'amd64' && inputs.dev-mode == 'false' }} # Builds greptime for centos if the host machine is amd64.
+      if: ${{ inputs.arch == 'amd64' && inputs.dev-mode == 'false' }} # Only build centos7 base image for amd64.
       with:
         base-image: centos
         features: servers/dashboard
@@ -80,19 +76,13 @@ runs:
         artifacts-dir: greptime-linux-${{ inputs.arch }}-centos-${{ inputs.version }}
         version: ${{ inputs.version }}
         working-dir: ${{ inputs.working-dir }}
-        image-registry: ${{ inputs.image-registry }}
-        image-namespace: ${{ inputs.image-namespace }}
-        large-page-size: ${{ inputs.large-page-size }}

     - name: Build greptime on android base image
       uses: ./.github/actions/build-greptime-binary
-      if: ${{ inputs.arch == 'amd64' && inputs.dev-mode == 'false' }} # Builds arm64 greptime binary for android if the host machine amd64.
+      if: ${{ inputs.arch == 'amd64' && inputs.dev-mode == 'false' }} # Only build android base image on amd64.
       with:
         base-image: android
         artifacts-dir: greptime-android-arm64-${{ inputs.version }}
         version: ${{ inputs.version }}
         working-dir: ${{ inputs.working-dir }}
         build-android-artifacts: true
-        image-registry: ${{ inputs.image-registry }}
-        image-namespace: ${{ inputs.image-namespace }}
-        large-page-size: ${{ inputs.large-page-size }}

21  .github/actions/build-macos-artifacts/action.yml
@@ -4,6 +4,9 @@ inputs:
   arch:
     description: Architecture to build
     required: true
+  rust-toolchain:
+    description: Rust toolchain to use
+    required: true
   cargo-profile:
     description: Cargo profile to build
     required: true
@@ -40,9 +43,10 @@ runs:
         brew install protobuf

     - name: Install rust toolchain
-      uses: actions-rust-lang/setup-rust-toolchain@v1
+      uses: dtolnay/rust-toolchain@master
       with:
-        target: ${{ inputs.arch }}
+        toolchain: ${{ inputs.rust-toolchain }}
+        targets: ${{ inputs.arch }}

     - name: Start etcd # For integration tests.
       if: ${{ inputs.disable-run-tests == 'false' }}
@@ -55,22 +59,15 @@ runs:
       if: ${{ inputs.disable-run-tests == 'false' }}
       uses: taiki-e/install-action@nextest

-    # Get proper backtraces in mac Sonoma. Currently there's an issue with the new
-    # linker that prevents backtraces from getting printed correctly.
-    #
-    # <https://github.com/rust-lang/rust/issues/113783>
     - name: Run integration tests
       if: ${{ inputs.disable-run-tests == 'false' }}
       shell: bash
-      env:
-        CARGO_BUILD_RUSTFLAGS: "-Clink-arg=-Wl,-ld_classic"
-        SQLNESS_OPTS: "--preserve-state"
       run: |
         make test sqlness-test

     - name: Upload sqlness logs
       if: ${{ failure() }} # Only upload logs when the integration tests failed.
-      uses: actions/upload-artifact@v4
+      uses: actions/upload-artifact@v3
       with:
         name: sqlness-logs
         path: /tmp/greptime-*.log
@@ -78,8 +75,6 @@ runs:

     - name: Build greptime binary
       shell: bash
-      env:
-        CARGO_BUILD_RUSTFLAGS: "-Clink-arg=-Wl,-ld_classic"
       run: |
         make build \
           CARGO_PROFILE=${{ inputs.cargo-profile }} \
@@ -90,5 +85,5 @@ runs:
       uses: ./.github/actions/upload-artifacts
       with:
         artifacts-dir: ${{ inputs.artifacts-dir }}
-        target-files: target/${{ inputs.arch }}/${{ inputs.cargo-profile }}/greptime
+        target-file: target/${{ inputs.arch }}/${{ inputs.cargo-profile }}/greptime
         version: ${{ inputs.version }}

@@ -4,6 +4,9 @@ inputs:
   arch:
     description: Architecture to build
     required: true
+  rust-toolchain:
+    description: Rust toolchain to use
+    required: true
   cargo-profile:
     description: Cargo profile to build
     required: true
@@ -22,17 +25,27 @@ inputs:
 runs:
   using: composite
   steps:
-    - uses: arduino/setup-protoc@v3
+    - uses: arduino/setup-protoc@v1

     - name: Install rust toolchain
-      uses: actions-rust-lang/setup-rust-toolchain@v1
+      uses: dtolnay/rust-toolchain@master
       with:
-        target: ${{ inputs.arch }}
+        toolchain: ${{ inputs.rust-toolchain }}
+        targets: ${{ inputs.arch }}
         components: llvm-tools-preview

     - name: Rust Cache
       uses: Swatinem/rust-cache@v2

+    - name: Install Python
+      uses: actions/setup-python@v4
+      with:
+        python-version: '3.10'
+
+    - name: Install PyArrow Package
+      shell: pwsh
+      run: pip install pyarrow
+
     - name: Install WSL distribution
       uses: Vampire/setup-wsl@v2
       with:
@@ -46,25 +59,22 @@ runs:
       if: ${{ inputs.disable-run-tests == 'false' }}
       shell: pwsh
       run: make test sqlness-test
-      env:
-        RUST_BACKTRACE: 1
-        SQLNESS_OPTS: "--preserve-state"

     - name: Upload sqlness logs
       if: ${{ failure() }} # Only upload logs when the integration tests failed.
-      uses: actions/upload-artifact@v4
+      uses: actions/upload-artifact@v3
       with:
         name: sqlness-logs
-        path: C:\Users\RUNNER~1\AppData\Local\Temp\sqlness*
+        path: ${{ runner.temp }}/greptime-*.log
         retention-days: 3

     - name: Build greptime binary
       shell: pwsh
-      run: cargo build --profile ${{ inputs.cargo-profile }} --features ${{ inputs.features }} --target ${{ inputs.arch }} --bin greptime
+      run: cargo build --profile ${{ inputs.cargo-profile }} --features ${{ inputs.features }} --target ${{ inputs.arch }}

     - name: Upload artifacts
       uses: ./.github/actions/upload-artifacts
       with:
         artifacts-dir: ${{ inputs.artifacts-dir }}
-        target-files: target/${{ inputs.arch }}/${{ inputs.cargo-profile }}/greptime,target/${{ inputs.arch }}/${{ inputs.cargo-profile }}/greptime.pdb
+        target-file: target/${{ inputs.arch }}/${{ inputs.cargo-profile }}/greptime
         version: ${{ inputs.version }}

19  .github/actions/fuzz-test/action.yaml
@@ -1,19 +0,0 @@
-name: Fuzz Test
-description: 'Fuzz test given setup and service'
-inputs:
-  target:
-    description: "The fuzz target to test"
-    required: true
-  max-total-time:
-    description: "Max total time(secs)"
-    required: true
-  unstable:
-    default: 'false'
-    description: "Enable unstable feature"
-runs:
-  using: composite
-  steps:
-    - name: Run Fuzz Test
-      shell: bash
-      run: cargo fuzz run ${{ inputs.target }} --fuzz-dir tests-fuzz -D -s none ${{ inputs.unstable == 'true' && '--features=unstable' || '' }} -- -max_total_time=${{ inputs.max-total-time }}
-
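
The deleted fuzz-test action wrapped a single cargo-fuzz invocation, which can still be reproduced by hand. A sketch, assuming cargo-fuzz is installed and substituting an illustrative target name (fuzz_insert is a placeholder, not taken from this diff):

    # -D builds in debug mode, -s none disables sanitizers, and
    # -max_total_time bounds the run in seconds, mirroring the action's inputs.
    cargo fuzz run fuzz_insert --fuzz-dir tests-fuzz -D -s none -- -max_total_time=60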

@@ -9,13 +9,13 @@ runs:
   steps:
     # Download artifacts from previous jobs, the artifacts will be downloaded to:
     # ${WORKING_DIR}
-    # |- greptime-darwin-amd64-v0.5.0/greptime-darwin-amd64-v0.5.0.tar.gz
-    # |- greptime-darwin-amd64-v0.5.0.sha256sum/greptime-darwin-amd64-v0.5.0.sha256sum
+    # |- greptime-darwin-amd64-pyo3-v0.5.0/greptime-darwin-amd64-pyo3-v0.5.0.tar.gz
+    # |- greptime-darwin-amd64-pyo3-v0.5.0.sha256sum/greptime-darwin-amd64-pyo3-v0.5.0.sha256sum
     # |- greptime-darwin-amd64-v0.5.0/greptime-darwin-amd64-v0.5.0.tar.gz
     # |- greptime-darwin-amd64-v0.5.0.sha256sum/greptime-darwin-amd64-v0.5.0.sha256sum
     # ...
     - name: Download artifacts
-      uses: actions/download-artifact@v4
+      uses: actions/download-artifact@v3

     - name: Create git tag for release
       if: ${{ github.event_name != 'push' }} # Meaning this is a scheduled or manual workflow.

24  .github/actions/release-cn-artifacts/action.yaml
@@ -51,8 +51,8 @@ inputs:
     required: true
   upload-to-s3:
     description: Upload to S3
-    required: true
-    default: 'false'
+    required: false
+    default: 'true'
   artifacts-dir:
     description: Directory to store artifacts
     required: false
@@ -64,34 +64,26 @@ inputs:
   upload-max-retry-times:
     description: Max retry times for uploading artifacts to S3
     required: false
-    default: "30"
+    default: "20"
   upload-retry-timeout:
     description: Timeout for uploading artifacts to S3
     required: false
-    default: "120" # minutes
+    default: "30" # minutes
 runs:
   using: composite
   steps:
     - name: Download artifacts
-      uses: actions/download-artifact@v4
+      uses: actions/download-artifact@v3
       with:
         path: ${{ inputs.artifacts-dir }}

-    - name: Install s5cmd
-      shell: bash
-      run: |
-        wget https://github.com/peak/s5cmd/releases/download/v2.3.0/s5cmd_2.3.0_Linux-64bit.tar.gz
-        tar -xzf s5cmd_2.3.0_Linux-64bit.tar.gz
-        sudo mv s5cmd /usr/local/bin/
-        sudo chmod +x /usr/local/bin/s5cmd
-
     - name: Release artifacts to cn region
       uses: nick-invision/retry@v2
       if: ${{ inputs.upload-to-s3 == 'true' }}
       env:
         AWS_ACCESS_KEY_ID: ${{ inputs.aws-cn-access-key-id }}
         AWS_SECRET_ACCESS_KEY: ${{ inputs.aws-cn-secret-access-key }}
-        AWS_REGION: ${{ inputs.aws-cn-region }}
+        AWS_DEFAULT_REGION: ${{ inputs.aws-cn-region }}
         UPDATE_VERSION_INFO: ${{ inputs.update-version-info }}
       with:
         max_attempts: ${{ inputs.upload-max-retry-times }}
@@ -131,10 +123,10 @@ runs:
         DST_REGISTRY_PASSWORD: ${{ inputs.dst-image-registry-password }}
       run: |
         ./.github/scripts/copy-image.sh \
-          ${{ inputs.src-image-registry }}/${{ inputs.src-image-namespace }}/${{ inputs.src-image-name }}-centos:${{ inputs.version }} \
+          ${{ inputs.src-image-registry }}/${{ inputs.src-image-namespace }}/${{ inputs.src-image-name }}-centos:latest \
          ${{ inputs.dst-image-registry }}/${{ inputs.dst-image-namespace }}

-    - name: Push latest greptimedb-centos image from DockerHub to ACR
+    - name: Push greptimedb-centos image from DockerHub to ACR
       shell: bash
       if: ${{ inputs.dev-mode == 'false' && inputs.push-latest-tag == 'true' }}
       env:

17  .github/actions/setup-chaos/action.yml
@@ -1,17 +0,0 @@
-name: Setup Kind
-description: Deploy Kind
-runs:
-  using: composite
-  steps:
-    - uses: actions/checkout@v4
-    - name: Create kind cluster
-      shell: bash
-      run: |
-        helm repo add chaos-mesh https://charts.chaos-mesh.org
-        kubectl create ns chaos-mesh
-        helm install chaos-mesh chaos-mesh/chaos-mesh -n=chaos-mesh --version 2.6.3
-    - name: Print Chaos-mesh
-      if: always()
-      shell: bash
-      run: |
-        kubectl get po -n chaos-mesh

16  .github/actions/setup-cyborg/action.yml
@@ -1,16 +0,0 @@
-name: Setup cyborg environment
-description: Setup cyborg environment
-runs:
-  using: composite
-  steps:
-    - uses: actions/setup-node@v4
-      with:
-        node-version: 22
-    - uses: pnpm/action-setup@v3
-      with:
-        package_json_file: 'cyborg/package.json'
-        run_install: true
-    - name: Describe the Environment
-      working-directory: cyborg
-      shell: bash
-      run: pnpm tsx -v

32  .github/actions/setup-etcd-cluster/action.yml
@@ -1,32 +0,0 @@
-name: Setup Etcd cluster
-description: Deploy Etcd cluster on Kubernetes
-inputs:
-  etcd-replicas:
-    default: 1
-    description: "Etcd replicas"
-  namespace:
-    default: "etcd-cluster"
-
-runs:
-  using: composite
-  steps:
-    - name: Install Etcd cluster
-      shell: bash
-      run: |
-        helm upgrade \
-          --install etcd oci://registry-1.docker.io/bitnamicharts/etcd \
-          --set replicaCount=${{ inputs.etcd-replicas }} \
-          --set resources.requests.cpu=50m \
-          --set resources.requests.memory=128Mi \
-          --set resources.limits.cpu=1500m \
-          --set resources.limits.memory=2Gi \
-          --set auth.rbac.create=false \
-          --set auth.rbac.token.enabled=false \
-          --set persistence.size=2Gi \
-          --create-namespace \
-          --set global.security.allowInsecureImages=true \
-          --set image.registry=docker.io \
-          --set image.repository=greptime/etcd \
-          --set image.tag=3.6.1-debian-12-r3 \
-          --version 12.0.8 \
-          -n ${{ inputs.namespace }}

@@ -1,95 +0,0 @@
-name: Setup GreptimeDB cluster
-description: Deploy GreptimeDB cluster on Kubernetes
-inputs:
-  frontend-replicas:
-    default: 2
-    description: "Number of Frontend replicas"
-  datanode-replicas:
-    default: 2
-    description: "Number of Datanode replicas"
-  meta-replicas:
-    default: 2
-    description: "Number of Metasrv replicas"
-  image-registry:
-    default: "docker.io"
-    description: "Image registry"
-  image-repository:
-    default: "greptime/greptimedb"
-    description: "Image repository"
-  image-tag:
-    default: "latest"
-    description: 'Image tag'
-  etcd-endpoints:
-    default: "etcd.etcd-cluster.svc.cluster.local:2379"
-    description: "Etcd endpoints"
-  values-filename:
-    default: "with-minio.yaml"
-  enable-region-failover:
-    default: false
-
-runs:
-  using: composite
-  steps:
-    - name: Install GreptimeDB operator
-      uses: nick-fields/retry@v3
-      with:
-        timeout_minutes: 3
-        max_attempts: 3
-        shell: bash
-        command: |
-          helm repo add greptime https://greptimeteam.github.io/helm-charts/
-          helm repo update
-          helm upgrade \
-            --install \
-            --create-namespace \
-            greptimedb-operator greptime/greptimedb-operator \
-            -n greptimedb-admin \
-            --wait \
-            --wait-for-jobs
-    - name: Install GreptimeDB cluster
-      shell: bash
-      run: |
-        helm upgrade \
-          --install my-greptimedb \
-          --set meta.backendStorage.etcd.endpoints=${{ inputs.etcd-endpoints }} \
-          --set meta.enableRegionFailover=${{ inputs.enable-region-failover }} \
-          --set image.registry=${{ inputs.image-registry }} \
-          --set image.repository=${{ inputs.image-repository }} \
-          --set image.tag=${{ inputs.image-tag }} \
-          --set base.podTemplate.main.resources.requests.cpu=50m \
-          --set base.podTemplate.main.resources.requests.memory=256Mi \
-          --set base.podTemplate.main.resources.limits.cpu=2000m \
-          --set base.podTemplate.main.resources.limits.memory=3Gi \
-          --set frontend.replicas=${{ inputs.frontend-replicas }} \
-          --set datanode.replicas=${{ inputs.datanode-replicas }} \
-          --set meta.replicas=${{ inputs.meta-replicas }} \
-          greptime/greptimedb-cluster \
-          --create-namespace \
-          -n my-greptimedb \
-          --values ./.github/actions/setup-greptimedb-cluster/${{ inputs.values-filename }} \
-          --wait \
-          --wait-for-jobs
-    - name: Wait for GreptimeDB
-      shell: bash
-      run: |
-        while true; do
-          PHASE=$(kubectl -n my-greptimedb get gtc my-greptimedb -o jsonpath='{.status.clusterPhase}')
-          if [ "$PHASE" == "Running" ]; then
-            echo "Cluster is ready"
-            break
-          else
-            echo "Cluster is not ready yet: Current phase: $PHASE"
-            kubectl get pods -n my-greptimedb
-            sleep 5 # wait for 5 seconds before check again.
-          fi
-        done
-    - name: Print GreptimeDB info
-      if: always()
-      shell: bash
-      run: |
-        kubectl get all --show-labels -n my-greptimedb
-    - name: Describe Nodes
-      if: always()
-      shell: bash
-      run: |
-        kubectl describe nodes

@@ -1,13 +0,0 @@
-meta:
-  configData: |-
-    [runtime]
-    global_rt_size = 4
-datanode:
-  configData: |-
-    [runtime]
-    global_rt_size = 4
-    compact_rt_size = 2
-frontend:
-  configData: |-
-    [runtime]
-    global_rt_size = 4

@@ -1,33 +0,0 @@
-meta:
-  configData: |-
-    [runtime]
-    global_rt_size = 4
-
-    [datanode]
-    [datanode.client]
-    timeout = "120s"
-datanode:
-  configData: |-
-    [runtime]
-    global_rt_size = 4
-    compact_rt_size = 2
-
-    [storage]
-    cache_path = "/data/greptimedb/s3cache"
-    cache_capacity = "256MB"
-frontend:
-  configData: |-
-    [runtime]
-    global_rt_size = 4
-
-    [meta_client]
-    ddl_timeout = "120s"
-objectStorage:
-  s3:
-    bucket: default
-    region: us-west-2
-    root: test-root
-    endpoint: http://minio.minio.svc.cluster.local
-    credentials:
-      accessKeyId: rootuser
-      secretAccessKey: rootpass123

@@ -1,29 +0,0 @@
-meta:
-  configData: |-
-    [runtime]
-    global_rt_size = 4
-
-    [datanode]
-    [datanode.client]
-    timeout = "120s"
-datanode:
-  configData: |-
-    [runtime]
-    global_rt_size = 4
-    compact_rt_size = 2
-frontend:
-  configData: |-
-    [runtime]
-    global_rt_size = 4
-
-    [meta_client]
-    ddl_timeout = "120s"
-objectStorage:
-  s3:
-    bucket: default
-    region: us-west-2
-    root: test-root
-    endpoint: http://minio.minio.svc.cluster.local
-    credentials:
-      accessKeyId: rootuser
-      secretAccessKey: rootpass123

@@ -1,51 +0,0 @@
-logging:
-  level: "info"
-  format: "json"
-  filters:
-    - log_store=debug
-meta:
-  configData: |-
-    [runtime]
-    global_rt_size = 4
-
-    [wal]
-    provider = "kafka"
-    broker_endpoints = ["kafka.kafka-cluster.svc.cluster.local:9092"]
-    num_topics = 3
-    auto_prune_interval = "30s"
-    trigger_flush_threshold = 100
-
-    [datanode]
-    [datanode.client]
-    timeout = "120s"
-datanode:
-  configData: |-
-    [runtime]
-    global_rt_size = 4
-    compact_rt_size = 2
-
-    [wal]
-    provider = "kafka"
-    broker_endpoints = ["kafka.kafka-cluster.svc.cluster.local:9092"]
-    overwrite_entry_start_id = true
-frontend:
-  configData: |-
-    [runtime]
-    global_rt_size = 4
-
-    [meta_client]
-    ddl_timeout = "120s"
-objectStorage:
-  s3:
-    bucket: default
-    region: us-west-2
-    root: test-root
-    endpoint: http://minio.minio.svc.cluster.local
-    credentials:
-      accessKeyId: rootuser
-      secretAccessKey: rootpass123
-remoteWal:
-  enabled: true
-  kafka:
-    brokerEndpoints:
-      - "kafka.kafka-cluster.svc.cluster.local:9092"

30  .github/actions/setup-kafka-cluster/action.yml
@@ -1,30 +0,0 @@
-name: Setup Kafka cluster
-description: Deploy Kafka cluster on Kubernetes
-inputs:
-  controller-replicas:
-    default: 3
-    description: "Kafka controller replicas"
-  namespace:
-    default: "kafka-cluster"
-
-runs:
-  using: composite
-  steps:
-    - name: Install Kafka cluster
-      shell: bash
-      run: |
-        helm upgrade \
-          --install kafka oci://registry-1.docker.io/bitnamicharts/kafka \
-          --set controller.replicaCount=${{ inputs.controller-replicas }} \
-          --set controller.resources.requests.cpu=50m \
-          --set controller.resources.requests.memory=128Mi \
-          --set controller.resources.limits.cpu=2000m \
-          --set controller.resources.limits.memory=2Gi \
-          --set listeners.controller.protocol=PLAINTEXT \
-          --set listeners.client.protocol=PLAINTEXT \
-          --create-namespace \
-          --set image.registry=docker.io \
-          --set image.repository=greptime/kafka \
-          --set image.tag=3.9.0-debian-12-r1 \
-          --version 31.0.0 \
-          -n ${{ inputs.namespace }}

10  .github/actions/setup-kind/action.yml
@@ -1,10 +0,0 @@
-name: Setup Kind
-description: Deploy Kind
-runs:
-  using: composite
-  steps:
-    - uses: actions/checkout@v4
-    - name: Create kind cluster
-      shell: bash
-      run: |
-        ./.github/scripts/kind-with-registry.sh

24  .github/actions/setup-minio/action.yml
@@ -1,24 +0,0 @@
-name: Setup Minio cluster
-description: Deploy Minio cluster on Kubernetes
-inputs:
-  replicas:
-    default: 1
-    description: "replicas"
-
-runs:
-  using: composite
-  steps:
-    - name: Install Etcd cluster
-      shell: bash
-      run: |
-        helm repo add minio https://charts.min.io/
-        helm upgrade --install minio \
-          --set resources.requests.memory=128Mi \
-          --set replicas=${{ inputs.replicas }} \
-          --set mode=standalone \
-          --set rootUser=rootuser,rootPassword=rootpass123 \
-          --set buckets[0].name=default \
-          --set service.port=80,service.targetPort=9000 \
-          minio/minio \
-          --create-namespace \
-          -n minio

@@ -1,32 +0,0 @@
-name: Setup PostgreSQL
-description: Deploy PostgreSQL on Kubernetes
-inputs:
-  postgres-replicas:
-    default: 1
-    description: "Number of PostgreSQL replicas"
-  namespace:
-    default: "postgres-namespace"
-    description: "The PostgreSQL namespace"
-  storage-size:
-    default: "1Gi"
-    description: "Storage size for PostgreSQL"
-
-runs:
-  using: composite
-  steps:
-    - name: Install PostgreSQL
-      shell: bash
-      run: |
-        helm upgrade \
-          --install postgresql oci://registry-1.docker.io/bitnamicharts/postgresql \
-          --set replicaCount=${{ inputs.postgres-replicas }} \
-          --set global.security.allowInsecureImages=true \
-          --set image.registry=docker.io \
-          --set image.repository=greptime/postgresql \
-          --set image.tag=17.5.0-debian-12-r3 \
-          --version 16.7.4 \
-          --set persistence.size=${{ inputs.storage-size }} \
-          --set postgresql.username=greptimedb \
-          --set postgresql.password=admin \
-          --create-namespace \
-          -n ${{ inputs.namespace }}

11  .github/actions/sqlness-test/action.yml
@@ -57,14 +57,3 @@ runs:
         AWS_SECRET_ACCESS_KEY: ${{ inputs.aws-secret-access-key }}
       run: |
         aws s3 rm s3://${{ inputs.aws-ci-test-bucket }}/${{ inputs.data-root }} --recursive
-    - name: Export kind logs
-      if: failure()
-      shell: bash
-      run: kind export logs -n greptimedb-operator-e2e /tmp/kind
-    - name: Upload logs
-      if: failure()
-      uses: actions/upload-artifact@v4
-      with:
-        name: kind-logs
-        path: /tmp/kind
-        retention-days: 3

4  .github/actions/start-runner/action.yml
@@ -38,7 +38,7 @@ runs:
   steps:
     - name: Configure AWS credentials
       if: startsWith(inputs.runner, 'ec2')
-      uses: aws-actions/configure-aws-credentials@v4
+      uses: aws-actions/configure-aws-credentials@v2
       with:
         aws-access-key-id: ${{ inputs.aws-access-key-id }}
         aws-secret-access-key: ${{ inputs.aws-secret-access-key }}
@@ -56,7 +56,7 @@ runs:

     - name: Start EC2 runner
       if: startsWith(inputs.runner, 'ec2')
-      uses: machulav/ec2-github-runner@v2.3.8
+      uses: machulav/ec2-github-runner@v2
       id: start-linux-arm64-ec2-runner
       with:
         mode: start

4  .github/actions/stop-runner/action.yml
@@ -25,7 +25,7 @@ runs:
   steps:
     - name: Configure AWS credentials
       if: ${{ inputs.label && inputs.ec2-instance-id }}
-      uses: aws-actions/configure-aws-credentials@v4
+      uses: aws-actions/configure-aws-credentials@v2
       with:
         aws-access-key-id: ${{ inputs.aws-access-key-id }}
         aws-secret-access-key: ${{ inputs.aws-secret-access-key }}
@@ -33,7 +33,7 @@ runs:

     - name: Stop EC2 runner
       if: ${{ inputs.label && inputs.ec2-instance-id }}
-      uses: machulav/ec2-github-runner@v2.3.8
+      uses: machulav/ec2-github-runner@v2
       with:
         mode: stop
         label: ${{ inputs.label }}
.github/actions/upload-artifacts/action.yml (27 changes, vendored)
@@ -4,9 +4,9 @@ inputs:
   artifacts-dir:
    description: Directory to store artifacts
    required: true
-  target-files:
-    description: The multiple target files to upload, separated by comma
-    required: false
+  target-file:
+    description: The path of the target artifact
+    required: true
   version:
    description: Version of the artifact
    required: true
@@ -18,21 +18,16 @@ runs:
   using: composite
   steps:
    - name: Create artifacts directory
-      if: ${{ inputs.target-files != '' }}
      working-directory: ${{ inputs.working-dir }}
      shell: bash
      run: |
-        set -e
-        mkdir -p ${{ inputs.artifacts-dir }}
-        IFS=',' read -ra FILES <<< "${{ inputs.target-files }}"
-        for file in "${FILES[@]}"; do
-          cp "$file" ${{ inputs.artifacts-dir }}/
-        done
+        mkdir -p ${{ inputs.artifacts-dir }} && \
+        cp ${{ inputs.target-file }} ${{ inputs.artifacts-dir }}
 
    # The compressed artifacts will use the following layout:
-    # greptime-linux-amd64-v0.3.0.sha256sum
-    # greptime-linux-amd64-v0.3.0.tar.gz
-    # greptime-linux-amd64-v0.3.0
+    # greptime-linux-amd64-pyo3-v0.3.0.sha256sum
+    # greptime-linux-amd64-pyo3-v0.3.0.tar.gz
+    # greptime-linux-amd64-pyo3-v0.3.0
    #   └── greptime
    - name: Compress artifacts and calculate checksum
      working-directory: ${{ inputs.working-dir }}
@@ -54,15 +49,15 @@ runs:
      run: Get-FileHash ${{ inputs.artifacts-dir }}.tar.gz -Algorithm SHA256 | select -ExpandProperty Hash > ${{ inputs.artifacts-dir }}.sha256sum
 
    # Note: The artifacts will be double zip compressed(related issue: https://github.com/actions/upload-artifact/issues/39).
-    # However, when we use 'actions/download-artifact' to download the artifacts, it will be automatically unzipped.
+    # However, when we use 'actions/download-artifact@v3' to download the artifacts, it will be automatically unzipped.
    - name: Upload artifacts
-      uses: actions/upload-artifact@v4
+      uses: actions/upload-artifact@v3
      with:
        name: ${{ inputs.artifacts-dir }}
        path: ${{ inputs.working-dir }}/${{ inputs.artifacts-dir }}.tar.gz
 
    - name: Upload checksum
-      uses: actions/upload-artifact@v4
+      uses: actions/upload-artifact@v3
      with:
        name: ${{ inputs.artifacts-dir }}.sha256sum
        path: ${{ inputs.working-dir }}/${{ inputs.artifacts-dir }}.sha256sum
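A note on the newer (base) side of this action: `target-files` is one comma-separated string that the step splits with IFS before copying. A minimal standalone sketch of that pattern, with hypothetical file paths:

    mkdir -p artifacts
    target_files="target/release/greptime,target/release/sqlness-runner"   # hypothetical input
    IFS=',' read -ra FILES <<< "$target_files"   # split on commas into an array
    for file in "${FILES[@]}"; do
      echo "would copy: $file to artifacts/"    # the real step runs cp here
    done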
.github/cargo-blacklist.txt (3 changes, vendored)
@@ -1,3 +0,0 @@
-native-tls
-openssl
-aws-lc-sys
.github/doc-label-config.yml (4 changes, vendored, new file)
@@ -0,0 +1,4 @@
+Doc not needed:
+  - '- \[x\] This PR does not require documentation updates.'
+Doc update required:
+  - '- \[ \] This PR does not require documentation updates.'
.github/labeler.yaml (15 changes, vendored)
@@ -1,15 +0,0 @@
-ci:
-  - changed-files:
-      - any-glob-to-any-file: .github/**
-
-docker:
-  - changed-files:
-      - any-glob-to-any-file: docker/**
-
-documentation:
-  - changed-files:
-      - any-glob-to-any-file: docs/**
-
-dashboard:
-  - changed-files:
-      - any-glob-to-any-file: grafana/**
.github/pr-title-breaking-change-label-config.json (13 changes, vendored, new file)
@@ -0,0 +1,13 @@
+{
+  "LABEL": {
+    "name": "breaking change",
+    "color": "D93F0B"
+  },
+  "CHECKS": {
+    "regexp": "^(?:(?!!:).)*$",
+    "ignoreLabels": [
+      "ignore-title"
+    ],
+    "alwaysPassCI": true
+  }
+}
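The regexp added here is easy to misread: `^(?:(?!!:).)*$` uses a negative lookahead, so it matches only titles that never contain the `!:` breaking-change marker; a title containing it fails the check and gets the label. A quick illustration (assumes GNU grep built with PCRE support for `-P`):

    echo 'feat!: drop legacy protocol' | grep -Pq '^(?:(?!!:).)*$' || echo 'breaking change'   # prints "breaking change"
    echo 'feat: add a new function'    | grep -Pq '^(?:(?!!:).)*$' && echo 'not breaking'      # prints "not breaking"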
.github/pr-title-checker-config.json (12 changes, vendored, new file)
@@ -0,0 +1,12 @@
+{
+  "LABEL": {
+    "name": "Invalid PR Title",
+    "color": "B60205"
+  },
+  "CHECKS": {
+    "regexp": "^(feat|fix|test|refactor|chore|style|docs|perf|build|ci|revert)(\\(.*\\))?\\!?:.*",
+    "ignoreLabels": [
+      "ignore-title"
+    ]
+  }
+}
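After JSON unescaping, the title pattern above is `^(feat|fix|test|refactor|chore|style|docs|perf|build|ci|revert)(\(.*\))?!?:.*`, i.e. a conventional-commit prefix, an optional scope, an optional `!`, and a colon. A quick way to test a candidate title against it (sketch; the title itself is made up):

    title='feat(mito): add inverted index cache'
    echo "$title" | grep -Eq '^(feat|fix|test|refactor|chore|style|docs|perf|build|ci|revert)(\(.*\))?!?:.*' && echo valid || echo invalid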
.github/pull_request_template.md (22 changes, vendored)
@@ -1,11 +1,8 @@
-I hereby agree to the terms of the [GreptimeDB CLA](https://github.com/GreptimeTeam/.github/blob/main/CLA.md).
+I hereby agree to the terms of the [GreptimeDB CLA](https://gist.github.com/xtang/6378857777706e568c1949c7578592cc)
 
-## Refer to a related PR or issue link (optional)
-
 ## What's changed and what's your intention?
 
-<!--
-__!!! DO NOT LEAVE THIS BLOCK EMPTY !!!__
+_PLEASE DO NOT LEAVE THIS EMPTY !!!_
 
 Please explain IN DETAIL what the changes are in this PR and why they are needed:
 
@@ -13,14 +10,11 @@ Please explain IN DETAIL what the changes are in this PR and why they are needed
 - How does this PR work? Need a brief introduction for the changed logic (optional)
 - Describe clearly one logical change and avoid lazy messages (optional)
 - Describe any limitations of the current code (optional)
-- Describe if this PR will break **API or data compatibility** (optional)
--->
 
-## PR Checklist
-Please convert it to a draft if some of the following conditions are not met.
+## Checklist
 
 - [ ] I have written the necessary rustdoc comments.
 - [ ] I have added the necessary unit tests and integration tests.
-- [ ] This PR requires documentation updates.
-- [ ] API changes are backward compatible.
-- [ ] Schema or data changes are backward compatible.
+- [ ] This PR does not require documentation updates.
+
+## Refer to a related PR or issue link (optional)
.github/scripts/check-install-script.sh (14 changes, vendored)
@@ -1,14 +0,0 @@
-#!/bin/sh
-
-set -e
-
-# Get the latest version of github.com/GreptimeTeam/greptimedb
-VERSION=$(curl -s https://api.github.com/repos/GreptimeTeam/greptimedb/releases/latest | jq -r '.tag_name')
-
-echo "Downloading the latest version: $VERSION"
-
-# Download the install script
-curl -fsSL https://raw.githubusercontent.com/greptimeteam/greptimedb/main/scripts/install.sh | sh -s $VERSION
-
-# Execute the `greptime` command
-./greptime --version
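The deleted script relies on `sh -s $VERSION` to forward the tag into the piped install script: with `-s`, sh reads commands from stdin and treats the remaining arguments as positional parameters. A tiny demonstration (the version string is hypothetical):

    echo 'echo "would install greptime $1"' | sh -s v0.9.0   # prints: would install greptime v0.9.0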
.github/scripts/check-version.sh (42 changes, vendored)
@@ -1,42 +0,0 @@
-#!/bin/bash
-
-# Get current version
-CURRENT_VERSION=$1
-if [ -z "$CURRENT_VERSION" ]; then
-  echo "Error: Failed to get current version"
-  exit 1
-fi
-
-# Get the latest version from GitHub Releases
-API_RESPONSE=$(curl -s "https://api.github.com/repos/GreptimeTeam/greptimedb/releases/latest")
-
-if [ -z "$API_RESPONSE" ] || [ "$(echo "$API_RESPONSE" | jq -r '.message')" = "Not Found" ]; then
-  echo "Error: Failed to fetch latest version from GitHub"
-  exit 1
-fi
-
-# Get the latest version
-LATEST_VERSION=$(echo "$API_RESPONSE" | jq -r '.tag_name')
-
-if [ -z "$LATEST_VERSION" ] || [ "$LATEST_VERSION" = "null" ]; then
-  echo "Error: No valid version found in GitHub releases"
-  exit 1
-fi
-
-# Cleaned up version number format (removed possible 'v' prefix and -nightly suffix)
-CLEAN_CURRENT=$(echo "$CURRENT_VERSION" | sed 's/^v//' | sed 's/-nightly-.*//')
-CLEAN_LATEST=$(echo "$LATEST_VERSION" | sed 's/^v//' | sed 's/-nightly-.*//')
-
-echo "Current version: $CLEAN_CURRENT"
-echo "Latest release version: $CLEAN_LATEST"
-
-# Use sort -V to compare versions
-HIGHER_VERSION=$(printf "%s\n%s" "$CLEAN_CURRENT" "$CLEAN_LATEST" | sort -V | tail -n1)
-
-if [ "$HIGHER_VERSION" = "$CLEAN_CURRENT" ]; then
-  echo "Current version ($CLEAN_CURRENT) is NEWER than or EQUAL to latest ($CLEAN_LATEST)"
-  echo "is-current-version-latest=true" >> $GITHUB_OUTPUT
-else
-  echo "Current version ($CLEAN_CURRENT) is OLDER than latest ($CLEAN_LATEST)"
-  echo "is-current-version-latest=false" >> $GITHUB_OUTPUT
-fi
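The deleted comparison hinges on `sort -V`, which orders version strings component by component rather than lexically, so the current version is "latest" exactly when it sorts last. A one-line illustration with made-up versions:

    printf '%s\n%s\n' '0.9.5' '0.10.0' | sort -V | tail -n1   # prints 0.10.0; a plain lexical sort would pick 0.9.5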
.github/scripts/create-version.sh (29 changes, vendored)
@@ -8,25 +8,24 @@ set -e
 # - If it's a nightly build, the version is 'nightly-YYYYMMDD-$(git rev-parse --short HEAD)', like 'nightly-20230712-e5b243c'.
 # create_version ${GIHUB_EVENT_NAME} ${NEXT_RELEASE_VERSION} ${NIGHTLY_RELEASE_PREFIX}
 function create_version() {
-  # Read from environment variables.
+  # Read from envrionment variables.
   if [ -z "$GITHUB_EVENT_NAME" ]; then
-    echo "GITHUB_EVENT_NAME is empty" >&2
+    echo "GITHUB_EVENT_NAME is empty"
     exit 1
   fi
 
   if [ -z "$NEXT_RELEASE_VERSION" ]; then
-    echo "NEXT_RELEASE_VERSION is empty, use version from Cargo.toml" >&2
-    # NOTE: Need a `v` prefix for the version string.
-    export NEXT_RELEASE_VERSION=v$(grep '^version = ' Cargo.toml | cut -d '"' -f 2 | head -n 1)
+    echo "NEXT_RELEASE_VERSION is empty"
+    exit 1
   fi
 
   if [ -z "$NIGHTLY_RELEASE_PREFIX" ]; then
-    echo "NIGHTLY_RELEASE_PREFIX is empty" >&2
+    echo "NIGHTLY_RELEASE_PREFIX is empty"
     exit 1
   fi
 
   # Reuse $NEXT_RELEASE_VERSION to identify whether it's a nightly build.
-  # It will be like 'nightly-20230808-7d0d8dc6'.
+  # It will be like 'nigtly-20230808-7d0d8dc6'.
   if [ "$NEXT_RELEASE_VERSION" = nightly ]; then
     echo "$NIGHTLY_RELEASE_PREFIX-$(date "+%Y%m%d")-$(git rev-parse --short HEAD)"
     exit 0
@@ -36,7 +35,7 @@ function create_version() {
   # It will be like 'dev-2023080819-f0e7216c'.
   if [ "$NEXT_RELEASE_VERSION" = dev ]; then
     if [ -z "$COMMIT_SHA" ]; then
-      echo "COMMIT_SHA is empty in dev build" >&2
+      echo "COMMIT_SHA is empty in dev build"
       exit 1
     fi
     echo "dev-$(date "+%Y%m%d-%s")-$(echo "$COMMIT_SHA" | cut -c1-8)"
@@ -46,7 +45,7 @@ function create_version() {
   # Note: Only output 'version=xxx' to stdout when everything is ok, so that it can be used in GitHub Actions Outputs.
   if [ "$GITHUB_EVENT_NAME" = push ]; then
     if [ -z "$GITHUB_REF_NAME" ]; then
-      echo "GITHUB_REF_NAME is empty in push event" >&2
+      echo "GITHUB_REF_NAME is empty in push event"
       exit 1
     fi
     echo "$GITHUB_REF_NAME"
@@ -55,15 +54,15 @@ function create_version() {
   elif [ "$GITHUB_EVENT_NAME" = schedule ]; then
     echo "$NEXT_RELEASE_VERSION-$NIGHTLY_RELEASE_PREFIX-$(date "+%Y%m%d")"
   else
-    echo "Unsupported GITHUB_EVENT_NAME: $GITHUB_EVENT_NAME" >&2
+    echo "Unsupported GITHUB_EVENT_NAME: $GITHUB_EVENT_NAME"
     exit 1
   fi
 }
 
 # You can run as following examples:
-# GITHUB_EVENT_NAME=push NEXT_RELEASE_VERSION=v0.4.0 NIGHTLY_RELEASE_PREFIX=nightly GITHUB_REF_NAME=v0.3.0 ./create-version.sh
-# GITHUB_EVENT_NAME=workflow_dispatch NEXT_RELEASE_VERSION=v0.4.0 NIGHTLY_RELEASE_PREFIX=nightly ./create-version.sh
-# GITHUB_EVENT_NAME=schedule NEXT_RELEASE_VERSION=v0.4.0 NIGHTLY_RELEASE_PREFIX=nightly ./create-version.sh
-# GITHUB_EVENT_NAME=schedule NEXT_RELEASE_VERSION=nightly NIGHTLY_RELEASE_PREFIX=nightly ./create-version.sh
-# GITHUB_EVENT_NAME=workflow_dispatch COMMIT_SHA=f0e7216c4bb6acce9b29a21ec2d683be2e3f984a NEXT_RELEASE_VERSION=dev NIGHTLY_RELEASE_PREFIX=nightly ./create-version.sh
+# GITHUB_EVENT_NAME=push NEXT_RELEASE_VERSION=v0.4.0 NIGHTLY_RELEASE_PREFIX=nigtly GITHUB_REF_NAME=v0.3.0 ./create-version.sh
+# GITHUB_EVENT_NAME=workflow_dispatch NEXT_RELEASE_VERSION=v0.4.0 NIGHTLY_RELEASE_PREFIX=nigtly ./create-version.sh
+# GITHUB_EVENT_NAME=schedule NEXT_RELEASE_VERSION=v0.4.0 NIGHTLY_RELEASE_PREFIX=nigtly ./create-version.sh
+# GITHUB_EVENT_NAME=schedule NEXT_RELEASE_VERSION=nightly NIGHTLY_RELEASE_PREFIX=nigtly ./create-version.sh
+# GITHUB_EVENT_NAME=workflow_dispatch COMMIT_SHA=f0e7216c4bb6acce9b29a21ec2d683be2e3f984a NEXT_RELEASE_VERSION=dev NIGHTLY_RELEASE_PREFIX=nigtly ./create-version.sh
 create_version
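The fallback added on the base side derives the version from the workspace manifest instead of failing. In isolation, the extraction pipeline behaves like this (sketch; assumes a Cargo.toml whose first `version = "..."` line is the workspace version, with a made-up value):

    printf 'version = "0.15.0"\n' > /tmp/Cargo.toml                 # stand-in manifest
    v=$(grep '^version = ' /tmp/Cargo.toml | cut -d '"' -f 2 | head -n 1)
    echo "v$v"                                                      # -> v0.15.0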
.github/scripts/deploy-greptimedb.sh (52 changes, vendored)
@@ -3,18 +3,14 @@
 set -e
 set -o pipefail
 
-KUBERNETES_VERSION="${KUBERNETES_VERSION:-v1.32.0}"
+KUBERNETES_VERSION="${KUBERNETES_VERSION:-v1.24.0}"
 ENABLE_STANDALONE_MODE="${ENABLE_STANDALONE_MODE:-true}"
 DEFAULT_INSTALL_NAMESPACE=${DEFAULT_INSTALL_NAMESPACE:-default}
 GREPTIMEDB_IMAGE_TAG=${GREPTIMEDB_IMAGE_TAG:-latest}
-GREPTIMEDB_OPERATOR_IMAGE_TAG=${GREPTIMEDB_OPERATOR_IMAGE_TAG:-v0.5.1}
-GREPTIMEDB_INITIALIZER_IMAGE_TAG="${GREPTIMEDB_OPERATOR_IMAGE_TAG}"
-GREPTIME_CHART="https://greptimeteam.github.io/helm-charts/"
 ETCD_CHART="oci://registry-1.docker.io/bitnamicharts/etcd"
-ETCD_CHART_VERSION="${ETCD_CHART_VERSION:-12.0.8}"
-ETCD_IMAGE_TAG="${ETCD_IMAGE_TAG:-3.6.1-debian-12-r3}"
+GREPTIME_CHART="https://greptimeteam.github.io/helm-charts/"
 
-# Create a cluster with 1 control-plane node and 5 workers.
+# Ceate a cluster with 1 control-plane node and 5 workers.
 function create_kind_cluster() {
   cat <<EOF | kind create cluster --name "${CLUSTER}" --image kindest/node:"$KUBERNETES_VERSION" --config=-
 kind: Cluster
@@ -39,16 +35,10 @@ function add_greptime_chart() {
 function deploy_etcd_cluster() {
   local namespace="$1"
 
-  helm upgrade --install etcd "$ETCD_CHART" \
-    --version "$ETCD_CHART_VERSION" \
-    --create-namespace \
+  helm install etcd "$ETCD_CHART" \
     --set replicaCount=3 \
     --set auth.rbac.create=false \
     --set auth.rbac.token.enabled=false \
-    --set global.security.allowInsecureImages=true \
-    --set image.registry=docker.io \
-    --set image.repository=greptime/etcd \
-    --set image.tag="$ETCD_IMAGE_TAG" \
     -n "$namespace"
 
   # Wait for etcd cluster to be ready.
@@ -58,9 +48,8 @@ function deploy_etcd_cluster() {
 # Deploy greptimedb-operator.
 function deploy_greptimedb_operator() {
   # Use the latest chart and image.
-  helm upgrade --install greptimedb-operator greptime/greptimedb-operator \
-    --create-namespace \
-    --set image.tag="$GREPTIMEDB_OPERATOR_IMAGE_TAG" \
+  helm install greptimedb-operator greptime/greptimedb-operator \
+    --set image.tag=latest \
     -n "$DEFAULT_INSTALL_NAMESPACE"
 
   # Wait for greptimedb-operator to be ready.
@@ -77,12 +66,9 @@ function deploy_greptimedb_cluster() {
 
   deploy_etcd_cluster "$install_namespace"
 
-  helm upgrade --install "$cluster_name" greptime/greptimedb-cluster \
-    --create-namespace \
+  helm install "$cluster_name" greptime/greptimedb-cluster \
     --set image.tag="$GREPTIMEDB_IMAGE_TAG" \
-    --set initializer.tag="$GREPTIMEDB_INITIALIZER_IMAGE_TAG" \
-    --set meta.backendStorage.etcd.endpoints="etcd.$install_namespace:2379" \
-    --set meta.backendStorage.etcd.storeKeyPrefix="$cluster_name" \
+    --set meta.etcdEndpoints="etcd.$install_namespace:2379" \
     -n "$install_namespace"
 
   # Wait for greptimedb cluster to be ready.
@@ -115,18 +101,15 @@ function deploy_greptimedb_cluster_with_s3_storage() {
 
   deploy_etcd_cluster "$install_namespace"
 
-  helm upgrade --install "$cluster_name" greptime/greptimedb-cluster -n "$install_namespace" \
-    --create-namespace \
+  helm install "$cluster_name" greptime/greptimedb-cluster -n "$install_namespace" \
     --set image.tag="$GREPTIMEDB_IMAGE_TAG" \
-    --set initializer.tag="$GREPTIMEDB_INITIALIZER_IMAGE_TAG" \
-    --set meta.backendStorage.etcd.endpoints="etcd.$install_namespace:2379" \
-    --set meta.backendStorage.etcd.storeKeyPrefix="$cluster_name" \
-    --set objectStorage.s3.bucket="$AWS_CI_TEST_BUCKET" \
-    --set objectStorage.s3.region="$AWS_REGION" \
-    --set objectStorage.s3.root="$DATA_ROOT" \
-    --set objectStorage.credentials.secretName=s3-credentials \
-    --set objectStorage.credentials.accessKeyId="$AWS_ACCESS_KEY_ID" \
-    --set objectStorage.credentials.secretAccessKey="$AWS_SECRET_ACCESS_KEY"
+    --set meta.etcdEndpoints="etcd.$install_namespace:2379" \
+    --set storage.s3.bucket="$AWS_CI_TEST_BUCKET" \
+    --set storage.s3.region="$AWS_REGION" \
+    --set storage.s3.root="$DATA_ROOT" \
+    --set storage.credentials.secretName=s3-credentials \
+    --set storage.credentials.accessKeyId="$AWS_ACCESS_KEY_ID" \
+    --set storage.credentials.secretAccessKey="$AWS_SECRET_ACCESS_KEY"
 
   # Wait for greptimedb cluster to be ready.
   while true; do
@@ -151,8 +134,7 @@ function deploy_greptimedb_cluster_with_s3_storage() {
 # Deploy standalone greptimedb.
 # It will expose cluster service ports as '34000', '34001', '34002', '34003' to local access.
 function deploy_standalone_greptimedb() {
-  helm upgrade --install greptimedb-standalone greptime/greptimedb-standalone \
-    --create-namespace \
+  helm install greptimedb-standalone greptime/greptimedb-standalone \
    --set image.tag="$GREPTIMEDB_IMAGE_TAG" \
    -n "$DEFAULT_INSTALL_NAMESPACE"
 
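Most of the base-side changes in this script swap `helm install` for `helm upgrade --install`, which installs the release when absent and upgrades it otherwise, so re-running the deploy script is idempotent. A minimal sketch of the difference (release and chart follow the script above):

    helm install etcd oci://registry-1.docker.io/bitnamicharts/etcd -n default              # fails if the 'etcd' release already exists
    helm upgrade --install etcd oci://registry-1.docker.io/bitnamicharts/etcd -n default    # safe to re-run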
.github/scripts/kind-with-registry.sh (66 changes, vendored)
@@ -1,66 +0,0 @@
-#!/usr/bin/env bash
-
-set -e
-set -o pipefail
-
-# 1. Create registry container unless it already exists
-reg_name='kind-registry'
-reg_port='5001'
-if [ "$(docker inspect -f '{{.State.Running}}' "${reg_name}" 2>/dev/null || true)" != 'true' ]; then
-  docker run \
-    -d --restart=always -p "127.0.0.1:${reg_port}:5000" --network bridge --name "${reg_name}" \
-    registry:2
-fi
-
-# 2. Create kind cluster with containerd registry config dir enabled
-# TODO: kind will eventually enable this by default and this patch will
-# be unnecessary.
-#
-# See:
-# https://github.com/kubernetes-sigs/kind/issues/2875
-# https://github.com/containerd/containerd/blob/main/docs/cri/config.md#registry-configuration
-# See: https://github.com/containerd/containerd/blob/main/docs/hosts.md
-cat <<EOF | kind create cluster --wait 2m --config=-
-kind: Cluster
-apiVersion: kind.x-k8s.io/v1alpha4
-containerdConfigPatches:
-- |-
-  [plugins."io.containerd.grpc.v1.cri".registry]
-    config_path = "/etc/containerd/certs.d"
-EOF
-
-# 3. Add the registry config to the nodes
-#
-# This is necessary because localhost resolves to loopback addresses that are
-# network-namespace local.
-# In other words: localhost in the container is not localhost on the host.
-#
-# We want a consistent name that works from both ends, so we tell containerd to
-# alias localhost:${reg_port} to the registry container when pulling images
-REGISTRY_DIR="/etc/containerd/certs.d/localhost:${reg_port}"
-for node in $(kind get nodes); do
-  docker exec "${node}" mkdir -p "${REGISTRY_DIR}"
-  cat <<EOF | docker exec -i "${node}" cp /dev/stdin "${REGISTRY_DIR}/hosts.toml"
-[host."http://${reg_name}:5000"]
-EOF
-done
-
-# 4. Connect the registry to the cluster network if not already connected
-# This allows kind to bootstrap the network but ensures they're on the same network
-if [ "$(docker inspect -f='{{json .NetworkSettings.Networks.kind}}' "${reg_name}")" = 'null' ]; then
-  docker network connect "kind" "${reg_name}"
-fi
-
-# 5. Document the local registry
-# https://github.com/kubernetes/enhancements/tree/master/keps/sig-cluster-lifecycle/generic/1755-communicating-a-local-registry
-cat <<EOF | kubectl apply -f -
-apiVersion: v1
-kind: ConfigMap
-metadata:
-  name: local-registry-hosting
-  namespace: kube-public
-data:
-  localRegistryHosting.v1: |
-    host: "localhost:${reg_port}"
-    help: "https://kind.sigs.k8s.io/docs/user/local-registry/"
-EOF
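With the deleted kind-with-registry setup running, images reach the cluster through the `localhost:5001` alias the script wires into containerd. Usage looked roughly like this (image name hypothetical; this follows the kind local-registry pattern the script links to):

    docker tag greptime/greptimedb:latest localhost:5001/greptimedb:latest
    docker push localhost:5001/greptimedb:latest
    kubectl run test --image=localhost:5001/greptimedb:latest   # nodes resolve the alias to the kind-registry container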
.github/scripts/package-lock.json (507 changes, generated, vendored)
@@ -1,507 +0,0 @@
(Deleted generated npm lockfile for "greptimedb-github-scripts", lockfileVersion 3, pinning @octokit/rest ^21.0.0, axios ^1.7.0, and their transitive dependencies. Large generated diffs are not rendered.)
.github/scripts/package.json (10 changes, vendored)
@@ -1,10 +0,0 @@
-{
-  "name": "greptimedb-github-scripts",
-  "version": "1.0.0",
-  "type": "module",
-  "description": "GitHub automation scripts for GreptimeDB",
-  "dependencies": {
-    "@octokit/rest": "^21.0.0",
-    "axios": "^1.7.0"
-  }
-}
.github/scripts/pr-review-reminder.js (152 changes, vendored)
@@ -1,152 +0,0 @@
-// Daily PR Review Reminder Script
-// Fetches open PRs from GreptimeDB repository and sends Slack notifications
-// to PR owners and assigned reviewers to keep review process moving.
-
-(async () => {
-  const { Octokit } = await import("@octokit/rest");
-  const { default: axios } = await import('axios');
-
-  // Configuration
-  const GITHUB_TOKEN = process.env.GITHUB_TOKEN;
-  const SLACK_WEBHOOK_URL = process.env.SLACK_PR_REVIEW_WEBHOOK_URL;
-  const REPO_OWNER = "GreptimeTeam";
-  const REPO_NAME = "greptimedb";
-  const GITHUB_TO_SLACK = JSON.parse(process.env.GITHUBID_SLACKID_MAPPING || '{}');
-
-  // Debug: Print environment variable status
-  console.log("=== Environment Variables Debug ===");
-  console.log(`GITHUB_TOKEN: ${GITHUB_TOKEN ? 'Set ✓' : 'NOT SET ✗'}`);
-  console.log(`SLACK_PR_REVIEW_WEBHOOK_URL: ${SLACK_WEBHOOK_URL ? 'Set ✓' : 'NOT SET ✗'}`);
-  console.log(`GITHUBID_SLACKID_MAPPING: ${process.env.GITHUBID_SLACKID_MAPPING ? `Set ✓ (${Object.keys(GITHUB_TO_SLACK).length} mappings)` : 'NOT SET ✗'}`);
-  console.log("===================================\n");
-
-  const octokit = new Octokit({
-    auth: GITHUB_TOKEN
-  });
-
-  // Fetch all open PRs from the repository
-  async function fetchOpenPRs() {
-    try {
-      const prs = await octokit.pulls.list({
-        owner: REPO_OWNER,
-        repo: REPO_NAME,
-        state: "open",
-        per_page: 100,
-        sort: "created",
-        direction: "asc"
-      });
-      return prs.data.filter((pr) => !pr.draft);
-    } catch (error) {
-      console.error("Error fetching PRs:", error);
-      return [];
-    }
-  }
-
-  // Convert GitHub username to Slack mention or fallback to GitHub username
-  function toSlackMention(githubUser) {
-    const slackUserId = GITHUB_TO_SLACK[githubUser];
-    return slackUserId ? `<@${slackUserId}>` : `@${githubUser}`;
-  }
-
-  // Calculate days since PR was opened
-  function getDaysOpen(createdAt) {
-    const created = new Date(createdAt);
-    const now = new Date();
-    const diffMs = now - created;
-    const days = Math.floor(diffMs / (1000 * 60 * 60 * 24));
-    return days;
-  }
-
-  // Build Slack notification message from PR list
-  function buildSlackMessage(prs) {
-    if (prs.length === 0) {
-      return "*🎉 Great job! No pending PRs for review.*";
-    }
-
-    // Separate PRs by age threshold (14 days)
-    const criticalPRs = [];
-    const recentPRs = [];
-
-    prs.forEach(pr => {
-      const daysOpen = getDaysOpen(pr.created_at);
-      if (daysOpen >= 14) {
-        criticalPRs.push(pr);
-      } else {
-        recentPRs.push(pr);
-      }
-    });
-
-    const lines = [
-      `*🔍 Daily PR Review Reminder 🔍*`,
-      `Found *${criticalPRs.length}* critical PR(s) (14+ days old)\n`
-    ];
-
-    // Show critical PRs (14+ days) in detail
-    if (criticalPRs.length > 0) {
-      criticalPRs.forEach((pr, index) => {
-        const owner = toSlackMention(pr.user.login);
-        const reviewers = pr.requested_reviewers || [];
-        const reviewerMentions = reviewers.map(r => toSlackMention(r.login)).join(", ");
-        const daysOpen = getDaysOpen(pr.created_at);
-
-        const prInfo = `${index + 1}. <${pr.html_url}|#${pr.number}: ${pr.title}>`;
-        const ageInfo = `   🔴 Opened *${daysOpen}* day(s) ago`;
-        const ownerInfo = `   👤 Owner: ${owner}`;
-        const reviewerInfo = reviewers.length > 0
-          ? `   👁️ Reviewers: ${reviewerMentions}`
-          : `   👁️ Reviewers: _Not assigned yet_`;
-
-        lines.push(prInfo);
-        lines.push(ageInfo);
-        lines.push(ownerInfo);
-        lines.push(reviewerInfo);
-        lines.push(""); // Empty line between PRs
-      });
-    }
-
-    lines.push("_Let's keep the code review process moving! 🚀_");
-
-    return lines.join("\n");
-  }
-
-  // Send notification to Slack webhook
-  async function sendSlackNotification(message) {
-    if (!SLACK_WEBHOOK_URL) {
-      console.log("⚠️ SLACK_PR_REVIEW_WEBHOOK_URL not configured. Message preview:");
-      console.log("=".repeat(60));
-      console.log(message);
-      console.log("=".repeat(60));
-      return;
-    }
-
-    try {
-      const response = await axios.post(SLACK_WEBHOOK_URL, {
-        text: message
-      });
-
-      if (response.status !== 200) {
-        throw new Error(`Slack API returned status ${response.status}`);
-      }
-      console.log("Slack notification sent successfully.");
-    } catch (error) {
-      console.error("Error sending Slack notification:", error);
-      throw error;
-    }
-  }
-
-  // Main execution flow
-  async function run() {
-    console.log(`Fetching open PRs from ${REPO_OWNER}/${REPO_NAME}...`);
-    const prs = await fetchOpenPRs();
-    console.log(`Found ${prs.length} open PR(s).`);
-
-    const message = buildSlackMessage(prs);
-    console.log("Sending Slack notification...");
-    await sendSlackNotification(message);
-  }
-
-  run().catch(error => {
-    console.error("Script execution failed:", error);
-    process.exit(1);
-  });
-})();
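The reminder script's 14-day threshold is plain epoch arithmetic; the shell equivalent of its getDaysOpen is (sketch; assumes GNU date, with a made-up creation timestamp):

    created='2025-01-01T00:00:00Z'
    days=$(( ( $(date +%s) - $(date -d "$created" +%s) ) / 86400 ))
    [ "$days" -ge 14 ] && echo "critical PR" || echo "recent PR"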
.github/scripts/pull-test-deps-images.sh (34 changes, vendored)
@@ -1,34 +0,0 @@
-#!/bin/bash
-
-# This script is used to pull the test dependency images that are stored in public ECR one by one to avoid rate limiting.
-
-set -e
-
-MAX_RETRIES=3
-
-IMAGES=(
-  "greptime/zookeeper:3.7"
-  "greptime/kafka:3.9.0-debian-12-r1"
-  "greptime/etcd:3.6.1-debian-12-r3"
-  "greptime/minio:2024"
-  "greptime/mysql:5.7"
-)
-
-for image in "${IMAGES[@]}"; do
-  for ((attempt=1; attempt<=MAX_RETRIES; attempt++)); do
-    if docker pull "$image"; then
-      # Successfully pulled the image.
-      break
-    else
-      # Use some simple exponential backoff to avoid rate limiting.
-      if [ $attempt -lt $MAX_RETRIES ]; then
-        sleep_seconds=$((attempt * 5))
-        echo "Attempt $attempt failed for $image, waiting $sleep_seconds seconds"
-        sleep $sleep_seconds # 5s, 10s delays
-      else
-        echo "Failed to pull $image after $MAX_RETRIES attempts"
-        exit 1
-      fi
-    fi
-  done
-done
.github/scripts/update-dev-builder-version.sh (37 changes, vendored)
@@ -1,37 +0,0 @@
-#!/bin/bash
-
-DEV_BUILDER_IMAGE_TAG=$1
-
-update_dev_builder_version() {
-  if [ -z "$DEV_BUILDER_IMAGE_TAG" ]; then
-    echo "Error: Should specify the dev-builder image tag"
-    exit 1
-  fi
-
-  # Configure Git configs.
-  git config --global user.email greptimedb-ci@greptime.com
-  git config --global user.name greptimedb-ci
-
-  # Checkout a new branch.
-  BRANCH_NAME="ci/update-dev-builder-$(date +%Y%m%d%H%M%S)"
-  git checkout -b $BRANCH_NAME
-
-  # Update the dev-builder image tag in the Makefile.
-  sed -i "s/DEV_BUILDER_IMAGE_TAG ?=.*/DEV_BUILDER_IMAGE_TAG ?= ${DEV_BUILDER_IMAGE_TAG}/g" Makefile
-
-  # Commit the changes.
-  git add Makefile
-  git commit -s -m "ci: update dev-builder image tag"
-  git push origin $BRANCH_NAME
-
-  # Create a Pull Request.
-  gh pr create \
-    --title "ci: update dev-builder image tag" \
-    --body "This PR updates the dev-builder image tag" \
-    --base main \
-    --head $BRANCH_NAME \
-    --reviewer zyy17 \
-    --reviewer daviderli614
-}
-
-update_dev_builder_version
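The sed invocation in the deleted script rewrites the Makefile's default tag in place; the substitution itself behaves like this (both tags are hypothetical):

    echo 'DEV_BUILDER_IMAGE_TAG ?= 2024-01-15-abc123' | \
      sed "s/DEV_BUILDER_IMAGE_TAG ?=.*/DEV_BUILDER_IMAGE_TAG ?= 2024-06-30-def456/g"
    # -> DEV_BUILDER_IMAGE_TAG ?= 2024-06-30-def456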
49 .github/scripts/update-helm-charts-version.sh vendored
@@ -1,49 +0,0 @@
-#!/bin/bash
-
-set -e
-
-VERSION=${VERSION}
-GITHUB_TOKEN=${GITHUB_TOKEN}
-
-update_helm_charts_version() {
-  # Configure Git configs.
-  git config --global user.email update-helm-charts-version@greptime.com
-  git config --global user.name update-helm-charts-version
-
-  # Clone helm-charts repository.
-  git clone "https://x-access-token:${GITHUB_TOKEN}@github.com/GreptimeTeam/helm-charts.git"
-  cd helm-charts
-
-  # Set default remote for gh CLI
-  gh repo set-default GreptimeTeam/helm-charts
-
-  # Checkout a new branch.
-  BRANCH_NAME="chore/greptimedb-${VERSION}"
-  git checkout -b $BRANCH_NAME
-
-  # Update version.
-  make update-version CHART=greptimedb-cluster VERSION=${VERSION}
-  make update-version CHART=greptimedb-standalone VERSION=${VERSION}
-
-  # Update docs.
-  make docs
-
-  # Commit the changes.
-  git add .
-  git commit -s -m "chore: Update GreptimeDB version to ${VERSION}"
-  git push origin $BRANCH_NAME
-
-  # Create a Pull Request.
-  gh pr create \
-    --title "chore: Update GreptimeDB version to ${VERSION}" \
-    --body "This PR updates the GreptimeDB version." \
-    --base main \
-    --head $BRANCH_NAME \
-    --reviewer sunng87 \
-    --reviewer daviderli614 \
-    --reviewer killme2008 \
-    --reviewer evenyag \
-    --reviewer fengjiachun
-}
-
-update_helm_charts_version
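The clone line in the script above uses the standard `x-access-token` form for authenticating an HTTPS git clone with a GitHub token. A minimal sketch of the pattern (owner and repo are placeholders):

    # Non-interactive authenticated clone; the token is read from the environment.
    git clone "https://x-access-token:${GITHUB_TOKEN}@github.com/OWNER/REPO.git"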
@@ -1,45 +0,0 @@
-#!/bin/bash
-
-set -e
-
-VERSION=${VERSION}
-GITHUB_TOKEN=${GITHUB_TOKEN}
-
-update_homebrew_greptime_version() {
-  # Configure Git configs.
-  git config --global user.email update-greptime-version@greptime.com
-  git config --global user.name update-greptime-version
-
-  # Clone helm-charts repository.
-  git clone "https://x-access-token:${GITHUB_TOKEN}@github.com/GreptimeTeam/homebrew-greptime.git"
-  cd homebrew-greptime
-
-  # Set default remote for gh CLI
-  gh repo set-default GreptimeTeam/homebrew-greptime
-
-  # Checkout a new branch.
-  BRANCH_NAME="chore/greptimedb-${VERSION}"
-  git checkout -b $BRANCH_NAME
-
-  # Update version.
-  make update-greptime-version VERSION=${VERSION}
-
-  # Commit the changes.
-  git add .
-  git commit -s -m "chore: Update GreptimeDB version to ${VERSION}"
-  git push origin $BRANCH_NAME
-
-  # Create a Pull Request.
-  gh pr create \
-    --title "chore: Update GreptimeDB version to ${VERSION}" \
-    --body "This PR updates the GreptimeDB version." \
-    --base main \
-    --head $BRANCH_NAME \
-    --reviewer sunng87 \
-    --reviewer daviderli614 \
-    --reviewer killme2008 \
-    --reviewer evenyag \
-    --reviewer fengjiachun
-}
-
-update_homebrew_greptime_version
16 .github/scripts/upload-artifacts-to-s3.sh vendored
@@ -27,13 +27,13 @@ function upload_artifacts() {
  # ├── latest-version.txt
  # ├── latest-nightly-version.txt
  # ├── v0.1.0
- # │   ├── greptime-darwin-amd64-v0.1.0.sha256sum
- # │   └── greptime-darwin-amd64-v0.1.0.tar.gz
+ # │   ├── greptime-darwin-amd64-pyo3-v0.1.0.sha256sum
+ # │   └── greptime-darwin-amd64-pyo3-v0.1.0.tar.gz
  # └── v0.2.0
-​ #     ├── greptime-darwin-amd64-v0.2.0.sha256sum
-​ #     └── greptime-darwin-amd64-v0.2.0.tar.gz
+ #     ├── greptime-darwin-amd64-pyo3-v0.2.0.sha256sum
+ #     └── greptime-darwin-amd64-pyo3-v0.2.0.tar.gz
  find "$ARTIFACTS_DIR" -type f \( -name "*.tar.gz" -o -name "*.sha256sum" \) | while IFS= read -r file; do
-    s5cmd cp \
+    aws s3 cp \
      "$file" "s3://$AWS_S3_BUCKET/$RELEASE_DIRS/$VERSION/$(basename "$file")"
  done
}
@@ -41,11 +41,11 @@ function upload_artifacts() {
# Updates the latest version information in AWS S3 if UPDATE_VERSION_INFO is true.
function update_version_info() {
  if [ "$UPDATE_VERSION_INFO" == "true" ]; then
-    # If it's the official release(like v1.0.0, v1.0.1, v1.0.2, etc.), update latest-version.txt.
+    # If it's the officail release(like v1.0.0, v1.0.1, v1.0.2, etc.), update latest-version.txt.
    if [[ "$VERSION" =~ ^v[0-9]+\.[0-9]+\.[0-9]+$ ]]; then
      echo "Updating latest-version.txt"
      echo "$VERSION" > latest-version.txt
-      s5cmd cp \
+      aws s3 cp \
        latest-version.txt "s3://$AWS_S3_BUCKET/$RELEASE_DIRS/latest-version.txt"
    fi

@@ -53,7 +53,7 @@ function update_version_info() {
    if [[ "$VERSION" == *"nightly"* ]]; then
      echo "Updating latest-nightly-version.txt"
      echo "$VERSION" > latest-nightly-version.txt
-      s5cmd cp \
+      aws s3 cp \
        latest-nightly-version.txt "s3://$AWS_S3_BUCKET/$RELEASE_DIRS/latest-nightly-version.txt"
    fi
  fi
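The functional change in these hunks is the copy tool: the base branch uploads with s5cmd while develop uses the AWS CLI. For a plain single-object upload the two take the same source/destination arguments; a hedged side-by-side with an illustrative bucket and key:

    s5cmd cp artifact.tar.gz "s3://example-bucket/releases/v0.2.0/artifact.tar.gz"
    aws s3 cp artifact.tar.gz "s3://example-bucket/releases/v0.2.0/artifact.tar.gz"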
18 .github/workflows/apidoc.yml vendored
@@ -1,7 +1,7 @@
on:
  push:
    branches:
-      - main
+      - develop
  paths-ignore:
    - 'docs/**'
    - 'config/**'
@@ -12,17 +12,20 @@ on:

name: Build API docs

+env:
+  RUST_TOOLCHAIN: nightly-2023-12-19
+
jobs:
  apidoc:
-    runs-on: ubuntu-latest
+    runs-on: ubuntu-20.04
    steps:
-      - uses: actions/checkout@v4
-        with:
-          persist-credentials: false
-      - uses: arduino/setup-protoc@v3
+      - uses: actions/checkout@v3
+      - uses: arduino/setup-protoc@v1
        with:
          repo-token: ${{ secrets.GITHUB_TOKEN }}
-      - uses: actions-rust-lang/setup-rust-toolchain@v1
+      - uses: dtolnay/rust-toolchain@master
+        with:
+          toolchain: ${{ env.RUST_TOOLCHAIN }}
      - run: cargo doc --workspace --no-deps --document-private-items
      - run: |
          cat <<EOF > target/doc/index.html
@@ -37,4 +40,3 @@ jobs:
        uses: JamesIves/github-pages-deploy-action@v4
        with:
          folder: target/doc
-          single-commit: true
35 .github/workflows/dependency-check.yml vendored
@@ -1,35 +0,0 @@
-name: Check Dependencies
-
-on:
-  pull_request:
-    branches:
-      - main
-
-jobs:
-  check-dependencies:
-    runs-on: ubuntu-latest
-
-    steps:
-      - name: Checkout code
-        uses: actions/checkout@v4
-        with:
-          persist-credentials: false
-
-      - name: Set up Rust
-        uses: actions-rust-lang/setup-rust-toolchain@v1
-
-      - name: Run cargo tree
-        run: cargo tree --prefix none > dependencies.txt
-
-      - name: Extract dependency names
-        run: awk '{print $1}' dependencies.txt > dependency_names.txt
-
-      - name: Check for blacklisted crates
-        run: |
-          while read -r dep; do
-            if grep -qFx "$dep" dependency_names.txt; then
-              echo "Blacklisted crate '$dep' found in dependencies."
-              exit 1
-            fi
-          done < .github/cargo-blacklist.txt
-          echo "No blacklisted crates found."
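The core of the deleted workflow can be reproduced locally; a minimal sketch, assuming `.github/cargo-blacklist.txt` holds one crate name per line (as the workflow's `done <` redirect implies):

    # Flatten the dependency tree to names, then fail on any blacklisted crate.
    cargo tree --prefix none | awk '{print $1}' > dependency_names.txt
    while read -r dep; do
        if grep -qFx "$dep" dependency_names.txt; then
            echo "Blacklisted crate '$dep' found in dependencies."
            exit 1
        fi
    done < .github/cargo-blacklist.txt
    echo "No blacklisted crates found."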
105 .github/workflows/dev-build.yml vendored
@@ -4,11 +4,10 @@ name: GreptimeDB Development Build
on:
  workflow_dispatch: # Allows you to run this workflow manually.
    inputs:
-      large-page-size:
-        description: Build GreptimeDB with large page size (65536).
-        type: boolean
+      repository:
+        description: The public repository to build
        required: false
-        default: false
+        default: GreptimeTeam/greptimedb
      commit: # Note: We only pull the source code and use the current workflow to build the artifacts.
        description: The commit to build
        required: true
@@ -17,11 +16,11 @@ on:
        description: The runner uses to build linux-amd64 artifacts
        default: ec2-c6i.4xlarge-amd64
        options:
-          - ubuntu-22.04
-          - ubuntu-22.04-8-cores
-          - ubuntu-22.04-16-cores
-          - ubuntu-22.04-32-cores
-          - ubuntu-22.04-64-cores
+          - ubuntu-20.04
+          - ubuntu-20.04-8-cores
+          - ubuntu-20.04-16-cores
+          - ubuntu-20.04-32-cores
+          - ubuntu-20.04-64-cores
          - ec2-c6i.xlarge-amd64 # 4C8G
          - ec2-c6i.2xlarge-amd64 # 8C16G
          - ec2-c6i.4xlarge-amd64 # 16C32G
@@ -56,11 +55,6 @@ on:
        description: Build and push images to DockerHub and ACR
        required: false
        default: true
-      upload_artifacts_to_s3:
-        type: boolean
-        description: Whether upload artifacts to s3
-        required: false
-        default: false
      cargo_profile:
        type: choice
        description: The cargo profile to use in building GreptimeDB.
@@ -82,6 +76,9 @@ env:

  NIGHTLY_RELEASE_PREFIX: nightly

+  # Use the different image name to avoid conflict with the release images.
+  IMAGE_NAME: greptimedb-dev
+
  # The source code will check out in the following path: '${WORKING_DIR}/dev/greptime'.
  CHECKOUT_GREPTIMEDB_PATH: dev/greptimedb

@@ -89,7 +86,7 @@ jobs:
  allocate-runners:
    name: Allocate runners
    if: ${{ github.repository == 'GreptimeTeam/greptimedb' }}
-    runs-on: ubuntu-latest
+    runs-on: ubuntu-20.04
    outputs:
      linux-amd64-runner: ${{ steps.start-linux-amd64-runner.outputs.label }}
      linux-arm64-runner: ${{ steps.start-linux-arm64-runner.outputs.label }}
@@ -104,10 +101,9 @@ jobs:
      version: ${{ steps.create-version.outputs.version }}
    steps:
      - name: Checkout
-        uses: actions/checkout@v4
+        uses: actions/checkout@v3
        with:
          fetch-depth: 0
-          persist-credentials: false

      - name: Create version
        id: create-version
@@ -159,18 +155,16 @@ jobs:
    runs-on: ${{ needs.allocate-runners.outputs.linux-amd64-runner }}
    steps:
      - name: Checkout
-        uses: actions/checkout@v4
+        uses: actions/checkout@v3
        with:
          fetch-depth: 0
-          persist-credentials: false

      - name: Checkout greptimedb
-        uses: actions/checkout@v4
+        uses: actions/checkout@v3
        with:
          repository: ${{ inputs.repository }}
          ref: ${{ inputs.commit }}
          path: ${{ env.CHECKOUT_GREPTIMEDB_PATH }}
-          persist-credentials: true

      - uses: ./.github/actions/build-linux-artifacts
        with:
@@ -180,9 +174,6 @@ jobs:
          disable-run-tests: ${{ env.DISABLE_RUN_TESTS }}
          dev-mode: true # Only build the standard greptime binary.
          working-dir: ${{ env.CHECKOUT_GREPTIMEDB_PATH }}
-          image-registry: ${{ vars.ECR_IMAGE_REGISTRY }}
-          image-namespace: ${{ vars.ECR_IMAGE_NAMESPACE }}
-          large-page-size: ${{ inputs.large-page-size }}

  build-linux-arm64-artifacts:
    name: Build linux-arm64 artifacts
@@ -193,18 +184,16 @@ jobs:
    runs-on: ${{ needs.allocate-runners.outputs.linux-arm64-runner }}
    steps:
      - name: Checkout
-        uses: actions/checkout@v4
+        uses: actions/checkout@v3
        with:
          fetch-depth: 0
-          persist-credentials: false

      - name: Checkout greptimedb
-        uses: actions/checkout@v4
+        uses: actions/checkout@v3
        with:
          repository: ${{ inputs.repository }}
          ref: ${{ inputs.commit }}
          path: ${{ env.CHECKOUT_GREPTIMEDB_PATH }}
-          persist-credentials: true

      - uses: ./.github/actions/build-linux-artifacts
        with:
@@ -214,9 +203,6 @@ jobs:
          disable-run-tests: ${{ env.DISABLE_RUN_TESTS }}
          dev-mode: true # Only build the standard greptime binary.
          working-dir: ${{ env.CHECKOUT_GREPTIMEDB_PATH }}
-          image-registry: ${{ vars.ECR_IMAGE_REGISTRY }}
-          image-namespace: ${{ vars.ECR_IMAGE_NAMESPACE }}
-          large-page-size: ${{ inputs.large-page-size }}

  release-images-to-dockerhub:
    name: Build and push images to DockerHub
@@ -226,34 +212,26 @@ jobs:
      build-linux-amd64-artifacts,
      build-linux-arm64-artifacts,
    ]
-    runs-on: ubuntu-latest
+    runs-on: ubuntu-20.04
    outputs:
      build-result: ${{ steps.set-build-result.outputs.build-result }}
    steps:
-      - uses: actions/checkout@v4
+      - uses: actions/checkout@v3
        with:
          fetch-depth: 0
-          persist-credentials: false

      - name: Build and push images to dockerhub
        uses: ./.github/actions/build-images
        with:
          image-registry: docker.io
          image-namespace: ${{ vars.IMAGE_NAMESPACE }}
-          image-name: ${{ vars.DEV_BUILD_IMAGE_NAME }}
+          image-name: ${{ env.IMAGE_NAME }}
          image-registry-username: ${{ secrets.DOCKERHUB_USERNAME }}
          image-registry-password: ${{ secrets.DOCKERHUB_TOKEN }}
          version: ${{ needs.allocate-runners.outputs.version }}
          push-latest-tag: false # Don't push the latest tag to registry.
          dev-mode: true # Only build the standard images.

-      - name: Echo Docker image tag to step summary
-        run: |
-          echo "## Docker Image Tag" >> $GITHUB_STEP_SUMMARY
-          echo "Image Tag: \`${{ needs.allocate-runners.outputs.version }}\`" >> $GITHUB_STEP_SUMMARY
-          echo "Full Image Name: \`docker.io/${{ vars.IMAGE_NAMESPACE }}/${{ vars.DEV_BUILD_IMAGE_NAME }}:${{ needs.allocate-runners.outputs.version }}\`" >> $GITHUB_STEP_SUMMARY
-          echo "Pull Command: \`docker pull docker.io/${{ vars.IMAGE_NAMESPACE }}/${{ vars.DEV_BUILD_IMAGE_NAME }}:${{ needs.allocate-runners.outputs.version }}\`" >> $GITHUB_STEP_SUMMARY
-
      - name: Set build result
        id: set-build-result
        run: |
@@ -266,20 +244,19 @@ jobs:
      allocate-runners,
      release-images-to-dockerhub,
    ]
-    runs-on: ubuntu-latest
+    runs-on: ubuntu-20.04
    continue-on-error: true
    steps:
-      - uses: actions/checkout@v4
+      - uses: actions/checkout@v3
        with:
          fetch-depth: 0
-          persist-credentials: false

      - name: Release artifacts to CN region
        uses: ./.github/actions/release-cn-artifacts
        with:
          src-image-registry: docker.io
          src-image-namespace: ${{ vars.IMAGE_NAMESPACE }}
-          src-image-name: ${{ vars.DEV_BUILD_IMAGE_NAME }}
+          src-image-name: ${{ env.IMAGE_NAME }}
          dst-image-registry-username: ${{ secrets.ALICLOUD_USERNAME }}
          dst-image-registry-password: ${{ secrets.ALICLOUD_PASSWORD }}
          dst-image-registry: ${{ vars.ACR_IMAGE_REGISTRY }}
@@ -289,7 +266,6 @@ jobs:
          aws-cn-access-key-id: ${{ secrets.AWS_CN_ACCESS_KEY_ID }}
          aws-cn-secret-access-key: ${{ secrets.AWS_CN_SECRET_ACCESS_KEY }}
          aws-cn-region: ${{ vars.AWS_RELEASE_BUCKET_REGION }}
-          upload-to-s3: ${{ inputs.upload_artifacts_to_s3 }}
          dev-mode: true # Only build the standard images(exclude centos images).
          push-latest-tag: false # Don't push the latest tag to registry.
          update-version-info: false # Don't update the version info in S3.
@@ -298,17 +274,16 @@ jobs:
    name: Stop linux-amd64 runner
    # Only run this job when the runner is allocated.
    if: ${{ always() }}
-    runs-on: ubuntu-latest
+    runs-on: ubuntu-20.04
    needs: [
      allocate-runners,
      build-linux-amd64-artifacts,
    ]
    steps:
      - name: Checkout
-        uses: actions/checkout@v4
+        uses: actions/checkout@v3
        with:
          fetch-depth: 0
-          persist-credentials: false

      - name: Stop EC2 runner
        uses: ./.github/actions/stop-runner
@@ -324,17 +299,16 @@ jobs:
    name: Stop linux-arm64 runner
    # Only run this job when the runner is allocated.
    if: ${{ always() }}
-    runs-on: ubuntu-latest
+    runs-on: ubuntu-20.04
    needs: [
      allocate-runners,
      build-linux-arm64-artifacts,
    ]
    steps:
      - name: Checkout
-        uses: actions/checkout@v4
+        uses: actions/checkout@v3
        with:
          fetch-depth: 0
-          persist-credentials: false

      - name: Stop EC2 runner
        uses: ./.github/actions/stop-runner
@@ -347,40 +321,25 @@ jobs:
          github-token: ${{ secrets.GH_PERSONAL_ACCESS_TOKEN }}

  notification:
-    if: ${{ github.repository == 'GreptimeTeam/greptimedb' && always() }} # Not requiring successful dependent jobs, always run.
+    if: ${{ always() }} # Not requiring successful dependent jobs, always run.
    name: Send notification to Greptime team
    needs: [
      release-images-to-dockerhub
    ]
-    runs-on: ubuntu-latest
-    permissions:
-      issues: write
+    runs-on: ubuntu-20.04
    env:
      SLACK_WEBHOOK_URL: ${{ secrets.SLACK_WEBHOOK_URL_DEVELOP_CHANNEL }}
    steps:
-      - uses: actions/checkout@v4
-        with:
-          fetch-depth: 0
-          persist-credentials: false
-      - uses: ./.github/actions/setup-cyborg
-      - name: Report CI status
-        id: report-ci-status
-        working-directory: cyborg
-        run: pnpm tsx bin/report-ci-failure.ts
-        env:
-          GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
-          CI_REPORT_STATUS: ${{ needs.release-images-to-dockerhub.outputs.build-result == 'success' }}
-      - name: Notify dev build successful result
+      - name: Notifiy nightly build successful result
        uses: slackapi/slack-github-action@v1.23.0
        if: ${{ needs.release-images-to-dockerhub.outputs.build-result == 'success' }}
        with:
          payload: |
            {"text": "GreptimeDB's ${{ env.NEXT_RELEASE_VERSION }} build has completed successfully."}

-      - name: Notify dev build failed result
+      - name: Notifiy nightly build failed result
        uses: slackapi/slack-github-action@v1.23.0
        if: ${{ needs.release-images-to-dockerhub.outputs.build-result != 'success' }}
        with:
          payload: |
-            {"text": "GreptimeDB's ${{ env.NEXT_RELEASE_VERSION }} build has failed, please check ${{ steps.report-ci-status.outputs.html_url }}."}
+            {"text": "GreptimeDB's ${{ env.NEXT_RELEASE_VERSION }} build has failed, please check 'https://github.com/GreptimeTeam/greptimedb/actions/workflows/${{ env.NEXT_RELEASE_VERSION }}-build.yml'."}
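The removed "Echo Docker image tag to step summary" step relies on GitHub Actions' $GITHUB_STEP_SUMMARY file, to which any step can append Markdown. A minimal sketch of the mechanism (the tag value is illustrative):

    # Inside a workflow step: lines appended here render on the job summary page.
    echo "## Docker Image Tag" >> "$GITHUB_STEP_SUMMARY"
    echo "Image Tag: \`v0.0.0-dev-20240101\`" >> "$GITHUB_STEP_SUMMARY"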
858 .github/workflows/develop.yml vendored
@@ -1,9 +1,18 @@
on:
-  schedule:
-    - cron: "0 15 * * 1-5"
  merge_group:
  pull_request:
-    types: [ opened, synchronize, reopened, ready_for_review ]
+    types: [opened, synchronize, reopened, ready_for_review]
+    paths-ignore:
+      - 'docs/**'
+      - 'config/**'
+      - '**.md'
+      - '.dockerignore'
+      - 'docker/**'
+      - '.gitignore'
+  push:
+    branches:
+      - develop
+      - main
    paths-ignore:
      - 'docs/**'
      - 'config/**'
@@ -11,8 +20,6 @@ on:
      - '.dockerignore'
      - 'docker/**'
      - '.gitignore'
-      - 'grafana/**'
-      - 'Makefile'
  workflow_dispatch:

name: CI
@@ -21,845 +28,174 @@ concurrency:
  group: ${{ github.workflow }}-${{ github.head_ref || github.run_id }}
  cancel-in-progress: true

-jobs:
-  check-typos-and-docs:
-    if: ${{ github.repository == 'GreptimeTeam/greptimedb' }}
-    name: Check typos and docs
-    runs-on: ubuntu-latest
-    steps:
-      - uses: actions/checkout@v4
-        with:
-          persist-credentials: false
-      - uses: crate-ci/typos@master
-      - name: Check the config docs
-        run: |
-          make config-docs && \
-          git diff --name-only --exit-code ./config/config.md \
-          || (echo "'config/config.md' is not up-to-date, please run 'make config-docs'." && exit 1)
+env:
+  RUST_TOOLCHAIN: nightly-2023-12-19

-  license-header-check:
-    if: ${{ github.repository == 'GreptimeTeam/greptimedb' }}
-    runs-on: ubuntu-latest
-    name: Check License Header
+jobs:
+  typos:
+    name: Spell Check with Typos
+    runs-on: ubuntu-20.04
    steps:
-      - uses: actions/checkout@v4
-        with:
-          persist-credentials: false
-      - uses: korandoru/hawkeye@v5
+      - uses: actions/checkout@v3
+      - uses: crate-ci/typos@v1.13.10

  check:
-    if: ${{ github.repository == 'GreptimeTeam/greptimedb' }}
    name: Check
+    if: github.event.pull_request.draft == false
    runs-on: ${{ matrix.os }}
    strategy:
      matrix:
-        os: [ ubuntu-latest ]
+        os: [ windows-latest-8-cores, ubuntu-20.04 ]
    timeout-minutes: 60
    steps:
-      - uses: actions/checkout@v4
-        with:
-          persist-credentials: false
-      - uses: arduino/setup-protoc@v3
+      - uses: actions/checkout@v3
+      - uses: arduino/setup-protoc@v1
        with:
          repo-token: ${{ secrets.GITHUB_TOKEN }}
-      - uses: actions-rust-lang/setup-rust-toolchain@v1
+      - uses: dtolnay/rust-toolchain@master
+        with:
+          toolchain: ${{ env.RUST_TOOLCHAIN }}
      - name: Rust Cache
        uses: Swatinem/rust-cache@v2
-        with:
-          # Shares across multiple jobs
-          # Shares with `Clippy` job
-          shared-key: "check-lint"
-          cache-all-crates: "true"
-          save-if: ${{ github.ref == 'refs/heads/main' }}
      - name: Run cargo check
        run: cargo check --locked --workspace --all-targets

  toml:
-    if: ${{ github.repository == 'GreptimeTeam/greptimedb' }}
    name: Toml Check
-    runs-on: ubuntu-latest
+    if: github.event.pull_request.draft == false
+    runs-on: ubuntu-20.04
    timeout-minutes: 60
    steps:
-      - uses: actions/checkout@v4
+      - uses: actions/checkout@v3
+      - uses: dtolnay/rust-toolchain@master
        with:
-          persist-credentials: false
-      - uses: actions-rust-lang/setup-rust-toolchain@v1
+          toolchain: stable
+      - name: Rust Cache
+        uses: Swatinem/rust-cache@v2
      - name: Install taplo
-        run: cargo +stable install taplo-cli --version ^0.9 --locked --force
+        run: cargo +stable install taplo-cli --version ^0.8 --locked
      - name: Run taplo
        run: taplo format --check

-  build:
-    if: ${{ github.repository == 'GreptimeTeam/greptimedb' }}
-    name: Build GreptimeDB binaries
-    runs-on: ${{ matrix.os }}
-    strategy:
-      matrix:
-        os: [ ubuntu-latest ]
-    timeout-minutes: 60
-    steps:
-      - uses: actions/checkout@v4
-        with:
-          persist-credentials: false
-      - uses: arduino/setup-protoc@v3
-        with:
-          repo-token: ${{ secrets.GITHUB_TOKEN }}
-      - uses: actions-rust-lang/setup-rust-toolchain@v1
-      - uses: Swatinem/rust-cache@v2
-        with:
-          # Shares across multiple jobs
-          shared-key: "build-binaries"
-          cache-all-crates: "true"
-          save-if: ${{ github.ref == 'refs/heads/main' }}
-      - name: Install cargo-gc-bin
-        shell: bash
-        run: cargo install cargo-gc-bin --force
-      - name: Build greptime binaries
-        shell: bash
-        # `cargo gc` will invoke `cargo build` with specified args
-        run: cargo gc -- --bin greptime --bin sqlness-runner --features "pg_kvbackend,mysql_kvbackend"
-      - name: Pack greptime binaries
-        shell: bash
-        run: |
-          mkdir bins && \
-          mv ./target/debug/greptime bins && \
-          mv ./target/debug/sqlness-runner bins
-      - name: Print greptime binaries info
-        run: ls -lh bins
-      - name: Upload artifacts
-        uses: ./.github/actions/upload-artifacts
-        with:
-          artifacts-dir: bins
-          version: current
-
-  fuzztest:
-    if: ${{ github.repository == 'GreptimeTeam/greptimedb' }}
-    name: Fuzz Test
-    needs: build
-    runs-on: ubuntu-latest
-    timeout-minutes: 60
-    strategy:
-      fail-fast: false
-      matrix:
-        target: [ "fuzz_create_table", "fuzz_alter_table", "fuzz_create_database", "fuzz_create_logical_table", "fuzz_alter_logical_table", "fuzz_insert", "fuzz_insert_logical_table" ]
-    steps:
-      - name: Remove unused software
-        run: |
-          echo "Disk space before:"
-          df -h
-          [[ -d /usr/share/dotnet ]] && sudo rm -rf /usr/share/dotnet
-          [[ -d /usr/local/lib/android ]] && sudo rm -rf /usr/local/lib/android
-          [[ -d /opt/ghc ]] && sudo rm -rf /opt/ghc
-          [[ -d /opt/hostedtoolcache/CodeQL ]] && sudo rm -rf /opt/hostedtoolcache/CodeQL
-          sudo docker image prune --all --force
-          sudo docker builder prune -a
-          echo "Disk space after:"
-          df -h
-      - uses: actions/checkout@v4
-        with:
-          persist-credentials: false
-      - uses: arduino/setup-protoc@v3
-        with:
-          repo-token: ${{ secrets.GITHUB_TOKEN }}
-      - uses: actions-rust-lang/setup-rust-toolchain@v1
-      - name: Set Rust Fuzz
-        shell: bash
-        run: |
-          sudo apt-get install -y libfuzzer-14-dev
-          rustup install nightly
-          cargo +nightly install cargo-fuzz cargo-gc-bin --force
-      - name: Download pre-built binaries
-        uses: actions/download-artifact@v4
-        with:
-          name: bins
-          path: .
-      - name: Unzip binaries
-        run: |
-          tar -xvf ./bins.tar.gz
-          rm ./bins.tar.gz
-      - name: Run GreptimeDB
-        run: |
-          ./bins/greptime standalone start&
-      - name: Fuzz Test
-        uses: ./.github/actions/fuzz-test
-        env:
-          CUSTOM_LIBFUZZER_PATH: /usr/lib/llvm-14/lib/libFuzzer.a
-          GT_MYSQL_ADDR: 127.0.0.1:4002
-        with:
-          target: ${{ matrix.target }}
-          max-total-time: 120
-
-  unstable-fuzztest:
-    if: ${{ github.repository == 'GreptimeTeam/greptimedb' }}
-    name: Unstable Fuzz Test
-    needs: build-greptime-ci
-    runs-on: ubuntu-latest
-    timeout-minutes: 60
-    strategy:
-      fail-fast: false
-      matrix:
-        target: [ "unstable_fuzz_create_table_standalone" ]
-    steps:
-      - name: Remove unused software
-        run: |
-          echo "Disk space before:"
-          df -h
-          [[ -d /usr/share/dotnet ]] && sudo rm -rf /usr/share/dotnet
-          [[ -d /usr/local/lib/android ]] && sudo rm -rf /usr/local/lib/android
-          [[ -d /opt/ghc ]] && sudo rm -rf /opt/ghc
-          [[ -d /opt/hostedtoolcache/CodeQL ]] && sudo rm -rf /opt/hostedtoolcache/CodeQL
-          sudo docker image prune --all --force
-          sudo docker builder prune -a
-          echo "Disk space after:"
-          df -h
-      - uses: actions/checkout@v4
-        with:
-          persist-credentials: false
-      - uses: arduino/setup-protoc@v3
-        with:
-          repo-token: ${{ secrets.GITHUB_TOKEN }}
-      - uses: actions-rust-lang/setup-rust-toolchain@v1
-      - name: Set Rust Fuzz
-        shell: bash
-        run: |
-          sudo apt update && sudo apt install -y libfuzzer-14-dev
-          cargo install cargo-fuzz cargo-gc-bin --force
-      - name: Download pre-built binary
-        uses: actions/download-artifact@v4
-        with:
-          name: bin
-          path: .
-      - name: Unzip binary
-        run: |
-          tar -xvf ./bin.tar.gz
-          rm ./bin.tar.gz
-      - name: Run Fuzz Test
-        uses: ./.github/actions/fuzz-test
-        env:
-          CUSTOM_LIBFUZZER_PATH: /usr/lib/llvm-14/lib/libFuzzer.a
-          GT_MYSQL_ADDR: 127.0.0.1:4002
-          GT_FUZZ_BINARY_PATH: ./bin/greptime
-          GT_FUZZ_INSTANCE_ROOT_DIR: /tmp/unstable-greptime/
-        with:
-          target: ${{ matrix.target }}
-          max-total-time: 120
-          unstable: 'true'
-      - name: Upload unstable fuzz test logs
-        if: failure()
-        uses: actions/upload-artifact@v4
-        with:
-          name: unstable-fuzz-logs
-          path: /tmp/unstable-greptime/
-          retention-days: 3
-      - name: Describe pods
-        if: failure()
-        shell: bash
-        run: |
-          kubectl describe pod -n my-greptimedb
-
-  build-greptime-ci:
-    if: ${{ github.repository == 'GreptimeTeam/greptimedb' }}
-    name: Build GreptimeDB binary (profile-CI)
-    runs-on: ${{ matrix.os }}
-    strategy:
-      matrix:
-        os: [ ubuntu-latest ]
-    timeout-minutes: 60
-    steps:
-      - uses: actions/checkout@v4
-        with:
-          persist-credentials: false
-      - uses: arduino/setup-protoc@v3
-        with:
-          repo-token: ${{ secrets.GITHUB_TOKEN }}
-      - uses: actions-rust-lang/setup-rust-toolchain@v1
-      - uses: Swatinem/rust-cache@v2
-        with:
-          # Shares across multiple jobs
-          shared-key: "build-greptime-ci"
-          cache-all-crates: "true"
-          save-if: ${{ github.ref == 'refs/heads/main' }}
-      - name: Install cargo-gc-bin
-        shell: bash
-        run: cargo install cargo-gc-bin --force
-      - name: Build greptime binary
-        shell: bash
-        # `cargo gc` will invoke `cargo build` with specified args
-        run: cargo gc --profile ci -- --bin greptime --features "pg_kvbackend,mysql_kvbackend"
-      - name: Pack greptime binary
-        shell: bash
-        run: |
-          mkdir bin && \
-          mv ./target/ci/greptime bin
-      - name: Print greptime binaries info
-        run: ls -lh bin
-      - name: Upload artifacts
-        uses: ./.github/actions/upload-artifacts
-        with:
-          artifacts-dir: bin
-          version: current
-
-  distributed-fuzztest:
-    if: ${{ github.repository == 'GreptimeTeam/greptimedb' }}
-    name: Fuzz Test (Distributed, ${{ matrix.mode.name }}, ${{ matrix.target }})
-    runs-on: ubuntu-latest
-    needs: build-greptime-ci
-    timeout-minutes: 60
-    strategy:
-      fail-fast: false
-      matrix:
-        target: [ "fuzz_create_table", "fuzz_alter_table", "fuzz_create_database", "fuzz_create_logical_table", "fuzz_alter_logical_table", "fuzz_insert", "fuzz_insert_logical_table" ]
-        mode:
-          - name: "Remote WAL"
-            minio: true
-            kafka: true
-            values: "with-remote-wal.yaml"
-    steps:
-      - name: Remove unused software
-        run: |
-          echo "Disk space before:"
-          df -h
-          [[ -d /usr/share/dotnet ]] && sudo rm -rf /usr/share/dotnet
-          [[ -d /usr/local/lib/android ]] && sudo rm -rf /usr/local/lib/android
-          [[ -d /opt/ghc ]] && sudo rm -rf /opt/ghc
-          [[ -d /opt/hostedtoolcache/CodeQL ]] && sudo rm -rf /opt/hostedtoolcache/CodeQL
-          sudo docker image prune --all --force
-          sudo docker builder prune -a
-          echo "Disk space after:"
-          df -h
-      - uses: actions/checkout@v4
-        with:
-          persist-credentials: false
-      - name: Setup Kind
-        uses: ./.github/actions/setup-kind
-      - if: matrix.mode.minio
-        name: Setup Minio
-        uses: ./.github/actions/setup-minio
-      - if: matrix.mode.kafka
-        name: Setup Kafka cluster
-        uses: ./.github/actions/setup-kafka-cluster
-      - name: Setup Etcd cluster
-        uses: ./.github/actions/setup-etcd-cluster
-      # Prepares for fuzz tests
-      - uses: arduino/setup-protoc@v3
-        with:
-          repo-token: ${{ secrets.GITHUB_TOKEN }}
-      - uses: actions-rust-lang/setup-rust-toolchain@v1
-      - name: Set Rust Fuzz
-        shell: bash
-        run: |
-          sudo apt-get install -y libfuzzer-14-dev
-          rustup install nightly
-          cargo +nightly install cargo-fuzz cargo-gc-bin --force
-      # Downloads ci image
-      - name: Download pre-built binariy
-        uses: actions/download-artifact@v4
-        with:
-          name: bin
-          path: .
-      - name: Unzip binary
-        run: |
-          tar -xvf ./bin.tar.gz
-          rm ./bin.tar.gz
-      - name: Build and push GreptimeDB image
-        uses: ./.github/actions/build-and-push-ci-image
-      - name: Wait for etcd
-        run: |
-          kubectl wait \
-            --for=condition=Ready \
-            pod -l app.kubernetes.io/instance=etcd \
-            --timeout=120s \
-            -n etcd-cluster
-      - if: matrix.mode.minio
-        name: Wait for minio
-        run: |
-          kubectl wait \
-            --for=condition=Ready \
-            pod -l app=minio \
-            --timeout=120s \
-            -n minio
-      - if: matrix.mode.kafka
-        name: Wait for kafka
-        run: |
-          kubectl wait \
-            --for=condition=Ready \
-            pod -l app.kubernetes.io/instance=kafka \
-            --timeout=120s \
-            -n kafka-cluster
-      - name: Print etcd info
-        shell: bash
-        run: kubectl get all --show-labels -n etcd-cluster
-      # Setup cluster for test
-      - name: Setup GreptimeDB cluster
-        uses: ./.github/actions/setup-greptimedb-cluster
-        with:
-          image-registry: localhost:5001
-          values-filename: ${{ matrix.mode.values }}
-      - name: Port forward (mysql)
-        run: |
-          kubectl port-forward service/my-greptimedb-frontend 4002:4002 -n my-greptimedb&
-      - name: Fuzz Test
-        uses: ./.github/actions/fuzz-test
-        env:
-          CUSTOM_LIBFUZZER_PATH: /usr/lib/llvm-14/lib/libFuzzer.a
-          GT_MYSQL_ADDR: 127.0.0.1:4002
-        with:
-          target: ${{ matrix.target }}
-          max-total-time: 120
-      - name: Describe Nodes
-        if: failure()
-        shell: bash
-        run: |
-          kubectl describe nodes
-      - name: Describe pod
-        if: failure()
-        shell: bash
-        run: |
-          kubectl describe pod -n my-greptimedb
-      - name: Export kind logs
-        if: failure()
-        shell: bash
-        run: |
-          kind export logs /tmp/kind
-      - name: Upload logs
-        if: failure()
-        uses: actions/upload-artifact@v4
-        with:
-          name: fuzz-tests-kind-logs-${{ matrix.mode.name }}-${{ matrix.target }}
-          path: /tmp/kind
-          retention-days: 3
-      - name: Delete cluster
-        if: success()
-        shell: bash
-        run: |
-          kind delete cluster
-          docker stop $(docker ps -a -q)
-          docker rm $(docker ps -a -q)
-          docker system prune -f
-
-  distributed-fuzztest-with-chaos:
-    if: ${{ github.repository == 'GreptimeTeam/greptimedb' }}
-    name: Fuzz Test with Chaos (Distributed, ${{ matrix.mode.name }}, ${{ matrix.target }})
-    runs-on: ubuntu-latest
-    needs: build-greptime-ci
-    timeout-minutes: 60
-    strategy:
-      fail-fast: false
-      matrix:
-        target: ["fuzz_migrate_mito_regions", "fuzz_migrate_metric_regions", "fuzz_failover_mito_regions", "fuzz_failover_metric_regions"]
-        mode:
-          - name: "Remote WAL"
-            minio: true
-            kafka: true
-            values: "with-remote-wal.yaml"
-        include:
-          - target: "fuzz_migrate_mito_regions"
-            mode:
-              name: "Local WAL"
-              minio: true
-              kafka: false
-              values: "with-minio.yaml"
-          - target: "fuzz_migrate_metric_regions"
-            mode:
-              name: "Local WAL"
-              minio: true
-              kafka: false
-              values: "with-minio.yaml"
-    steps:
-      - name: Remove unused software
-        run: |
-          echo "Disk space before:"
-          df -h
-          [[ -d /usr/share/dotnet ]] && sudo rm -rf /usr/share/dotnet
-          [[ -d /usr/local/lib/android ]] && sudo rm -rf /usr/local/lib/android
-          [[ -d /opt/ghc ]] && sudo rm -rf /opt/ghc
-          [[ -d /opt/hostedtoolcache/CodeQL ]] && sudo rm -rf /opt/hostedtoolcache/CodeQL
-          sudo docker image prune --all --force
-          sudo docker builder prune -a
-          echo "Disk space after:"
-          df -h
-      - uses: actions/checkout@v4
-        with:
-          persist-credentials: false
-      - name: Setup Kind
-        uses: ./.github/actions/setup-kind
-      - name: Setup Chaos Mesh
-        uses: ./.github/actions/setup-chaos
-      - if: matrix.mode.minio
-        name: Setup Minio
-        uses: ./.github/actions/setup-minio
-      - if: matrix.mode.kafka
-        name: Setup Kafka cluster
-        uses: ./.github/actions/setup-kafka-cluster
-      - name: Setup Etcd cluster
-        uses: ./.github/actions/setup-etcd-cluster
-      # Prepares for fuzz tests
-      - uses: arduino/setup-protoc@v3
-        with:
-          repo-token: ${{ secrets.GITHUB_TOKEN }}
-      - uses: actions-rust-lang/setup-rust-toolchain@v1
-      - name: Set Rust Fuzz
-        shell: bash
-        run: |
-          sudo apt-get install -y libfuzzer-14-dev
-          rustup install nightly
-          cargo +nightly install cargo-fuzz cargo-gc-bin --force
-      # Downloads ci image
-      - name: Download pre-built binariy
-        uses: actions/download-artifact@v4
-        with:
-          name: bin
-          path: .
-      - name: Unzip binary
-        run: |
-          tar -xvf ./bin.tar.gz
-          rm ./bin.tar.gz
-      - name: Build and push GreptimeDB image
-        uses: ./.github/actions/build-and-push-ci-image
-      - name: Wait for etcd
-        run: |
-          kubectl wait \
-            --for=condition=Ready \
-            pod -l app.kubernetes.io/instance=etcd \
-            --timeout=120s \
-            -n etcd-cluster
-      - if: matrix.mode.minio
-        name: Wait for minio
-        run: |
-          kubectl wait \
-            --for=condition=Ready \
-            pod -l app=minio \
-            --timeout=120s \
-            -n minio
-      - if: matrix.mode.kafka
-        name: Wait for kafka
-        run: |
-          kubectl wait \
-            --for=condition=Ready \
-            pod -l app.kubernetes.io/instance=kafka \
-            --timeout=120s \
-            -n kafka-cluster
-      - name: Print etcd info
-        shell: bash
-        run: kubectl get all --show-labels -n etcd-cluster
-      # Setup cluster for test
-      - name: Setup GreptimeDB cluster
-        uses: ./.github/actions/setup-greptimedb-cluster
-        with:
-          image-registry: localhost:5001
-          values-filename: ${{ matrix.mode.values }}
-          enable-region-failover: ${{ matrix.mode.kafka }}
-      - name: Port forward (mysql)
-        run: |
-          kubectl port-forward service/my-greptimedb-frontend 4002:4002 -n my-greptimedb&
-      - name: Fuzz Test
-        uses: ./.github/actions/fuzz-test
-        env:
-          CUSTOM_LIBFUZZER_PATH: /usr/lib/llvm-14/lib/libFuzzer.a
-          GT_MYSQL_ADDR: 127.0.0.1:4002
-        with:
-          target: ${{ matrix.target }}
-          max-total-time: 120
-      - name: Describe Nodes
-        if: failure()
-        shell: bash
-        run: |
-          kubectl describe nodes
-      - name: Describe pods
-        if: failure()
-        shell: bash
-        run: |
-          kubectl describe pod -n my-greptimedb
-      - name: Export kind logs
-        if: failure()
-        shell: bash
-        run: |
-          kind export logs /tmp/kind
-      - name: Upload logs
-        if: failure()
-        uses: actions/upload-artifact@v4
-        with:
-          name: fuzz-tests-kind-logs-${{ matrix.mode.name }}-${{ matrix.target }}
-          path: /tmp/kind
-          retention-days: 3
-      - name: Delete cluster
-        if: success()
-        shell: bash
-        run: |
-          kind delete cluster
-          docker stop $(docker ps -a -q)
-          docker rm $(docker ps -a -q)
-          docker system prune -f

  sqlness:
-    if: ${{ github.repository == 'GreptimeTeam/greptimedb' }}
-    name: Sqlness Test (${{ matrix.mode.name }})
-    needs: build
+    name: Sqlness Test
+    if: github.event.pull_request.draft == false
    runs-on: ${{ matrix.os }}
    strategy:
-      fail-fast: false
      matrix:
-        os: [ ubuntu-latest ]
-        mode:
-          - name: "Basic"
-            opts: ""
-            kafka: false
-          - name: "Remote WAL"
-            opts: "-w kafka -k 127.0.0.1:9092"
-            kafka: true
-          - name: "PostgreSQL KvBackend"
-            opts: "--setup-pg"
-            kafka: false
-          - name: "MySQL Kvbackend"
-            opts: "--setup-mysql"
-            kafka: false
-          - name: "Flat format"
-            opts: "--enable-flat-format"
-            kafka: false
+        os: [ ubuntu-20.04-8-cores ]
    timeout-minutes: 60
    steps:
-      - uses: actions/checkout@v4
-        with:
-          persist-credentials: false
-      - if: matrix.mode.kafka
-        name: Setup kafka server
-        working-directory: tests-integration/fixtures
-        run: ../../.github/scripts/pull-test-deps-images.sh && docker compose up -d --wait kafka
-
-      - name: Download pre-built binaries
-        uses: actions/download-artifact@v4
-        with:
-          name: bins
-          path: .
-      - name: Unzip binaries
-        run: tar -xvf ./bins.tar.gz
+      - uses: actions/checkout@v3
+      - uses: arduino/setup-protoc@v1
+        with:
+          repo-token: ${{ secrets.GITHUB_TOKEN }}
+      - uses: dtolnay/rust-toolchain@master
+        with:
+          toolchain: ${{ env.RUST_TOOLCHAIN }}
+      - name: Rust Cache
+        uses: Swatinem/rust-cache@v2
      - name: Run sqlness
-        run: RUST_BACKTRACE=1 ./bins/sqlness-runner bare ${{ matrix.mode.opts }} -c ./tests/cases --bins-dir ./bins --preserve-state
+        run: cargo sqlness
      - name: Upload sqlness logs
-        if: failure()
-        uses: actions/upload-artifact@v4
+        if: always()
+        uses: actions/upload-artifact@v3
        with:
-          name: sqlness-logs-${{ matrix.mode.name }}
-          path: /tmp/sqlness*
+          name: sqlness-logs
+          path: ${{ runner.temp }}/greptime-*.log
          retention-days: 3

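Both sides of the sqlness hunk drive the same harness: develop builds and runs it through the `cargo sqlness` alias, while the base branch invokes a prebuilt runner. Based only on the command visible in the diff (with the matrix options placeholder dropped), the base branch's direct invocation looks like:

    RUST_BACKTRACE=1 ./bins/sqlness-runner bare -c ./tests/cases --bins-dir ./bins --preserve-state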
  fmt:
-    if: ${{ github.repository == 'GreptimeTeam/greptimedb' }}
    name: Rustfmt
-    runs-on: ubuntu-latest
+    if: github.event.pull_request.draft == false
+    runs-on: ubuntu-20.04
    timeout-minutes: 60
    steps:
-      - uses: actions/checkout@v4
-        with:
-          persist-credentials: false
-      - uses: arduino/setup-protoc@v3
+      - uses: actions/checkout@v3
+      - uses: arduino/setup-protoc@v1
        with:
          repo-token: ${{ secrets.GITHUB_TOKEN }}
-      - uses: actions-rust-lang/setup-rust-toolchain@v1
+      - uses: dtolnay/rust-toolchain@master
        with:
+          toolchain: ${{ env.RUST_TOOLCHAIN }}
          components: rustfmt
-      - name: Check format
-        run: make fmt-check
+      - name: Rust Cache
+        uses: Swatinem/rust-cache@v2
+      - name: Run cargo fmt
+        run: cargo fmt --all -- --check

  clippy:
-    if: ${{ github.repository == 'GreptimeTeam/greptimedb' }}
    name: Clippy
-    runs-on: ubuntu-latest
+    if: github.event.pull_request.draft == false
+    runs-on: ubuntu-20.04
    timeout-minutes: 60
    steps:
-      - uses: actions/checkout@v4
-        with:
-          persist-credentials: false
-      - uses: arduino/setup-protoc@v3
+      - uses: actions/checkout@v3
+      - uses: arduino/setup-protoc@v1
        with:
          repo-token: ${{ secrets.GITHUB_TOKEN }}
-      - uses: actions-rust-lang/setup-rust-toolchain@v1
+      - uses: dtolnay/rust-toolchain@master
        with:
+          toolchain: ${{ env.RUST_TOOLCHAIN }}
          components: clippy
      - name: Rust Cache
        uses: Swatinem/rust-cache@v2
-        with:
-          # Shares across multiple jobs
-          # Shares with `Check` job
-          shared-key: "check-lint"
-          cache-all-crates: "true"
-          save-if: ${{ github.ref == 'refs/heads/main' }}
      - name: Run cargo clippy
-        run: make clippy
+        run: cargo clippy --workspace --all-targets -- -D warnings

-  check-udeps:
-    if: ${{ github.repository == 'GreptimeTeam/greptimedb' }}
-    name: Check Unused Dependencies
-    runs-on: ubuntu-latest
-    timeout-minutes: 60
-    steps:
-      - uses: actions/checkout@v4
-        with:
-          persist-credentials: false
-      - uses: arduino/setup-protoc@v3
-        with:
-          repo-token: ${{ secrets.GITHUB_TOKEN }}
-      - uses: actions-rust-lang/setup-rust-toolchain@v1
-      - name: Rust Cache
-        uses: Swatinem/rust-cache@v2
-        with:
-          shared-key: "check-udeps"
-          cache-all-crates: "true"
-          save-if: ${{ github.ref == 'refs/heads/main' }}
-      - name: Install cargo-udeps
-        run: cargo install cargo-udeps --locked
-      - name: Check unused dependencies
-        run: make check-udeps
-
-  conflict-check:
-    if: ${{ github.repository == 'GreptimeTeam/greptimedb' }}
-    name: Check for conflict
-    runs-on: ubuntu-latest
-    steps:
-      - uses: actions/checkout@v4
-        with:
-          persist-credentials: false
-      - name: Merge Conflict Finder
-        uses: olivernybroe/action-conflict-finder@v4.0
-
-  test:
-    if: ${{ github.repository == 'GreptimeTeam/greptimedb' && github.event_name != 'merge_group' }}
-    runs-on: ubuntu-22.04-arm
-    timeout-minutes: 60
-    needs: [conflict-check, clippy, fmt, check-udeps]
-    steps:
-      - uses: actions/checkout@v4
-        with:
-          persist-credentials: false
-      - uses: arduino/setup-protoc@v3
-        with:
-          repo-token: ${{ secrets.GITHUB_TOKEN }}
-      - uses: rui314/setup-mold@v1
-      - name: Install toolchain
-        uses: actions-rust-lang/setup-rust-toolchain@v1
-        with:
-          cache: false
-      - name: Rust Cache
-        uses: Swatinem/rust-cache@v2
-        with:
-          # Shares cross multiple jobs
-          shared-key: "coverage-test"
-          cache-all-crates: "true"
-          save-if: ${{ github.ref == 'refs/heads/main' }}
-      - name: Install latest nextest release
-        uses: taiki-e/install-action@nextest
-
-      - name: Setup external services
-        working-directory: tests-integration/fixtures
-        run: ../../.github/scripts/pull-test-deps-images.sh && docker compose up -d --wait
-
-      - name: Run nextest cases
-        run: cargo nextest run --workspace -F dashboard -F pg_kvbackend -F mysql_kvbackend
-        env:
-          CARGO_BUILD_RUSTFLAGS: "-C link-arg=-fuse-ld=mold"
-          RUST_BACKTRACE: 1
-          RUST_MIN_STACK: 8388608 # 8MB
-          CARGO_INCREMENTAL: 0
-          GT_S3_BUCKET: ${{ vars.AWS_CI_TEST_BUCKET }}
-          GT_S3_ACCESS_KEY_ID: ${{ secrets.AWS_CI_TEST_ACCESS_KEY_ID }}
-          GT_S3_ACCESS_KEY: ${{ secrets.AWS_CI_TEST_SECRET_ACCESS_KEY }}
-          GT_S3_REGION: ${{ vars.AWS_CI_TEST_BUCKET_REGION }}
-          GT_MINIO_BUCKET: greptime
-          GT_MINIO_ACCESS_KEY_ID: superpower_ci_user
-          GT_MINIO_ACCESS_KEY: superpower_password
-          GT_MINIO_REGION: us-west-2
-          GT_MINIO_ENDPOINT_URL: http://127.0.0.1:9000
-          GT_ETCD_TLS_ENDPOINTS: https://127.0.0.1:2378
-          GT_ETCD_ENDPOINTS: http://127.0.0.1:2379
-          GT_POSTGRES_ENDPOINTS: postgres://greptimedb:admin@127.0.0.1:5432/postgres
-          GT_POSTGRES15_ENDPOINTS: postgres://test_user:test_password@127.0.0.1:5433/postgres
-          GT_POSTGRES15_SCHEMA: test_schema
-          GT_MYSQL_ENDPOINTS: mysql://greptimedb:admin@127.0.0.1:3306/mysql
-          GT_KAFKA_ENDPOINTS: 127.0.0.1:9092
-          GT_KAFKA_SASL_ENDPOINTS: 127.0.0.1:9093
-          UNITTEST_LOG_DIR: "__unittest_logs"

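The deleted `test` job reduces to a single nextest run against the docker-compose fixtures, configured entirely through GT_* environment variables. A trimmed local-reproduction sketch using only values from the job above (it assumes the fixture services are already running):

    # Mirror of the workflow's nextest step; endpoints are the fixture defaults above.
    export RUST_BACKTRACE=1
    export GT_ETCD_ENDPOINTS=http://127.0.0.1:2379
    export GT_KAFKA_ENDPOINTS=127.0.0.1:9092
    cargo nextest run --workspace -F dashboard -F pg_kvbackend -F mysql_kvbackend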
  coverage:
-    if: ${{ github.repository == 'GreptimeTeam/greptimedb' && github.event_name == 'merge_group' }}
-    runs-on: ubuntu-22.04-8-cores
+    if: github.event.pull_request.draft == false
+    runs-on: ubuntu-20.04-8-cores
    timeout-minutes: 60
    steps:
-      - uses: actions/checkout@v4
-        with:
-          persist-credentials: false
-      - uses: arduino/setup-protoc@v3
+      - uses: actions/checkout@v3
+      - uses: arduino/setup-protoc@v1
        with:
          repo-token: ${{ secrets.GITHUB_TOKEN }}
-      - uses: rui314/setup-mold@v1
-      - name: Install toolchain
-        uses: actions-rust-lang/setup-rust-toolchain@v1
+      - uses: KyleMayes/install-llvm-action@v1
        with:
-          components: llvm-tools
-          cache: false
+          version: "14.0"
+      - name: Install toolchain
+        uses: dtolnay/rust-toolchain@master
+        with:
+          toolchain: ${{ env.RUST_TOOLCHAIN }}
+          components: llvm-tools-preview
      - name: Rust Cache
        uses: Swatinem/rust-cache@v2
-        with:
-          # Shares cross multiple jobs
-          shared-key: "coverage-test"
-          save-if: ${{ github.ref == 'refs/heads/main' }}
      - name: Install latest nextest release
        uses: taiki-e/install-action@nextest
      - name: Install cargo-llvm-cov
        uses: taiki-e/install-action@cargo-llvm-cov
-      - name: Setup external services
-        working-directory: tests-integration/fixtures
-        run: ../../.github/scripts/pull-test-deps-images.sh && docker compose up -d --wait
+      - name: Install Python
+        uses: actions/setup-python@v4
+        with:
+          python-version: '3.10'
+      - name: Install PyArrow Package
+        run: pip install pyarrow
+      - name: Setup etcd server
+        working-directory: tests-integration/fixtures/etcd
+        run: docker compose -f docker-compose-standalone.yml up -d --wait
+      - name: Setup kafka server
+        working-directory: tests-integration/fixtures/kafka
+        run: docker compose -f docker-compose-standalone.yml up -d --wait
      - name: Run nextest cases
-        run: cargo llvm-cov nextest --workspace --lcov --output-path lcov.info -F dashboard -F pg_kvbackend -F mysql_kvbackend
+        run: cargo llvm-cov nextest --workspace --lcov --output-path lcov.info -F pyo3_backend -F dashboard
        env:
-          CARGO_BUILD_RUSTFLAGS: "-C link-arg=-fuse-ld=mold"
+          CARGO_BUILD_RUSTFLAGS: "-C link-arg=-fuse-ld=lld"
          RUST_BACKTRACE: 1
          CARGO_INCREMENTAL: 0
-          GT_S3_BUCKET: ${{ vars.AWS_CI_TEST_BUCKET }}
-          GT_S3_ACCESS_KEY_ID: ${{ secrets.AWS_CI_TEST_ACCESS_KEY_ID }}
-          GT_S3_ACCESS_KEY: ${{ secrets.AWS_CI_TEST_SECRET_ACCESS_KEY }}
-          GT_S3_REGION: ${{ vars.AWS_CI_TEST_BUCKET_REGION }}
+          GT_S3_BUCKET: ${{ secrets.S3_BUCKET }}
+          GT_S3_ACCESS_KEY_ID: ${{ secrets.S3_ACCESS_KEY_ID }}
+          GT_S3_ACCESS_KEY: ${{ secrets.S3_ACCESS_KEY }}
+          GT_S3_REGION: ${{ secrets.S3_REGION }}
-          GT_MINIO_BUCKET: greptime
-          GT_MINIO_ACCESS_KEY_ID: superpower_ci_user
-          GT_MINIO_ACCESS_KEY: superpower_password
-          GT_MINIO_REGION: us-west-2
-          GT_MINIO_ENDPOINT_URL: http://127.0.0.1:9000
-          GT_ETCD_TLS_ENDPOINTS: https://127.0.0.1:2378
          GT_ETCD_ENDPOINTS: http://127.0.0.1:2379
-          GT_POSTGRES_ENDPOINTS: postgres://greptimedb:admin@127.0.0.1:5432/postgres
-          GT_POSTGRES15_ENDPOINTS: postgres://test_user:test_password@127.0.0.1:5433/postgres
-          GT_POSTGRES15_SCHEMA: test_schema
-          GT_MYSQL_ENDPOINTS: mysql://greptimedb:admin@127.0.0.1:3306/mysql
          GT_KAFKA_ENDPOINTS: 127.0.0.1:9092
-          GT_KAFKA_SASL_ENDPOINTS: 127.0.0.1:9093
UNITTEST_LOG_DIR: "__unittest_logs"
|
UNITTEST_LOG_DIR: "__unittest_logs"
|
||||||
- name: Codecov upload
|
- name: Codecov upload
|
||||||
uses: codecov/codecov-action@v4
|
uses: codecov/codecov-action@v2
|
||||||
with:
|
with:
|
||||||
token: ${{ secrets.CODECOV_TOKEN }}
|
token: ${{ secrets.CODECOV_TOKEN }}
|
||||||
files: ./lcov.info
|
files: ./lcov.info
|
||||||
flags: rust
|
flags: rust
|
||||||
fail_ci_if_error: false
|
fail_ci_if_error: false
|
||||||
verbose: true
|
verbose: true
|
||||||
|
|
||||||
# compat:
|
|
||||||
# if: ${{ github.repository == 'GreptimeTeam/greptimedb' }}
|
|
||||||
# name: Compatibility Test
|
|
||||||
# needs: build
|
|
||||||
# runs-on: ubuntu-22.04
|
|
||||||
# timeout-minutes: 60
|
|
||||||
# steps:
|
|
||||||
# - uses: actions/checkout@v4
|
|
||||||
# - name: Download pre-built binaries
|
|
||||||
# uses: actions/download-artifact@v4
|
|
||||||
# with:
|
|
||||||
# name: bins
|
|
||||||
# path: .
|
|
||||||
# - name: Unzip binaries
|
|
||||||
# run: |
|
|
||||||
# mkdir -p ./bins/current
|
|
||||||
# tar -xvf ./bins.tar.gz --strip-components=1 -C ./bins/current
|
|
||||||
# - run: ./tests/compat/test-compat.sh 0.6.0
|
|
||||||
|
|||||||
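The two sides of the coverage job differ in toolchain setup and feature flags, but both funnel into the same cargo-llvm-cov invocation. A minimal local sketch of that run, assuming cargo-llvm-cov and cargo-nextest are already installed; the feature flags are copied from the newer side of the diff:

    # Sketch: reproduce the CI coverage run locally.
    rustup component add llvm-tools-preview
    cargo llvm-cov nextest --workspace --lcov --output-path lcov.info \
        -F dashboard -F pg_kvbackend -F mysql_kvbackend
    # lcov.info is the file the Codecov upload step consumes.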
39 .github/workflows/doc-issue.yml vendored Normal file
@@ -0,0 +1,39 @@
+name: Create Issue in downstream repos
+
+on:
+  issues:
+    types:
+      - labeled
+  pull_request_target:
+    types:
+      - labeled
+
+jobs:
+  doc_issue:
+    if: github.event.label.name == 'doc update required'
+    runs-on: ubuntu-20.04
+    steps:
+      - name: create an issue in doc repo
+        uses: dacbd/create-issue-action@main
+        with:
+          owner: GreptimeTeam
+          repo: docs
+          token: ${{ secrets.DOCS_REPO_TOKEN }}
+          title: Update docs for ${{ github.event.issue.title || github.event.pull_request.title }}
+          body: |
+            A document change request is generated from
+            ${{ github.event.issue.html_url || github.event.pull_request.html_url }}
+  cloud_issue:
+    if: github.event.label.name == 'cloud followup required'
+    runs-on: ubuntu-20.04
+    steps:
+      - name: create an issue in cloud repo
+        uses: dacbd/create-issue-action@main
+        with:
+          owner: GreptimeTeam
+          repo: greptimedb-cloud
+          token: ${{ secrets.DOCS_REPO_TOKEN }}
+          title: Followup changes in ${{ github.event.issue.title || github.event.pull_request.title }}
+          body: |
+            A followup request is generated from
+            ${{ github.event.issue.html_url || github.event.pull_request.html_url }}
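Both jobs in this new workflow delegate to dacbd/create-issue-action, a thin wrapper over GitHub's create-issue REST endpoint. A rough curl equivalent, assuming DOCS_REPO_TOKEN has write access to the target repo; the title and body placeholders are hypothetical stand-ins for the templated values above:

    # Hypothetical equivalent of the create-issue step above.
    curl -sS -X POST \
        -H "Authorization: Bearer $DOCS_REPO_TOKEN" \
        -H "Accept: application/vnd.github+json" \
        https://api.github.com/repos/GreptimeTeam/docs/issues \
        -d '{"title":"Update docs for <PR title>","body":"A document change request is generated from <PR URL>"}'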
20 .github/workflows/doc-label.yml vendored Normal file
@@ -0,0 +1,20 @@
+name: "PR Doc Labeler"
+on:
+  pull_request_target:
+    types: [opened, edited, synchronize, ready_for_review, auto_merge_enabled, labeled, unlabeled]
+
+permissions:
+  pull-requests: write
+  contents: read
+
+jobs:
+  triage:
+    if: ${{ github.repository == 'GreptimeTeam/greptimedb' }}
+    runs-on: ubuntu-latest
+    steps:
+      - uses: github/issue-labeler@v3.3
+        with:
+          configuration-path: .github/doc-label-config.yml
+          enable-versioned-regex: false
+          repo-token: ${{ secrets.GITHUB_TOKEN }}
+          sync-labels: 1
27 .github/workflows/docbot.yml vendored
@@ -1,27 +0,0 @@
-name: Follow Up Docs
-on:
-  pull_request_target:
-    types: [opened, edited]
-
-concurrency:
-  group: ${{ github.workflow }}-${{ github.head_ref || github.run_id }}
-  cancel-in-progress: true
-
-jobs:
-  docbot:
-    runs-on: ubuntu-latest
-    permissions:
-      pull-requests: write
-      contents: read
-    timeout-minutes: 10
-    steps:
-      - uses: actions/checkout@v4
-        with:
-          persist-credentials: false
-      - uses: ./.github/actions/setup-cyborg
-      - name: Maybe Follow Up Docs Issue
-        working-directory: cyborg
-        run: pnpm tsx bin/follow-up-docs-issue.ts
-        env:
-          DOCS_REPO_TOKEN: ${{ secrets.DOCS_REPO_TOKEN }}
-          GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
57 .github/workflows/docs.yml vendored
@@ -9,10 +9,9 @@ on:
       - '.dockerignore'
       - 'docker/**'
       - '.gitignore'
-      - 'grafana/**'
-      - 'Makefile'
   push:
     branches:
+      - develop
       - main
     paths:
       - 'docs/**'
@@ -21,8 +20,6 @@ on:
       - '.dockerignore'
       - 'docker/**'
       - '.gitignore'
-      - 'grafana/**'
-      - 'Makefile'
   workflow_dispatch:
 
 name: CI
@@ -33,65 +30,41 @@ name: CI
 jobs:
   typos:
     name: Spell Check with Typos
-    runs-on: ubuntu-latest
+    runs-on: ubuntu-20.04
     steps:
-      - uses: actions/checkout@v4
+      - uses: actions/checkout@v3
-        with:
+      - uses: crate-ci/typos@v1.13.10
-          persist-credentials: false
-      - uses: crate-ci/typos@master
 
-  license-header-check:
-    runs-on: ubuntu-latest
-    name: Check License Header
-    steps:
-      - uses: actions/checkout@v4
-        with:
-          persist-credentials: false
-      - uses: korandoru/hawkeye@v5
 
   check:
     name: Check
-    runs-on: ubuntu-latest
+    if: github.event.pull_request.draft == false
+    runs-on: ubuntu-20.04
     steps:
       - run: 'echo "No action required"'
 
   fmt:
     name: Rustfmt
-    runs-on: ubuntu-latest
+    if: github.event.pull_request.draft == false
+    runs-on: ubuntu-20.04
     steps:
       - run: 'echo "No action required"'
 
   clippy:
     name: Clippy
-    runs-on: ubuntu-latest
+    if: github.event.pull_request.draft == false
-    steps:
+    runs-on: ubuntu-20.04
-      - run: 'echo "No action required"'
 
-  check-udeps:
-    name: Unused Dependencies
-    runs-on: ubuntu-latest
     steps:
       - run: 'echo "No action required"'
 
   coverage:
-    runs-on: ubuntu-latest
+    if: github.event.pull_request.draft == false
-    steps:
+    runs-on: ubuntu-20.04
-      - run: 'echo "No action required"'
 
-  test:
-    runs-on: ubuntu-latest
     steps:
       - run: 'echo "No action required"'
 
   sqlness:
-    name: Sqlness Test (${{ matrix.mode.name }})
+    name: Sqlness Test
-    runs-on: ${{ matrix.os }}
+    if: github.event.pull_request.draft == false
-    strategy:
+    runs-on: ubuntu-20.04
-      matrix:
-        os: [ ubuntu-latest ]
-        mode:
-          - name: "Basic"
-          - name: "Remote WAL"
-          - name: "Flat format"
     steps:
       - run: 'echo "No action required"'
26 .github/workflows/grafana.yml vendored
@@ -1,26 +0,0 @@
-name: Check Grafana Panels
-
-on:
-  pull_request:
-    branches:
-      - main
-    paths:
-      - 'grafana/**' # Trigger only when files under the grafana/ directory change
-
-jobs:
-  check-panels:
-    runs-on: ubuntu-latest
-
-    steps:
-      # Check out the repository
-      - name: Checkout repository
-        uses: actions/checkout@v4
-
-      # Install jq (required for the script)
-      - name: Install jq
-        run: sudo apt-get install -y jq
-
-      # Make the check.sh script executable
-      - name: Check grafana dashboards
-        run: |
-          make check-dashboards
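The deleted workflow leans on jq and a `make check-dashboards` target whose body is not part of this diff. A hypothetical sketch of the kind of validation such a target might run; the file glob and the `.panels` check are assumptions, not the project's actual rules:

    # Hypothetical jq-based sanity check over dashboard JSON files.
    for f in grafana/*.json; do
        jq -e '.panels | length > 0' "$f" > /dev/null \
            || { echo "no panels found in $f" >&2; exit 1; }
    done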
16 .github/workflows/license.yaml vendored Normal file
@@ -0,0 +1,16 @@
+name: License checker
+
+on:
+  push:
+    branches:
+      - develop
+  pull_request:
+    types: [opened, synchronize, reopened, ready_for_review]
+jobs:
+  license-header-check:
+    runs-on: ubuntu-20.04
+    name: license-header-check
+    steps:
+      - uses: actions/checkout@v2
+      - name: Check License Header
+        uses: korandoru/hawkeye@v3
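korandoru/hawkeye (v3 here, v5 on the other branch of this compare) verifies that every source file carries the expected license header. A sketch of running the same check locally through a container; the image name and mount path are assumptions based on hawkeye's documentation, not on anything in this repo:

    # Sketch: run the license-header check locally (assumed image and path).
    docker run -it --rm -v "$(pwd)":/github/workspace \
        ghcr.io/korandoru/hawkeye check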
57 .github/workflows/multi-lang-tests.yml vendored
@@ -1,57 +0,0 @@
-name: Multi-language Integration Tests
-
-on:
-  push:
-    branches:
-      - main
-  workflow_dispatch:
-
-concurrency:
-  group: ${{ github.workflow }}-${{ github.head_ref || github.run_id }}
-  cancel-in-progress: true
-
-jobs:
-  build-greptimedb:
-    if: ${{ github.repository == 'GreptimeTeam/greptimedb' }}
-    name: Build GreptimeDB binary
-    runs-on: ubuntu-latest
-    timeout-minutes: 60
-    steps:
-      - uses: actions/checkout@v4
-        with:
-          persist-credentials: false
-      - uses: arduino/setup-protoc@v3
-        with:
-          repo-token: ${{ secrets.GITHUB_TOKEN }}
-      - uses: actions-rust-lang/setup-rust-toolchain@v1
-      - uses: Swatinem/rust-cache@v2
-        with:
-          shared-key: "multi-lang-build"
-          cache-all-crates: "true"
-          save-if: ${{ github.ref == 'refs/heads/main' }}
-      - name: Install cargo-gc-bin
-        shell: bash
-        run: cargo install cargo-gc-bin --force
-      - name: Build greptime binary
-        shell: bash
-        run: cargo gc -- --bin greptime --features "pg_kvbackend,mysql_kvbackend"
-      - name: Pack greptime binary
-        shell: bash
-        run: |
-          mkdir bin && \
-          mv ./target/debug/greptime bin
-      - name: Print greptime binary info
-        run: ls -lh bin
-      - name: Upload greptime binary
-        uses: actions/upload-artifact@v4
-        with:
-          name: greptime-bin
-          path: bin/
-          retention-days: 1
-
-  run-multi-lang-tests:
-    name: Run Multi-language SDK Tests
-    needs: build-greptimedb
-    uses: ./.github/workflows/run-multi-lang-tests.yml
-    with:
-      artifact-name: greptime-bin
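The build job above compiles the `greptime` binary through cargo-gc-bin, a third-party wrapper around cargo (as its name suggests, it reclaims intermediate build artifacts; the exact behavior is not documented in this diff). The commands below are lifted verbatim from the deleted workflow and should reproduce the packaged artifact locally:

    # Reproduce the build and packaging steps of the deleted workflow.
    cargo install cargo-gc-bin --force
    cargo gc -- --bin greptime --features "pg_kvbackend,mysql_kvbackend"
    mkdir bin && mv ./target/debug/greptime bin
    ls -lh bin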
99 .github/workflows/nightly-build.yml vendored
@@ -12,13 +12,13 @@ on:
       linux_amd64_runner:
         type: choice
         description: The runner uses to build linux-amd64 artifacts
-        default: ec2-c6i.4xlarge-amd64
+        default: ec2-c6i.2xlarge-amd64
         options:
-          - ubuntu-22.04
+          - ubuntu-20.04
-          - ubuntu-22.04-8-cores
+          - ubuntu-20.04-8-cores
-          - ubuntu-22.04-16-cores
+          - ubuntu-20.04-16-cores
-          - ubuntu-22.04-32-cores
+          - ubuntu-20.04-32-cores
-          - ubuntu-22.04-64-cores
+          - ubuntu-20.04-64-cores
           - ec2-c6i.xlarge-amd64 # 4C8G
           - ec2-c6i.2xlarge-amd64 # 8C16G
           - ec2-c6i.4xlarge-amd64 # 16C32G
@@ -27,7 +27,7 @@ on:
       linux_arm64_runner:
         type: choice
         description: The runner uses to build linux-arm64 artifacts
-        default: ec2-c6g.4xlarge-arm64
+        default: ec2-c6g.2xlarge-arm64
         options:
           - ec2-c6g.xlarge-arm64 # 4C8G
           - ec2-c6g.2xlarge-arm64 # 8C16G
@@ -70,7 +70,7 @@ jobs:
   allocate-runners:
     name: Allocate runners
     if: ${{ github.repository == 'GreptimeTeam/greptimedb' }}
-    runs-on: ubuntu-latest
+    runs-on: ubuntu-20.04
     outputs:
       linux-amd64-runner: ${{ steps.start-linux-amd64-runner.outputs.label }}
       linux-arm64-runner: ${{ steps.start-linux-arm64-runner.outputs.label }}
@@ -85,10 +85,9 @@ jobs:
       version: ${{ steps.create-version.outputs.version }}
     steps:
       - name: Checkout
-        uses: actions/checkout@v4
+        uses: actions/checkout@v3
         with:
           fetch-depth: 0
-          persist-credentials: false
 
       - name: Create version
         id: create-version
@@ -138,10 +137,9 @@ jobs:
     ]
     runs-on: ${{ needs.allocate-runners.outputs.linux-amd64-runner }}
     steps:
-      - uses: actions/checkout@v4
+      - uses: actions/checkout@v3
         with:
           fetch-depth: 0
-          persist-credentials: false
 
       - uses: ./.github/actions/build-linux-artifacts
         with:
@@ -149,8 +147,6 @@ jobs:
           cargo-profile: ${{ env.CARGO_PROFILE }}
           version: ${{ needs.allocate-runners.outputs.version }}
           disable-run-tests: ${{ env.DISABLE_RUN_TESTS }}
-          image-registry: ${{ vars.ECR_IMAGE_REGISTRY }}
-          image-namespace: ${{ vars.ECR_IMAGE_NAMESPACE }}
 
   build-linux-arm64-artifacts:
     name: Build linux-arm64 artifacts
@@ -160,10 +156,9 @@ jobs:
     ]
     runs-on: ${{ needs.allocate-runners.outputs.linux-arm64-runner }}
     steps:
-      - uses: actions/checkout@v4
+      - uses: actions/checkout@v3
         with:
           fetch-depth: 0
-          persist-credentials: false
 
       - uses: ./.github/actions/build-linux-artifacts
         with:
@@ -171,20 +166,6 @@ jobs:
           cargo-profile: ${{ env.CARGO_PROFILE }}
           version: ${{ needs.allocate-runners.outputs.version }}
           disable-run-tests: ${{ env.DISABLE_RUN_TESTS }}
-          image-registry: ${{ vars.ECR_IMAGE_REGISTRY }}
-          image-namespace: ${{ vars.ECR_IMAGE_NAMESPACE }}
-
-  run-multi-lang-tests:
-    name: Run Multi-language SDK Tests
-    if: ${{ inputs.build_linux_amd64_artifacts || github.event_name == 'schedule' }}
-    needs: [
-      allocate-runners,
-      build-linux-amd64-artifacts,
-    ]
-    uses: ./.github/workflows/run-multi-lang-tests.yml
-    with:
-      artifact-name: greptime-linux-amd64-${{ needs.allocate-runners.outputs.version }}
-      artifact-is-tarball: true
 
   release-images-to-dockerhub:
     name: Build and push images to DockerHub
@@ -194,25 +175,23 @@ jobs:
       build-linux-amd64-artifacts,
       build-linux-arm64-artifacts,
     ]
-    runs-on: ubuntu-latest
+    runs-on: ubuntu-20.04
     outputs:
       nightly-build-result: ${{ steps.set-nightly-build-result.outputs.nightly-build-result }}
     steps:
-      - uses: actions/checkout@v4
+      - uses: actions/checkout@v3
         with:
           fetch-depth: 0
-          persist-credentials: false
 
       - name: Build and push images to dockerhub
         uses: ./.github/actions/build-images
         with:
           image-registry: docker.io
           image-namespace: ${{ vars.IMAGE_NAMESPACE }}
-          image-name: ${{ vars.NIGHTLY_BUILD_IMAGE_NAME }}
           image-registry-username: ${{ secrets.DOCKERHUB_USERNAME }}
           image-registry-password: ${{ secrets.DOCKERHUB_TOKEN }}
           version: ${{ needs.allocate-runners.outputs.version }}
-          push-latest-tag: false
+          push-latest-tag: false # Don't push the latest tag to registry.
 
       - name: Set nightly build result
         id: set-nightly-build-result
@@ -226,23 +205,22 @@ jobs:
       allocate-runners,
       release-images-to-dockerhub,
     ]
-    runs-on: ubuntu-latest
+    runs-on: ubuntu-20.04
     # When we push to ACR, it's easy to fail due to some unknown network issues.
     # However, we don't want to fail the whole workflow because of this.
    # The ACR have daily sync with DockerHub, so don't worry about the image not being updated.
     continue-on-error: true
     steps:
-      - uses: actions/checkout@v4
+      - uses: actions/checkout@v3
         with:
           fetch-depth: 0
-          persist-credentials: false
 
       - name: Release artifacts to CN region
         uses: ./.github/actions/release-cn-artifacts
         with:
           src-image-registry: docker.io
           src-image-namespace: ${{ vars.IMAGE_NAMESPACE }}
-          src-image-name: ${{ vars.NIGHTLY_BUILD_IMAGE_NAME }}
+          src-image-name: greptimedb
           dst-image-registry-username: ${{ secrets.ALICLOUD_USERNAME }}
           dst-image-registry-password: ${{ secrets.ALICLOUD_PASSWORD }}
           dst-image-registry: ${{ vars.ACR_IMAGE_REGISTRY }}
@@ -252,26 +230,24 @@ jobs:
           aws-cn-access-key-id: ${{ secrets.AWS_CN_ACCESS_KEY_ID }}
           aws-cn-secret-access-key: ${{ secrets.AWS_CN_SECRET_ACCESS_KEY }}
           aws-cn-region: ${{ vars.AWS_RELEASE_BUCKET_REGION }}
-          upload-to-s3: false
           dev-mode: false
           update-version-info: false # Don't update version info in S3.
-          push-latest-tag: false
+          push-latest-tag: false # Don't push the latest tag to registry.
 
   stop-linux-amd64-runner: # It's always run as the last job in the workflow to make sure that the runner is released.
     name: Stop linux-amd64 runner
     # Only run this job when the runner is allocated.
     if: ${{ always() }}
-    runs-on: ubuntu-latest
+    runs-on: ubuntu-20.04
     needs: [
       allocate-runners,
       build-linux-amd64-artifacts,
     ]
     steps:
       - name: Checkout
-        uses: actions/checkout@v4
+        uses: actions/checkout@v3
         with:
           fetch-depth: 0
-          persist-credentials: false
 
       - name: Stop EC2 runner
         uses: ./.github/actions/stop-runner
@@ -287,17 +263,16 @@ jobs:
     name: Stop linux-arm64 runner
     # Only run this job when the runner is allocated.
     if: ${{ always() }}
-    runs-on: ubuntu-latest
+    runs-on: ubuntu-20.04
     needs: [
       allocate-runners,
       build-linux-arm64-artifacts,
     ]
     steps:
       - name: Checkout
-        uses: actions/checkout@v4
+        uses: actions/checkout@v3
         with:
           fetch-depth: 0
-          persist-credentials: false
 
       - name: Stop EC2 runner
         uses: ./.github/actions/stop-runner
@@ -310,39 +285,25 @@ jobs:
           github-token: ${{ secrets.GH_PERSONAL_ACCESS_TOKEN }}
 
   notification:
-    if: ${{ github.repository == 'GreptimeTeam/greptimedb' && always() }} # Not requiring successful dependent jobs, always run.
+    if: ${{ always() }} # Not requiring successful dependent jobs, always run.
     name: Send notification to Greptime team
     needs: [
-      release-images-to-dockerhub,
+      release-images-to-dockerhub
-      run-multi-lang-tests,
     ]
-    runs-on: ubuntu-latest
+    runs-on: ubuntu-20.04
-    permissions:
-      issues: write
     env:
       SLACK_WEBHOOK_URL: ${{ secrets.SLACK_WEBHOOK_URL_DEVELOP_CHANNEL }}
     steps:
-      - uses: actions/checkout@v4
+      - name: Notifiy nightly build successful result
-        with:
-          persist-credentials: false
-      - uses: ./.github/actions/setup-cyborg
-      - name: Report CI status
-        id: report-ci-status
-        working-directory: cyborg
-        run: pnpm tsx bin/report-ci-failure.ts
-        env:
-          GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
-          CI_REPORT_STATUS: ${{ needs.release-images-to-dockerhub.outputs.nightly-build-result == 'success' && (needs.run-multi-lang-tests.result == 'success' || needs.run-multi-lang-tests.result == 'skipped') }}
-      - name: Notify nightly build successful result
         uses: slackapi/slack-github-action@v1.23.0
-        if: ${{ needs.release-images-to-dockerhub.outputs.nightly-build-result == 'success' && (needs.run-multi-lang-tests.result == 'success' || needs.run-multi-lang-tests.result == 'skipped') }}
+        if: ${{ needs.release-images-to-dockerhub.outputs.nightly-build-result == 'success' }}
         with:
           payload: |
             {"text": "GreptimeDB's ${{ env.NEXT_RELEASE_VERSION }} build has completed successfully."}
 
-      - name: Notify nightly build failed result
+      - name: Notifiy nightly build failed result
         uses: slackapi/slack-github-action@v1.23.0
-        if: ${{ needs.release-images-to-dockerhub.outputs.nightly-build-result != 'success' || needs.run-multi-lang-tests.result == 'failure' }}
+        if: ${{ needs.release-images-to-dockerhub.outputs.nightly-build-result != 'success' }}
         with:
           payload: |
-            {"text": "GreptimeDB's ${{ env.NEXT_RELEASE_VERSION }} build has failed, please check ${{ steps.report-ci-status.outputs.html_url }}."}
+            {"text": "GreptimeDB's ${{ env.NEXT_RELEASE_VERSION }} build has failed, please check 'https://github.com/GreptimeTeam/greptimedb/actions/workflows/${{ env.NEXT_RELEASE_VERSION }}-build.yml'."}
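The notification job on both sides ends in slackapi/slack-github-action, which essentially POSTs the `payload` JSON to a Slack incoming webhook. A sketch of the same call with curl; the webhook URL comes from the secret and the message text paraphrases one of the payloads in the diff:

    # Sketch: manual equivalent of the Slack notification step.
    curl -sS -X POST -H 'Content-type: application/json' \
        --data '{"text": "GreptimeDB nightly build has completed successfully."}' \
        "$SLACK_WEBHOOK_URL"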
176 .github/workflows/nightly-ci.yml vendored
@@ -1,6 +1,8 @@
+# Nightly CI: runs tests every night for our second tier plaforms (Windows)
+
 on:
   schedule:
-    - cron: "0 23 * * 1-4"
+    - cron: '0 23 * * 1-5'
   workflow_dispatch:
 
 name: Nightly CI
@@ -9,168 +11,90 @@ concurrency:
   group: ${{ github.workflow }}-${{ github.head_ref || github.run_id }}
   cancel-in-progress: true
 
+env:
+  RUST_TOOLCHAIN: nightly-2023-12-19
+
 jobs:
-  sqlness-test:
+  sqlness:
-    name: Run sqlness test
+    name: Sqlness Test
     if: ${{ github.repository == 'GreptimeTeam/greptimedb' }}
-    runs-on: ubuntu-latest
+    runs-on: ${{ matrix.os }}
-    steps:
+    strategy:
-      - name: Checkout
+      matrix:
-        uses: actions/checkout@v4
+        os: [ windows-latest-8-cores ]
-        with:
-          fetch-depth: 0
-          persist-credentials: false
-
-      - name: Check install.sh
-        run: ./.github/scripts/check-install-script.sh
-
-      - name: Run sqlness test
-        uses: ./.github/actions/sqlness-test
-        with:
-          data-root: sqlness-test
-          aws-ci-test-bucket: ${{ vars.AWS_CI_TEST_BUCKET }}
-          aws-region: ${{ vars.AWS_CI_TEST_BUCKET_REGION }}
-          aws-access-key-id: ${{ secrets.AWS_CI_TEST_ACCESS_KEY_ID }}
-          aws-secret-access-key: ${{ secrets.AWS_CI_TEST_SECRET_ACCESS_KEY }}
-      - name: Upload sqlness logs
-        if: failure()
-        uses: actions/upload-artifact@v4
-        with:
-          name: sqlness-logs-kind
-          path: /tmp/kind/
-          retention-days: 3
-
-  sqlness-windows:
-    name: Sqlness tests on Windows
-    if: ${{ github.repository == 'GreptimeTeam/greptimedb' }}
-    runs-on: windows-2022-8-cores
-    permissions:
-      issues: write
     timeout-minutes: 60
     steps:
-      - uses: actions/checkout@v4
+      - uses: actions/checkout@v4.1.0
-        with:
+      - uses: arduino/setup-protoc@v1
-          fetch-depth: 0
-          persist-credentials: false
-      - uses: ./.github/actions/setup-cyborg
-      - uses: arduino/setup-protoc@v3
         with:
           repo-token: ${{ secrets.GITHUB_TOKEN }}
-      - uses: actions-rust-lang/setup-rust-toolchain@v1
+      - uses: dtolnay/rust-toolchain@master
+        with:
+          toolchain: ${{ env.RUST_TOOLCHAIN }}
       - name: Rust Cache
         uses: Swatinem/rust-cache@v2
       - name: Run sqlness
-        run: make sqlness-test
+        run: cargo sqlness
-        env:
+      - name: Notify slack if failed
-          SQLNESS_OPTS: "--preserve-state"
-      - name: Upload sqlness logs
         if: failure()
-        uses: actions/upload-artifact@v4
+        uses: slackapi/slack-github-action@v1.23.0
+        env:
+          SLACK_WEBHOOK_URL: ${{ secrets.SLACK_WEBHOOK_URL_DEVELOP_CHANNEL }}
+        with:
+          payload: |
+            {"text": "Nightly CI failed for sqlness tests"}
+      - name: Upload sqlness logs
+        if: always()
+        uses: actions/upload-artifact@v3
         with:
           name: sqlness-logs
-          path: C:\Users\RUNNER~1\AppData\Local\Temp\sqlness*
+          path: ${{ runner.temp }}/greptime-*.log
           retention-days: 3
 
   test-on-windows:
-    name: Run tests on Windows
     if: ${{ github.repository == 'GreptimeTeam/greptimedb' }}
-    runs-on: windows-2022-8-cores
+    runs-on: windows-latest-8-cores
     timeout-minutes: 60
     steps:
       - run: git config --global core.autocrlf false
-      - uses: actions/checkout@v4
+      - uses: actions/checkout@v4.1.0
-        with:
+      - uses: arduino/setup-protoc@v1
-          fetch-depth: 0
-          persist-credentials: false
-      - uses: ./.github/actions/setup-cyborg
-      - uses: arduino/setup-protoc@v3
         with:
           repo-token: ${{ secrets.GITHUB_TOKEN }}
-      - uses: KyleMayes/install-llvm-action@v1
-        with:
-          version: "14.0"
       - name: Install Rust toolchain
-        uses: actions-rust-lang/setup-rust-toolchain@v1
+        uses: dtolnay/rust-toolchain@master
         with:
+          toolchain: ${{ env.RUST_TOOLCHAIN }}
           components: llvm-tools-preview
       - name: Rust Cache
         uses: Swatinem/rust-cache@v2
       - name: Install Cargo Nextest
        uses: taiki-e/install-action@nextest
+      - name: Install Python
+        uses: actions/setup-python@v4
+        with:
+          python-version: '3.10'
+      - name: Install PyArrow Package
+        run: pip install pyarrow
       - name: Install WSL distribution
         uses: Vampire/setup-wsl@v2
         with:
           distribution: Ubuntu-22.04
       - name: Running tests
-        run: cargo nextest run -F dashboard
+        run: cargo nextest run -F pyo3_backend,dashboard
         env:
-          CARGO_BUILD_RUSTFLAGS: "-C linker=lld-link"
           RUST_BACKTRACE: 1
           CARGO_INCREMENTAL: 0
-          GT_S3_BUCKET: ${{ vars.AWS_CI_TEST_BUCKET }}
+          GT_S3_BUCKET: ${{ secrets.S3_BUCKET }}
-          GT_S3_ACCESS_KEY_ID: ${{ secrets.AWS_CI_TEST_ACCESS_KEY_ID }}
+          GT_S3_ACCESS_KEY_ID: ${{ secrets.S3_ACCESS_KEY_ID }}
-          GT_S3_ACCESS_KEY: ${{ secrets.AWS_CI_TEST_SECRET_ACCESS_KEY }}
+          GT_S3_ACCESS_KEY: ${{ secrets.S3_ACCESS_KEY }}
-          GT_S3_REGION: ${{ vars.AWS_CI_TEST_BUCKET_REGION }}
+          GT_S3_REGION: ${{ secrets.S3_REGION }}
           UNITTEST_LOG_DIR: "__unittest_logs"
+      - name: Notify slack if failed
-
-  cleanbuild-linux-nix:
+        if: failure()
-    name: Run clean build on Linux
-    runs-on: ubuntu-latest
-    if: ${{ github.repository == 'GreptimeTeam/greptimedb' }}
-    timeout-minutes: 45
-    steps:
-      - uses: actions/checkout@v4
-        with:
-          fetch-depth: 0
-          persist-credentials: false
-      - uses: cachix/install-nix-action@v31
-      - run: nix develop --command cargo check --bin greptime
-        env:
-          CARGO_BUILD_RUSTFLAGS: "-C link-arg=-fuse-ld=mold"
-
-  check-status:
-    name: Check status
-    needs: [sqlness-test, sqlness-windows, test-on-windows]
-    if: ${{ github.repository == 'GreptimeTeam/greptimedb' }}
-    runs-on: ubuntu-latest
-    outputs:
-      check-result: ${{ steps.set-check-result.outputs.check-result }}
-    steps:
-      - name: Set check result
-        id: set-check-result
-        run: |
-          echo "check-result=success" >> $GITHUB_OUTPUT
-
-  notification:
-    if: ${{ github.repository == 'GreptimeTeam/greptimedb' && always() }} # Not requiring successful dependent jobs, always run.
-    name: Send notification to Greptime team
-    needs: [check-status]
-    runs-on: ubuntu-latest
-    env:
-      SLACK_WEBHOOK_URL: ${{ secrets.SLACK_WEBHOOK_URL_DEVELOP_CHANNEL }}
-    steps:
-      - uses: actions/checkout@v4
-        with:
-          fetch-depth: 0
-          persist-credentials: false
-      - uses: ./.github/actions/setup-cyborg
-      - name: Report CI status
-        id: report-ci-status
-        working-directory: cyborg
-        run: pnpm tsx bin/report-ci-failure.ts
-        env:
-          GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
-          CI_REPORT_STATUS: ${{ needs.check-status.outputs.check-result == 'success' }}
-      - name: Notify dev build successful result
         uses: slackapi/slack-github-action@v1.23.0
-        if: ${{ needs.check-status.outputs.check-result == 'success' }}
+        env:
+          SLACK_WEBHOOK_URL: ${{ secrets.SLACK_WEBHOOK_URL_DEVELOP_CHANNEL }}
         with:
           payload: |
-            {"text": "Nightly CI has completed successfully."}
+            {"text": "Nightly CI failed for cargo test"}
-
-      - name: Notify dev build failed result
-        uses: slackapi/slack-github-action@v1.23.0
-        if: ${{ needs.check-status.outputs.check-result != 'success' }}
-        with:
-          payload: |
-            {"text": "Nightly CI failed has failed, please check ${{ steps.report-ci-status.outputs.html_url }}."}
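Both sides of the nightly sqlness job run the same test suite through different entry points, and the newer side preserves failed state for the log upload. The invocations below appear verbatim in the diff above; which one applies depends on the branch you check out:

    # Older branch: cargo alias that runs the sqlness-runner binary.
    cargo sqlness
    # Newer branch: make target wrapping the same runner; keep state on failure
    # so the uploaded sqlness logs have something to show.
    SQLNESS_OPTS="--preserve-state" make sqlness-test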
27 .github/workflows/nightly-funtional-tests.yml vendored Normal file
@@ -0,0 +1,27 @@
+name: Nightly functional tests
+
+on:
+  schedule:
+    # At 00:00 on Tuesday.
+    - cron: '0 0 * * 2'
+  workflow_dispatch:
+
+jobs:
+  sqlness-test:
+    name: Run sqlness test
+    if: ${{ github.repository == 'GreptimeTeam/greptimedb' }}
+    runs-on: ubuntu-22.04
+    steps:
+      - name: Checkout
+        uses: actions/checkout@v3
+        with:
+          fetch-depth: 0
+
+      - name: Run sqlness test
+        uses: ./.github/actions/sqlness-test
+        with:
+          data-root: sqlness-test
+          aws-ci-test-bucket: ${{ vars.AWS_CI_TEST_BUCKET }}
+          aws-region: ${{ vars.AWS_CI_TEST_BUCKET_REGION }}
+          aws-access-key-id: ${{ secrets.AWS_CI_TEST_ACCESS_KEY_ID }}
+          aws-secret-access-key: ${{ secrets.AWS_CI_TEST_SECRET_ACCESS_KEY }}
42 .github/workflows/pr-labeling.yaml vendored
@@ -1,42 +0,0 @@
-name: 'PR Labeling'
-
-on:
-  pull_request_target:
-    types:
-      - opened
-      - synchronize
-      - reopened
-
-permissions:
-  contents: read
-  pull-requests: write
-  issues: write
-
-jobs:
-  labeler:
-    runs-on: ubuntu-latest
-    steps:
-      - name: Checkout sources
-        uses: actions/checkout@v4
-
-      - uses: actions/labeler@v5
-        with:
-          configuration-path: ".github/labeler.yaml"
-          repo-token: "${{ secrets.GITHUB_TOKEN }}"
-
-  size-label:
-    runs-on: ubuntu-latest
-    steps:
-      - uses: pascalgn/size-label-action@v0.5.5
-        env:
-          GITHUB_TOKEN: "${{ secrets.GITHUB_TOKEN }}"
-        with:
-          sizes: >
-            {
-              "0": "XS",
-              "100": "S",
-              "300": "M",
-              "1000": "L",
-              "1500": "XL",
-              "2000": "XXL"
-            }
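The `sizes` JSON above maps a changed-line threshold to a label suffix; the action applies the largest threshold that the PR's diff size reaches. A hypothetical sketch of that rule (the line counting and exact label names are assumptions, not the action's documented behavior):

    # Hypothetical sketch of the size-label thresholding.
    changed=120   # pretend: insertions + deletions in the PR
    if   [ "$changed" -ge 2000 ]; then label=XXL
    elif [ "$changed" -ge 1500 ]; then label=XL
    elif [ "$changed" -ge 1000 ]; then label=L
    elif [ "$changed" -ge 300  ]; then label=M
    elif [ "$changed" -ge 100  ]; then label=S
    else                               label=XS
    fi
    echo "size/$label"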
36 .github/workflows/pr-review-reminder.yml vendored
@@ -1,36 +0,0 @@
-name: PR Review Reminder
-
-on:
-  schedule:
-    # Run at 9:00 AM UTC+8 (01:00 AM UTC) on Monday, Wednesday, Friday
-    - cron: '0 1 * * 1,3,5'
-  workflow_dispatch:
-
-jobs:
-  pr-review-reminder:
-    name: Send PR Review Reminders
-    runs-on: ubuntu-latest
-    permissions:
-      contents: read
-      pull-requests: read
-    if: ${{ github.repository == 'GreptimeTeam/greptimedb' }}
-    steps:
-      - name: Checkout repository
-        uses: actions/checkout@v4
-
-      - name: Setup Node.js
-        uses: actions/setup-node@v4
-        with:
-          node-version: '20'
-
-      - name: Install dependencies
-        working-directory: .github/scripts
-        run: npm ci
-
-      - name: Run PR review reminder
-        working-directory: .github/scripts
-        run: node pr-review-reminder.js
-        env:
-          GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
-          SLACK_PR_REVIEW_WEBHOOK_URL: ${{ vars.SLACK_PR_REVIEW_WEBHOOK_URL }}
-          GITHUBID_SLACKID_MAPPING: ${{ vars.GITHUBID_SLACKID_MAPPING }}
29 .github/workflows/pr-title-checker.yml vendored Normal file
@@ -0,0 +1,29 @@
+name: "PR Title Checker"
+on:
+  pull_request_target:
+    types:
+      - opened
+      - edited
+      - synchronize
+      - labeled
+      - unlabeled
+
+jobs:
+  check:
+    runs-on: ubuntu-20.04
+    timeout-minutes: 10
+    steps:
+      - uses: thehanimo/pr-title-checker@v1.3.4
+        with:
+          GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
+          pass_on_octokit_error: false
+          configuration_path: ".github/pr-title-checker-config.json"
+  breaking:
+    runs-on: ubuntu-20.04
+    timeout-minutes: 10
+    steps:
+      - uses: thehanimo/pr-title-checker@v1.3.4
+        with:
+          GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
+          pass_on_octokit_error: false
+          configuration_path: ".github/pr-title-breaking-change-label-config.json"
207
.github/workflows/release-dev-builder-images.yaml
vendored
207
.github/workflows/release-dev-builder-images.yaml
vendored
@@ -1,14 +1,12 @@
|
|||||||
name: Release dev-builder images
|
name: Release dev-builder images
|
||||||
|
|
||||||
on:
|
on:
|
||||||
push:
|
|
||||||
branches:
|
|
||||||
- main
|
|
||||||
paths:
|
|
||||||
- rust-toolchain.toml
|
|
||||||
- 'docker/dev-builder/**'
|
|
||||||
workflow_dispatch: # Allows you to run this workflow manually.
|
workflow_dispatch: # Allows you to run this workflow manually.
|
||||||
inputs:
|
inputs:
|
||||||
|
version:
|
||||||
|
description: Version of the dev-builder
|
||||||
|
required: false
|
||||||
|
default: latest
|
||||||
release_dev_builder_ubuntu_image:
|
release_dev_builder_ubuntu_image:
|
||||||
type: boolean
|
type: boolean
|
||||||
description: Release dev-builder-ubuntu image
|
description: Release dev-builder-ubuntu image
|
||||||
@@ -24,209 +22,64 @@ on:
|
|||||||
description: Release dev-builder-android image
|
description: Release dev-builder-android image
|
||||||
required: false
|
required: false
|
||||||
default: false
|
default: false
|
||||||
update_dev_builder_image_tag:
|
|
||||||
type: boolean
|
|
||||||
description: Update the DEV_BUILDER_IMAGE_TAG in Makefile and create a PR
|
|
||||||
required: false
|
|
||||||
default: false
|
|
||||||
|
|
||||||
jobs:
|
jobs:
|
||||||
release-dev-builder-images:
|
release-dev-builder-images:
|
||||||
name: Release dev builder images
|
name: Release dev builder images
|
||||||
# The jobs are triggered by the following events:
|
if: ${{ inputs.release_dev_builder_ubuntu_image || inputs.release_dev_builder_centos_image || inputs.release_dev_builder_android_image }} # Only manually trigger this job.
|
||||||
# 1. Manually triggered workflow_dispatch event
|
runs-on: ubuntu-20.04-16-cores
|
||||||
# 2. Push event when the PR that modifies the `rust-toolchain.toml` or `docker/dev-builder/**` is merged to main
|
|
||||||
if: ${{ github.event_name == 'push' || inputs.release_dev_builder_ubuntu_image || inputs.release_dev_builder_centos_image || inputs.release_dev_builder_android_image }}
|
|
||||||
runs-on: ubuntu-latest
|
|
||||||
outputs:
|
|
||||||
version: ${{ steps.set-version.outputs.version }}
|
|
||||||
steps:
|
steps:
|
||||||
- name: Checkout
|
- name: Checkout
|
||||||
uses: actions/checkout@v4
|
uses: actions/checkout@v3
|
||||||
with:
|
with:
|
||||||
fetch-depth: 0
|
fetch-depth: 0
|
||||||
persist-credentials: false
|
|
||||||
|
|
||||||
- name: Configure build image version
|
|
||||||
id: set-version
|
|
||||||
shell: bash
|
|
||||||
run: |
|
|
||||||
commitShortSHA=`echo ${{ github.sha }} | cut -c1-8`
|
|
||||||
buildTime=`date +%Y%m%d%H%M%S`
|
|
||||||
BUILD_VERSION="$commitShortSHA-$buildTime"
|
|
||||||
RUST_TOOLCHAIN_VERSION=$(cat rust-toolchain.toml | grep -Eo '[0-9]{4}-[0-9]{2}-[0-9]{2}')
|
|
||||||
IMAGE_VERSION="${RUST_TOOLCHAIN_VERSION}-${BUILD_VERSION}"
|
|
||||||
echo "VERSION=${IMAGE_VERSION}" >> $GITHUB_ENV
|
|
||||||
echo "version=$IMAGE_VERSION" >> $GITHUB_OUTPUT
|
|
||||||
|
|
||||||
- name: Build and push dev builder images
|
- name: Build and push dev builder images
|
||||||
uses: ./.github/actions/build-dev-builder-images
|
uses: ./.github/actions/build-dev-builder-images
|
||||||
with:
|
with:
|
||||||
version: ${{ env.VERSION }}
|
version: ${{ inputs.version }}
|
||||||
dockerhub-image-registry-username: ${{ secrets.DOCKERHUB_USERNAME }}
|
dockerhub-image-registry-username: ${{ secrets.DOCKERHUB_USERNAME }}
|
||||||
dockerhub-image-registry-token: ${{ secrets.DOCKERHUB_TOKEN }}
|
dockerhub-image-registry-token: ${{ secrets.DOCKERHUB_TOKEN }}
|
||||||
build-dev-builder-ubuntu: ${{ inputs.release_dev_builder_ubuntu_image || github.event_name == 'push' }}
|
build-dev-builder-ubuntu: ${{ inputs.release_dev_builder_ubuntu_image }}
|
||||||
build-dev-builder-centos: ${{ inputs.release_dev_builder_centos_image || github.event_name == 'push' }}
|
build-dev-builder-centos: ${{ inputs.release_dev_builder_centos_image }}
|
||||||
build-dev-builder-android: ${{ inputs.release_dev_builder_android_image || github.event_name == 'push' }}
|
build-dev-builder-android: ${{ inputs.release_dev_builder_android_image }}
|
||||||
|
|
||||||
release-dev-builder-images-ecr:
|
|
||||||
name: Release dev builder images to AWS ECR
|
|
||||||
runs-on: ubuntu-latest
|
|
||||||
needs: [
|
|
||||||
release-dev-builder-images
|
|
||||||
]
|
|
||||||
steps:
|
|
||||||
- name: Configure AWS credentials
|
|
||||||
uses: aws-actions/configure-aws-credentials@v4
|
|
||||||
with:
|
|
||||||
aws-access-key-id: ${{ secrets.AWS_ECR_ACCESS_KEY_ID }}
|
|
||||||
aws-secret-access-key: ${{ secrets.AWS_ECR_SECRET_ACCESS_KEY }}
|
|
||||||
aws-region: ${{ vars.ECR_REGION }}
|
|
||||||
|
|
||||||
- name: Login to Amazon ECR
|
|
||||||
id: login-ecr-public
|
|
||||||
uses: aws-actions/amazon-ecr-login@v2
|
|
||||||
env:
|
|
||||||
AWS_REGION: ${{ vars.ECR_REGION }}
|
|
||||||
with:
|
|
||||||
registry-type: public
|
|
||||||
|
|
||||||
- name: Push dev-builder-ubuntu image
|
|
||||||
shell: bash
|
|
||||||
if: ${{ inputs.release_dev_builder_ubuntu_image || github.event_name == 'push' }}
|
|
||||||
env:
|
|
||||||
IMAGE_VERSION: ${{ needs.release-dev-builder-images.outputs.version }}
|
|
||||||
IMAGE_NAMESPACE: ${{ vars.IMAGE_NAMESPACE }}
|
|
||||||
ECR_IMAGE_REGISTRY: ${{ vars.ECR_IMAGE_REGISTRY }}
|
|
||||||
ECR_IMAGE_NAMESPACE: ${{ vars.ECR_IMAGE_NAMESPACE }}
|
|
||||||
run: |
|
|
||||||
docker run -v "${DOCKER_CONFIG:-$HOME/.docker}:/root/.docker:ro" \
|
|
||||||
-e "REGISTRY_AUTH_FILE=/root/.docker/config.json" \
|
|
||||||
quay.io/skopeo/stable:latest \
|
|
||||||
copy -a docker://docker.io/$IMAGE_NAMESPACE/dev-builder-ubuntu:$IMAGE_VERSION \
|
|
||||||
docker://$ECR_IMAGE_REGISTRY/$ECR_IMAGE_NAMESPACE/dev-builder-ubuntu:$IMAGE_VERSION
|
|
||||||
|
|
||||||
docker run -v "${DOCKER_CONFIG:-$HOME/.docker}:/root/.docker:ro" \
|
|
||||||
-e "REGISTRY_AUTH_FILE=/root/.docker/config.json" \
|
|
||||||
quay.io/skopeo/stable:latest \
|
|
||||||
copy -a docker://docker.io/$IMAGE_NAMESPACE/dev-builder-ubuntu:latest \
|
|
||||||
docker://$ECR_IMAGE_REGISTRY/$ECR_IMAGE_NAMESPACE/dev-builder-ubuntu:latest
|
|
||||||
|
|
||||||
- name: Push dev-builder-centos image
|
|
||||||
shell: bash
|
|
||||||
if: ${{ inputs.release_dev_builder_centos_image || github.event_name == 'push' }}
|
|
||||||
env:
|
|
||||||
IMAGE_VERSION: ${{ needs.release-dev-builder-images.outputs.version }}
|
|
||||||
IMAGE_NAMESPACE: ${{ vars.IMAGE_NAMESPACE }}
|
|
||||||
ECR_IMAGE_REGISTRY: ${{ vars.ECR_IMAGE_REGISTRY }}
|
|
||||||
ECR_IMAGE_NAMESPACE: ${{ vars.ECR_IMAGE_NAMESPACE }}
|
|
||||||
run: |
|
|
||||||
docker run -v "${DOCKER_CONFIG:-$HOME/.docker}:/root/.docker:ro" \
|
|
||||||
-e "REGISTRY_AUTH_FILE=/root/.docker/config.json" \
|
|
||||||
quay.io/skopeo/stable:latest \
|
|
||||||
copy -a docker://docker.io/$IMAGE_NAMESPACE/dev-builder-centos:$IMAGE_VERSION \
|
|
||||||
docker://$ECR_IMAGE_REGISTRY/$ECR_IMAGE_NAMESPACE/dev-builder-centos:$IMAGE_VERSION
|
|
||||||
|
|
||||||
docker run -v "${DOCKER_CONFIG:-$HOME/.docker}:/root/.docker:ro" \
|
|
||||||
-e "REGISTRY_AUTH_FILE=/root/.docker/config.json" \
|
|
||||||
quay.io/skopeo/stable:latest \
|
|
||||||
copy -a docker://docker.io/$IMAGE_NAMESPACE/dev-builder-centos:latest \
|
|
||||||
docker://$ECR_IMAGE_REGISTRY/$ECR_IMAGE_NAMESPACE/dev-builder-centos:latest
|
|
||||||
|
|
||||||
- name: Push dev-builder-android image
|
|
||||||
shell: bash
|
|
||||||
if: ${{ inputs.release_dev_builder_android_image || github.event_name == 'push' }}
|
|
||||||
env:
|
|
||||||
IMAGE_VERSION: ${{ needs.release-dev-builder-images.outputs.version }}
|
|
||||||
IMAGE_NAMESPACE: ${{ vars.IMAGE_NAMESPACE }}
|
|
||||||
ECR_IMAGE_REGISTRY: ${{ vars.ECR_IMAGE_REGISTRY }}
|
|
||||||
ECR_IMAGE_NAMESPACE: ${{ vars.ECR_IMAGE_NAMESPACE }}
|
|
||||||
run: |
|
|
||||||
docker run -v "${DOCKER_CONFIG:-$HOME/.docker}:/root/.docker:ro" \
|
|
||||||
-e "REGISTRY_AUTH_FILE=/root/.docker/config.json" \
|
|
||||||
quay.io/skopeo/stable:latest \
|
|
||||||
copy -a docker://docker.io/$IMAGE_NAMESPACE/dev-builder-android:$IMAGE_VERSION \
|
|
||||||
docker://$ECR_IMAGE_REGISTRY/$ECR_IMAGE_NAMESPACE/dev-builder-android:$IMAGE_VERSION
|
|
||||||
|
|
||||||
docker run -v "${DOCKER_CONFIG:-$HOME/.docker}:/root/.docker:ro" \
|
|
||||||
-e "REGISTRY_AUTH_FILE=/root/.docker/config.json" \
|
|
||||||
quay.io/skopeo/stable:latest \
|
|
||||||
copy -a docker://docker.io/$IMAGE_NAMESPACE/dev-builder-android:latest \
|
|
||||||
docker://$ECR_IMAGE_REGISTRY/$ECR_IMAGE_NAMESPACE/dev-builder-android:latest
|
|
||||||
|
|
||||||
release-dev-builder-images-cn: # Note: Be careful issue: https://github.com/containers/skopeo/issues/1874 and we decide to use the latest stable skopeo container.
|
release-dev-builder-images-cn: # Note: Be careful issue: https://github.com/containers/skopeo/issues/1874 and we decide to use the latest stable skopeo container.
|
||||||
name: Release dev builder images to CN region
|
name: Release dev builder images to CN region
|
||||||
runs-on: ubuntu-latest
|
runs-on: ubuntu-20.04
|
||||||
needs: [
|
needs: [
|
       release-dev-builder-images
     ]
     steps:
-      - name: Login to AliCloud Container Registry
-        uses: docker/login-action@v3
-        with:
-          registry: ${{ vars.ACR_IMAGE_REGISTRY }}
-          username: ${{ secrets.ALICLOUD_USERNAME }}
-          password: ${{ secrets.ALICLOUD_PASSWORD }}

       - name: Push dev-builder-ubuntu image
         shell: bash
-        if: ${{ inputs.release_dev_builder_ubuntu_image || github.event_name == 'push' }}
+        if: ${{ inputs.release_dev_builder_ubuntu_image }}
         env:
-          IMAGE_VERSION: ${{ needs.release-dev-builder-images.outputs.version }}
-          IMAGE_NAMESPACE: ${{ vars.IMAGE_NAMESPACE }}
-          ACR_IMAGE_REGISTRY: ${{ vars.ACR_IMAGE_REGISTRY }}
+          DST_REGISTRY_USERNAME: ${{ secrets.ALICLOUD_USERNAME }}
+          DST_REGISTRY_PASSWORD: ${{ secrets.ALICLOUD_PASSWORD }}
         run: |
-          docker run -v "${DOCKER_CONFIG:-$HOME/.docker}:/root/.docker:ro" \
-            -e "REGISTRY_AUTH_FILE=/root/.docker/config.json" \
-            quay.io/skopeo/stable:latest \
-            copy -a docker://docker.io/$IMAGE_NAMESPACE/dev-builder-ubuntu:$IMAGE_VERSION \
-            docker://$ACR_IMAGE_REGISTRY/$IMAGE_NAMESPACE/dev-builder-ubuntu:$IMAGE_VERSION
+          docker run quay.io/skopeo/stable:latest copy -a docker://docker.io/${{ vars.IMAGE_NAMESPACE }}/dev-builder-ubuntu:${{ inputs.version }} \
+            --dest-creds "$DST_REGISTRY_USERNAME":"$DST_REGISTRY_PASSWORD" \
+            docker://${{ vars.ACR_IMAGE_REGISTRY }}/${{ vars.IMAGE_NAMESPACE }}/dev-builder-ubuntu:${{ inputs.version }}

       - name: Push dev-builder-centos image
         shell: bash
-        if: ${{ inputs.release_dev_builder_centos_image || github.event_name == 'push' }}
+        if: ${{ inputs.release_dev_builder_centos_image }}
         env:
-          IMAGE_VERSION: ${{ needs.release-dev-builder-images.outputs.version }}
-          IMAGE_NAMESPACE: ${{ vars.IMAGE_NAMESPACE }}
-          ACR_IMAGE_REGISTRY: ${{ vars.ACR_IMAGE_REGISTRY }}
+          DST_REGISTRY_USERNAME: ${{ secrets.ALICLOUD_USERNAME }}
+          DST_REGISTRY_PASSWORD: ${{ secrets.ALICLOUD_PASSWORD }}
         run: |
-          docker run -v "${DOCKER_CONFIG:-$HOME/.docker}:/root/.docker:ro" \
-            -e "REGISTRY_AUTH_FILE=/root/.docker/config.json" \
-            quay.io/skopeo/stable:latest \
-            copy -a docker://docker.io/$IMAGE_NAMESPACE/dev-builder-centos:$IMAGE_VERSION \
-            docker://$ACR_IMAGE_REGISTRY/$IMAGE_NAMESPACE/dev-builder-centos:$IMAGE_VERSION
+          docker run quay.io/skopeo/stable:latest copy -a docker://docker.io/${{ vars.IMAGE_NAMESPACE }}/dev-builder-centos:${{ inputs.version }} \
+            --dest-creds "$DST_REGISTRY_USERNAME":"$DST_REGISTRY_PASSWORD" \
+            docker://${{ vars.ACR_IMAGE_REGISTRY }}/${{ vars.IMAGE_NAMESPACE }}/dev-builder-centos:${{ inputs.version }}

       - name: Push dev-builder-android image
         shell: bash
-        if: ${{ inputs.release_dev_builder_android_image || github.event_name == 'push' }}
+        if: ${{ inputs.release_dev_builder_android_image }}
         env:
-          IMAGE_VERSION: ${{ needs.release-dev-builder-images.outputs.version }}
-          IMAGE_NAMESPACE: ${{ vars.IMAGE_NAMESPACE }}
-          ACR_IMAGE_REGISTRY: ${{ vars.ACR_IMAGE_REGISTRY }}
+          DST_REGISTRY_USERNAME: ${{ secrets.ALICLOUD_USERNAME }}
+          DST_REGISTRY_PASSWORD: ${{ secrets.ALICLOUD_PASSWORD }}
         run: |
-          docker run -v "${DOCKER_CONFIG:-$HOME/.docker}:/root/.docker:ro" \
-            -e "REGISTRY_AUTH_FILE=/root/.docker/config.json" \
-            quay.io/skopeo/stable:latest \
-            copy -a docker://docker.io/$IMAGE_NAMESPACE/dev-builder-android:$IMAGE_VERSION \
-            docker://$ACR_IMAGE_REGISTRY/$IMAGE_NAMESPACE/dev-builder-android:$IMAGE_VERSION
+          docker run quay.io/skopeo/stable:latest copy -a docker://docker.io/${{ vars.IMAGE_NAMESPACE }}/dev-builder-android:${{ inputs.version }} \
+            --dest-creds "$DST_REGISTRY_USERNAME":"$DST_REGISTRY_PASSWORD" \
+            docker://${{ vars.ACR_IMAGE_REGISTRY }}/${{ vars.IMAGE_NAMESPACE }}/dev-builder-android:${{ inputs.version }}

-  update-dev-builder-image-tag:
-    name: Update dev-builder image tag
-    runs-on: ubuntu-latest
-    permissions:
-      contents: write
-      pull-requests: write
-    if: ${{ github.event_name == 'push' || inputs.update_dev_builder_image_tag }}
-    needs: [
-      release-dev-builder-images
-    ]
-    steps:
-      - name: Checkout repository
-        uses: actions/checkout@v4
-
-      - name: Update dev-builder image tag
-        shell: bash
-        env:
-          GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
-        run: |
-          ./.github/scripts/update-dev-builder-version.sh ${{ needs.release-dev-builder-images.outputs.version }}
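Note: the removed push steps above amount to a registry-to-registry copy with skopeo. A minimal standalone sketch of that pattern follows; the registry host, namespace, tag, and credential variables here are placeholders, not the repository's actual values:

    # Copy an image (all architectures, -a) from DockerHub to a mirror registry.
    # DST_USER/DST_PASS and the image coordinates are illustrative only.
    docker run quay.io/skopeo/stable:latest copy -a \
      docker://docker.io/example-namespace/dev-builder-ubuntu:example-tag \
      --dest-creds "$DST_USER:$DST_PASS" \
      docker://registry.example.com/example-namespace/dev-builder-ubuntu:example-tag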
.github/workflows/release.yml (vendored, 253 changes)

@@ -18,11 +18,11 @@ on:
         description: The runner uses to build linux-amd64 artifacts
         default: ec2-c6i.4xlarge-amd64
         options:
-          - ubuntu-22.04
-          - ubuntu-22.04-8-cores
-          - ubuntu-22.04-16-cores
-          - ubuntu-22.04-32-cores
-          - ubuntu-22.04-64-cores
+          - ubuntu-20.04
+          - ubuntu-20.04-8-cores
+          - ubuntu-20.04-16-cores
+          - ubuntu-20.04-32-cores
+          - ubuntu-20.04-64-cores
           - ec2-c6i.xlarge-amd64 # 4C8G
           - ec2-c6i.2xlarge-amd64 # 8C16G
           - ec2-c6i.4xlarge-amd64 # 16C32G
@@ -31,9 +31,8 @@ on:
       linux_arm64_runner:
         type: choice
         description: The runner uses to build linux-arm64 artifacts
-        default: ec2-c6g.8xlarge-arm64
+        default: ec2-c6g.4xlarge-arm64
         options:
-          - ubuntu-2204-32-cores-arm
           - ec2-c6g.xlarge-arm64 # 4C8G
           - ec2-c6g.2xlarge-arm64 # 8C16G
           - ec2-c6g.4xlarge-arm64 # 16C32G
@@ -83,24 +82,27 @@ on:
 # Use env variables to control all the release process.
 env:
   # The arguments of building greptime.
+  RUST_TOOLCHAIN: nightly-2023-12-19
   CARGO_PROFILE: nightly

   # Controls whether to run tests, include unit-test, integration-test and sqlness.
   DISABLE_RUN_TESTS: ${{ inputs.skip_test || vars.DEFAULT_SKIP_TEST }}

-  # The scheduled version is '${{ env.NEXT_RELEASE_VERSION }}-nightly-YYYYMMDD', like v0.2.0-nightly-20230313;
+  # The scheduled version is '${{ env.NEXT_RELEASE_VERSION }}-nightly-YYYYMMDD', like v0.2.0-nigthly-20230313;
   NIGHTLY_RELEASE_PREFIX: nightly
+
+  # Note: The NEXT_RELEASE_VERSION should be modified manually by every formal release.
+  NEXT_RELEASE_VERSION: v0.6.0

 jobs:
   allocate-runners:
     name: Allocate runners
     if: ${{ github.repository == 'GreptimeTeam/greptimedb' }}
-    runs-on: ubuntu-latest
+    runs-on: ubuntu-20.04
     outputs:
       linux-amd64-runner: ${{ steps.start-linux-amd64-runner.outputs.label }}
       linux-arm64-runner: ${{ steps.start-linux-arm64-runner.outputs.label }}
       macos-runner: ${{ inputs.macos_runner || vars.DEFAULT_MACOS_RUNNER }}
-      windows-runner: windows-2022-8-cores
+      windows-runner: windows-latest-8-cores

       # The following EC2 resource id will be used for resource releasing.
       linux-amd64-ec2-runner-label: ${{ steps.start-linux-amd64-runner.outputs.label }}
@@ -110,24 +112,15 @@ jobs:

       # The 'version' use as the global tag name of the release workflow.
       version: ${{ steps.create-version.outputs.version }}

-      # The 'is-current-version-latest' determines whether to update 'latest' Docker tags and downstream repositories.
-      is-current-version-latest: ${{ steps.check-version.outputs.is-current-version-latest }}
     steps:
       - name: Checkout
-        uses: actions/checkout@v4
+        uses: actions/checkout@v3
         with:
           fetch-depth: 0
-          persist-credentials: false
-
-      - name: Check Rust toolchain version
-        shell: bash
-        run: |
-          ./scripts/check-builder-rust-version.sh

       # The create-version will create a global variable named 'version' in the global workflows.
       # - If it's a tag push release, the version is the tag name(${{ github.ref_name }});
-      # - If it's a scheduled release, the version is '${{ env.NEXT_RELEASE_VERSION }}-nightly-$buildTime', like v0.2.0-nightly-20230313;
+      # - If it's a scheduled release, the version is '${{ env.NEXT_RELEASE_VERSION }}-nightly-$buildTime', like v0.2.0-nigthly-20230313;
       # - If it's a manual release, the version is '${{ env.NEXT_RELEASE_VERSION }}-<short-git-sha>-YYYYMMDDSS', like v0.2.0-e5b243c-2023071245;
       - name: Create version
         id: create-version
@@ -136,13 +129,9 @@ jobs:
         env:
           GITHUB_EVENT_NAME: ${{ github.event_name }}
           GITHUB_REF_NAME: ${{ github.ref_name }}
+          NEXT_RELEASE_VERSION: ${{ env.NEXT_RELEASE_VERSION }}
           NIGHTLY_RELEASE_PREFIX: ${{ env.NIGHTLY_RELEASE_PREFIX }}

-      - name: Check version
-        id: check-version
-        run: |
-          ./.github/scripts/check-version.sh "${{ steps.create-version.outputs.version }}"
-
       - name: Allocate linux-amd64 runner
         if: ${{ inputs.build_linux_amd64_artifacts || github.event_name == 'push' || github.event_name == 'schedule' }}
         uses: ./.github/actions/start-runner
@@ -179,10 +168,9 @@ jobs:
     ]
     runs-on: ${{ needs.allocate-runners.outputs.linux-amd64-runner }}
     steps:
-      - uses: actions/checkout@v4
+      - uses: actions/checkout@v3
         with:
           fetch-depth: 0
-          persist-credentials: false

       - uses: ./.github/actions/build-linux-artifacts
         with:
@@ -190,8 +178,6 @@ jobs:
           cargo-profile: ${{ env.CARGO_PROFILE }}
           version: ${{ needs.allocate-runners.outputs.version }}
           disable-run-tests: ${{ env.DISABLE_RUN_TESTS }}
-          image-registry: ${{ vars.ECR_IMAGE_REGISTRY }}
-          image-namespace: ${{ vars.ECR_IMAGE_NAMESPACE }}

   build-linux-arm64-artifacts:
     name: Build linux-arm64 artifacts
@@ -201,10 +187,9 @@ jobs:
     ]
     runs-on: ${{ needs.allocate-runners.outputs.linux-arm64-runner }}
     steps:
-      - uses: actions/checkout@v4
+      - uses: actions/checkout@v3
         with:
           fetch-depth: 0
-          persist-credentials: false

       - uses: ./.github/actions/build-linux-artifacts
         with:
@@ -212,20 +197,6 @@ jobs:
           cargo-profile: ${{ env.CARGO_PROFILE }}
           version: ${{ needs.allocate-runners.outputs.version }}
           disable-run-tests: ${{ env.DISABLE_RUN_TESTS }}
-          image-registry: ${{ vars.ECR_IMAGE_REGISTRY }}
-          image-namespace: ${{ vars.ECR_IMAGE_NAMESPACE }}
-
-  run-multi-lang-tests:
-    name: Run Multi-language SDK Tests
-    if: ${{ inputs.build_linux_amd64_artifacts || github.event_name == 'push' || github.event_name == 'schedule' }}
-    needs: [
-      allocate-runners,
-      build-linux-amd64-artifacts,
-    ]
-    uses: ./.github/workflows/run-multi-lang-tests.yml
-    with:
-      artifact-name: greptime-linux-amd64-${{ needs.allocate-runners.outputs.version }}
-      artifact-is-tarball: true

   build-macos-artifacts:
     name: Build macOS artifacts
@@ -237,38 +208,38 @@ jobs:
           arch: aarch64-apple-darwin
           features: servers/dashboard
           artifacts-dir-prefix: greptime-darwin-arm64
+        - os: ${{ needs.allocate-runners.outputs.macos-runner }}
+          arch: aarch64-apple-darwin
+          features: pyo3_backend,servers/dashboard
+          artifacts-dir-prefix: greptime-darwin-arm64-pyo3
         - os: ${{ needs.allocate-runners.outputs.macos-runner }}
           features: servers/dashboard
           arch: x86_64-apple-darwin
           artifacts-dir-prefix: greptime-darwin-amd64
+        - os: ${{ needs.allocate-runners.outputs.macos-runner }}
+          features: pyo3_backend,servers/dashboard
+          arch: x86_64-apple-darwin
+          artifacts-dir-prefix: greptime-darwin-amd64-pyo3
     runs-on: ${{ matrix.os }}
-    outputs:
-      build-macos-result: ${{ steps.set-build-macos-result.outputs.build-macos-result }}
     needs: [
       allocate-runners,
     ]
     if: ${{ inputs.build_macos_artifacts || github.event_name == 'push' || github.event_name == 'schedule' }}
     steps:
-      - uses: actions/checkout@v4
+      - uses: actions/checkout@v3
         with:
           fetch-depth: 0
-          persist-credentials: false

       - uses: ./.github/actions/build-macos-artifacts
         with:
           arch: ${{ matrix.arch }}
+          rust-toolchain: ${{ env.RUST_TOOLCHAIN }}
           cargo-profile: ${{ env.CARGO_PROFILE }}
           features: ${{ matrix.features }}
           version: ${{ needs.allocate-runners.outputs.version }}
-          # We decide to disable the integration tests on macOS because it's unnecessary and time-consuming.
-          disable-run-tests: true
+          disable-run-tests: ${{ env.DISABLE_RUN_TESTS }}
           artifacts-dir: ${{ matrix.artifacts-dir-prefix }}-${{ needs.allocate-runners.outputs.version }}

-      - name: Set build macos result
-        id: set-build-macos-result
-        run: |
-          echo "build-macos-result=success" >> $GITHUB_OUTPUT

   build-windows-artifacts:
     name: Build Windows artifacts
     strategy:
@@ -279,9 +250,11 @@ jobs:
           arch: x86_64-pc-windows-msvc
           features: servers/dashboard
           artifacts-dir-prefix: greptime-windows-amd64
+        - os: ${{ needs.allocate-runners.outputs.windows-runner }}
+          arch: x86_64-pc-windows-msvc
+          features: pyo3_backend,servers/dashboard
+          artifacts-dir-prefix: greptime-windows-amd64-pyo3
     runs-on: ${{ matrix.os }}
-    outputs:
-      build-windows-result: ${{ steps.set-build-windows-result.outputs.build-windows-result }}
     needs: [
       allocate-runners,
     ]
@@ -289,25 +262,20 @@ jobs:
     steps:
       - run: git config --global core.autocrlf false

-      - uses: actions/checkout@v4
+      - uses: actions/checkout@v3
         with:
           fetch-depth: 0
-          persist-credentials: false

       - uses: ./.github/actions/build-windows-artifacts
         with:
           arch: ${{ matrix.arch }}
+          rust-toolchain: ${{ env.RUST_TOOLCHAIN }}
           cargo-profile: ${{ env.CARGO_PROFILE }}
           features: ${{ matrix.features }}
           version: ${{ needs.allocate-runners.outputs.version }}
           disable-run-tests: ${{ env.DISABLE_RUN_TESTS }}
           artifacts-dir: ${{ matrix.artifacts-dir-prefix }}-${{ needs.allocate-runners.outputs.version }}

-      - name: Set build windows result
-        id: set-build-windows-result
-        run: |
-          echo "build-windows-result=success" >> $Env:GITHUB_OUTPUT

   release-images-to-dockerhub:
     name: Build and push images to DockerHub
     if: ${{ inputs.release_images || github.event_name == 'push' || github.event_name == 'schedule' }}
@@ -315,32 +283,21 @@ jobs:
       allocate-runners,
       build-linux-amd64-artifacts,
       build-linux-arm64-artifacts,
-      run-multi-lang-tests,
     ]
-    runs-on: ubuntu-latest
-    outputs:
-      build-image-result: ${{ steps.set-build-image-result.outputs.build-image-result }}
+    runs-on: ubuntu-2004-16-cores
     steps:
-      - uses: actions/checkout@v4
+      - uses: actions/checkout@v3
         with:
           fetch-depth: 0
-          persist-credentials: false

       - name: Build and push images to dockerhub
         uses: ./.github/actions/build-images
         with:
           image-registry: docker.io
           image-namespace: ${{ vars.IMAGE_NAMESPACE }}
-          image-name: ${{ vars.GREPTIMEDB_IMAGE_NAME }}
           image-registry-username: ${{ secrets.DOCKERHUB_USERNAME }}
           image-registry-password: ${{ secrets.DOCKERHUB_TOKEN }}
           version: ${{ needs.allocate-runners.outputs.version }}
-          push-latest-tag: ${{ needs.allocate-runners.outputs.is-current-version-latest == 'true' && github.ref_type == 'tag' && !contains(github.ref_name, 'nightly') && github.event_name != 'schedule' }}

-      - name: Set build image result
-        id: set-build-image-result
-        run: |
-          echo "build-image-result=success" >> $GITHUB_OUTPUT

   release-cn-artifacts:
     name: Release artifacts to CN region
@@ -353,23 +310,22 @@ jobs:
       build-windows-artifacts,
       release-images-to-dockerhub,
     ]
-    runs-on: ubuntu-latest-16-cores
+    runs-on: ubuntu-20.04
     # When we push to ACR, it's easy to fail due to some unknown network issues.
     # However, we don't want to fail the whole workflow because of this.
     # The ACR have daily sync with DockerHub, so don't worry about the image not being updated.
     continue-on-error: true
     steps:
-      - uses: actions/checkout@v4
+      - uses: actions/checkout@v3
         with:
           fetch-depth: 0
-          persist-credentials: false

       - name: Release artifacts to CN region
         uses: ./.github/actions/release-cn-artifacts
         with:
           src-image-registry: docker.io
           src-image-namespace: ${{ vars.IMAGE_NAMESPACE }}
-          src-image-name: ${{ vars.GREPTIMEDB_IMAGE_NAME }}
+          src-image-name: greptimedb
           dst-image-registry-username: ${{ secrets.ALICLOUD_USERNAME }}
           dst-image-registry-password: ${{ secrets.ALICLOUD_PASSWORD }}
           dst-image-registry: ${{ vars.ACR_IMAGE_REGISTRY }}
@@ -380,9 +336,8 @@ jobs:
           aws-cn-secret-access-key: ${{ secrets.AWS_CN_SECRET_ACCESS_KEY }}
           aws-cn-region: ${{ vars.AWS_RELEASE_BUCKET_REGION }}
           dev-mode: false
-          upload-to-s3: true
           update-version-info: true
-          push-latest-tag: ${{ needs.allocate-runners.outputs.is-current-version-latest == 'true' && github.ref_type == 'tag' && !contains(github.ref_name, 'nightly') && github.event_name != 'schedule' }}
+          push-latest-tag: true

   publish-github-release:
     name: Create GitHub release and upload artifacts
@@ -394,14 +349,12 @@ jobs:
       build-macos-artifacts,
       build-windows-artifacts,
       release-images-to-dockerhub,
-      run-multi-lang-tests,
     ]
-    runs-on: ubuntu-latest
+    runs-on: ubuntu-20.04
     steps:
-      - uses: actions/checkout@v4
+      - uses: actions/checkout@v3
         with:
           fetch-depth: 0
-          persist-credentials: false

       - name: Publish GitHub release
         uses: ./.github/actions/publish-github-release
@@ -410,22 +363,21 @@ jobs:

   ### Stop runners ###
   # It's very necessary to split the job of releasing runners into 'stop-linux-amd64-runner' and 'stop-linux-arm64-runner'.
-  # Because we can terminate the specified EC2 instance immediately after the job is finished without unnecessary waiting.
+  # Because we can terminate the specified EC2 instance immediately after the job is finished without uncessary waiting.
   stop-linux-amd64-runner: # It's always run as the last job in the workflow to make sure that the runner is released.
     name: Stop linux-amd64 runner
     # Only run this job when the runner is allocated.
     if: ${{ always() }}
-    runs-on: ubuntu-latest
+    runs-on: ubuntu-20.04
     needs: [
       allocate-runners,
       build-linux-amd64-artifacts,
     ]
     steps:
       - name: Checkout
-        uses: actions/checkout@v4
+        uses: actions/checkout@v3
         with:
           fetch-depth: 0
-          persist-credentials: false

       - name: Stop EC2 runner
         uses: ./.github/actions/stop-runner
@@ -441,17 +393,16 @@ jobs:
     name: Stop linux-arm64 runner
     # Only run this job when the runner is allocated.
     if: ${{ always() }}
-    runs-on: ubuntu-latest
+    runs-on: ubuntu-20.04
     needs: [
       allocate-runners,
       build-linux-arm64-artifacts,
     ]
     steps:
       - name: Checkout
-        uses: actions/checkout@v4
+        uses: actions/checkout@v3
         with:
           fetch-depth: 0
-          persist-credentials: false

       - name: Stop EC2 runner
         uses: ./.github/actions/stop-runner
@@ -462,113 +413,3 @@ jobs:
           aws-secret-access-key: ${{ secrets.AWS_SECRET_ACCESS_KEY }}
           aws-region: ${{ vars.EC2_RUNNER_REGION }}
           github-token: ${{ secrets.GH_PERSONAL_ACCESS_TOKEN }}
-
-  bump-downstream-repo-versions:
-    name: Bump downstream repo versions
-    if: ${{ github.event_name == 'push' || github.event_name == 'schedule' }}
-    needs: [allocate-runners, publish-github-release]
-    runs-on: ubuntu-latest
-    # Permission reference: https://docs.github.com/en/actions/using-jobs/assigning-permissions-to-jobs
-    permissions:
-      issues: write # Allows the action to create issues for cyborg.
-      contents: write # Allows the action to create a release.
-    steps:
-      - uses: actions/checkout@v4
-        with:
-          fetch-depth: 0
-          persist-credentials: false
-      - uses: ./.github/actions/setup-cyborg
-      - name: Bump downstream repo versions
-        working-directory: cyborg
-        run: pnpm tsx bin/bump-versions.ts
-        env:
-          TARGET_REPOS: website,docs,demo
-          VERSION: ${{ needs.allocate-runners.outputs.version }}
-          GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
-          WEBSITE_REPO_TOKEN: ${{ secrets.WEBSITE_REPO_TOKEN }}
-          DOCS_REPO_TOKEN: ${{ secrets.DOCS_REPO_TOKEN }}
-          DEMO_REPO_TOKEN: ${{ secrets.DEMO_REPO_TOKEN }}
-
-  bump-helm-charts-version:
-    name: Bump helm charts version
-    if: ${{ github.ref_type == 'tag' && !contains(github.ref_name, 'nightly') && github.event_name != 'schedule' && needs.allocate-runners.outputs.is-current-version-latest == 'true' }}
-    needs: [allocate-runners, publish-github-release]
-    runs-on: ubuntu-latest
-    permissions:
-      contents: write
-      pull-requests: write
-    steps:
-      - name: Checkout repository
-        uses: actions/checkout@v4
-        with:
-          fetch-depth: 0
-
-      - name: Bump helm charts version
-        env:
-          GITHUB_TOKEN: ${{ secrets.HELM_CHARTS_REPO_TOKEN }}
-          VERSION: ${{ needs.allocate-runners.outputs.version }}
-        run: |
-          ./.github/scripts/update-helm-charts-version.sh
-
-  bump-homebrew-greptime-version:
-    name: Bump homebrew greptime version
-    if: ${{ github.ref_type == 'tag' && !contains(github.ref_name, 'nightly') && github.event_name != 'schedule' && needs.allocate-runners.outputs.is-current-version-latest == 'true' }}
-    needs: [allocate-runners, publish-github-release]
-    runs-on: ubuntu-latest
-    permissions:
-      contents: write
-      pull-requests: write
-    steps:
-      - name: Checkout repository
-        uses: actions/checkout@v4
-        with:
-          fetch-depth: 0
-
-      - name: Bump homebrew greptime version
-        env:
-          GITHUB_TOKEN: ${{ secrets.HOMEBREW_GREPTIME_REPO_TOKEN }}
-          VERSION: ${{ needs.allocate-runners.outputs.version }}
-        run: |
-          ./.github/scripts/update-homebrew-greptme-version.sh
-
-  notification:
-    if: ${{ github.repository == 'GreptimeTeam/greptimedb' && (github.event_name == 'push' || github.event_name == 'schedule') && always() }}
-    name: Send notification to Greptime team
-    needs: [
-      release-images-to-dockerhub,
-      build-macos-artifacts,
-      build-windows-artifacts,
-    ]
-    runs-on: ubuntu-latest
-    # Permission reference: https://docs.github.com/en/actions/using-jobs/assigning-permissions-to-jobs
-    permissions:
-      issues: write # Allows the action to create issues for cyborg.
-      contents: write # Allows the action to create a release.
-    env:
-      SLACK_WEBHOOK_URL: ${{ secrets.SLACK_WEBHOOK_URL_DEVELOP_CHANNEL }}
-    steps:
-      - uses: actions/checkout@v4
-        with:
-          fetch-depth: 0
-          persist-credentials: false
-      - uses: ./.github/actions/setup-cyborg
-      - name: Report CI status
-        id: report-ci-status
-        working-directory: cyborg
-        run: pnpm tsx bin/report-ci-failure.ts
-        env:
-          GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
-          CI_REPORT_STATUS: ${{ needs.release-images-to-dockerhub.outputs.build-image-result == 'success' && needs.build-windows-artifacts.outputs.build-windows-result == 'success' && needs.build-macos-artifacts.outputs.build-macos-result == 'success' }}
-      - name: Notify release successful result
-        uses: slackapi/slack-github-action@v1.25.0
-        if: ${{ needs.release-images-to-dockerhub.outputs.build-image-result == 'success' && needs.build-windows-artifacts.outputs.build-windows-result == 'success' && needs.build-macos-artifacts.outputs.build-macos-result == 'success' }}
-        with:
-          payload: |
-            {"text": "GreptimeDB's release version has completed successfully."}
-
-      - name: Notify release failed result
-        uses: slackapi/slack-github-action@v1.25.0
-        if: ${{ needs.release-images-to-dockerhub.outputs.build-image-result != 'success' || needs.build-windows-artifacts.outputs.build-windows-result != 'success' || needs.build-macos-artifacts.outputs.build-macos-result != 'success' }}
-        with:
-          payload: |
-            {"text": "GreptimeDB's release version has failed, please check ${{ steps.report-ci-status.outputs.html_url }}."}
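Note: the versioning comments in the create-version step above fully specify the scheme (the tag name on tag pushes, '<NEXT_RELEASE_VERSION>-nightly-YYYYMMDD' on schedule, '<NEXT_RELEASE_VERSION>-<short-git-sha>-timestamp' on manual dispatch). A hedged shell sketch of that logic follows; it is illustrative only and is not the repository's actual create-version script:

    # Illustrative only: derive a release version the way the comments describe.
    NEXT_RELEASE_VERSION=v0.6.0
    if [ "$GITHUB_EVENT_NAME" = "push" ]; then
      version="$GITHUB_REF_NAME"                                   # tag push: the tag itself
    elif [ "$GITHUB_EVENT_NAME" = "schedule" ]; then
      version="${NEXT_RELEASE_VERSION}-nightly-$(date +%Y%m%d)"    # e.g. v0.6.0-nightly-20230313
    else
      short_sha=$(git rev-parse --short HEAD)
      version="${NEXT_RELEASE_VERSION}-${short_sha}-$(date +%Y%m%d%H)"
    fi
    echo "version=$version" >> "$GITHUB_OUTPUT"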
.github/workflows/run-multi-lang-tests.yml (vendored, 194 changes)

@@ -1,194 +0,0 @@
-# Reusable workflow for running multi-language SDK tests against GreptimeDB
-# Used by: multi-lang-tests.yml, release.yml, nightly-build.yml
-# Supports both direct binary artifacts and tarball artifacts
-
-name: Run Multi-language SDK Tests
-
-on:
-  workflow_call:
-    inputs:
-      artifact-name:
-        required: true
-        type: string
-        description: 'Name of the artifact containing greptime binary'
-      http-port:
-        required: false
-        type: string
-        default: '4000'
-        description: 'HTTP server port'
-      mysql-port:
-        required: false
-        type: string
-        default: '4002'
-        description: 'MySQL server port'
-      postgres-port:
-        required: false
-        type: string
-        default: '4003'
-        description: 'PostgreSQL server port'
-      db-name:
-        required: false
-        type: string
-        default: 'test_db'
-        description: 'Test database name'
-      username:
-        required: false
-        type: string
-        default: 'greptime_user'
-        description: 'Authentication username'
-      password:
-        required: false
-        type: string
-        default: 'greptime_pwd'
-        description: 'Authentication password'
-      timeout-minutes:
-        required: false
-        type: number
-        default: 30
-        description: 'Job timeout in minutes'
-      artifact-is-tarball:
-        required: false
-        type: boolean
-        default: false
-        description: 'Whether the artifact is a tarball (tar.gz) that needs to be extracted'
-
-jobs:
-  run-tests:
-    name: Run Multi-language SDK Tests
-    runs-on: ubuntu-latest
-    timeout-minutes: ${{ inputs.timeout-minutes }}
-    steps:
-      - name: Checkout greptimedb-tests repository
-        uses: actions/checkout@v4
-        with:
-          repository: GreptimeTeam/greptimedb-tests
-          persist-credentials: false
-
-      - name: Download pre-built greptime binary
-        uses: actions/download-artifact@v4
-        with:
-          name: ${{ inputs.artifact-name }}
-          path: artifact
-
-      - name: Setup greptime binary
-        run: |
-          mkdir -p bin
-          if [ "${{ inputs.artifact-is-tarball }}" = "true" ]; then
-            # Extract tarball and find greptime binary
-            tar -xzf artifact/*.tar.gz -C artifact
-            find artifact -name "greptime" -type f -exec cp {} bin/greptime \;
-          else
-            # Direct binary format
-            if [ -f artifact/greptime ]; then
-              cp artifact/greptime bin/greptime
-            else
-              cp artifact/* bin/greptime
-            fi
-          fi
-          chmod +x ./bin/greptime
-          ls -lh ./bin/greptime
-          ./bin/greptime --version
-
-      - name: Setup Java 17
-        uses: actions/setup-java@v4
-        with:
-          distribution: 'temurin'
-          java-version: '17'
-          cache: 'maven'
-
-      - name: Setup Python 3.8
-        uses: actions/setup-python@v5
-        with:
-          python-version: '3.8'
-
-      - name: Setup Go 1.24
-        uses: actions/setup-go@v5
-        with:
-          go-version: '1.24'
-          cache: true
-          cache-dependency-path: go-tests/go.sum
-
-      - name: Set up Node.js
-        uses: actions/setup-node@v4
-        with:
-          node-version: '18'
-
-      - name: Install Python dependencies
-        run: |
-          pip install mysql-connector-python psycopg2-binary
-          python3 -c "import mysql.connector; print(f'mysql-connector-python {mysql.connector.__version__}')"
-          python3 -c "import psycopg2; print(f'psycopg2 {psycopg2.__version__}')"
-
-      - name: Install Go dependencies
-        working-directory: go-tests
-        run: |
-          go mod download
-          go mod verify
-          go version
-
-      - name: Kill existing GreptimeDB processes
-        run: |
-          pkill -f greptime || true
-          sleep 2
-
-      - name: Start GreptimeDB standalone
-        run: |
-          ./bin/greptime standalone start \
-            --http-addr 0.0.0.0:${{ inputs.http-port }} \
-            --rpc-addr 0.0.0.0:4001 \
-            --mysql-addr 0.0.0.0:${{ inputs.mysql-port }} \
-            --postgres-addr 0.0.0.0:${{ inputs.postgres-port }} \
-            --user-provider=static_user_provider:cmd:${{ inputs.username }}=${{ inputs.password }} > /tmp/greptimedb.log 2>&1 &
-
-      - name: Wait for GreptimeDB to be ready
-        run: |
-          echo "Waiting for GreptimeDB..."
-          for i in {1..60}; do
-            if curl -sf http://localhost:${{ inputs.http-port }}/health > /dev/null; then
-              echo "✅ GreptimeDB is ready"
-              exit 0
-            fi
-            sleep 2
-          done
-          echo "❌ GreptimeDB failed to start"
-          cat /tmp/greptimedb.log
-          exit 1
-
-      - name: Run multi-language tests
-        env:
-          DB_NAME: ${{ inputs.db-name }}
-          MYSQL_HOST: 127.0.0.1
-          MYSQL_PORT: ${{ inputs.mysql-port }}
-          POSTGRES_HOST: 127.0.0.1
-          POSTGRES_PORT: ${{ inputs.postgres-port }}
-          HTTP_HOST: 127.0.0.1
-          HTTP_PORT: ${{ inputs.http-port }}
-          GREPTIME_USERNAME: ${{ inputs.username }}
-          GREPTIME_PASSWORD: ${{ inputs.password }}
-        run: |
-          chmod +x ./run_tests.sh
-          ./run_tests.sh
-
-      - name: Collect logs on failure
-        if: failure()
-        run: |
-          echo "=== GreptimeDB Logs ==="
-          cat /tmp/greptimedb.log || true
-
-      - name: Upload test logs on failure
-        if: failure()
-        uses: actions/upload-artifact@v4
-        with:
-          name: test-logs
-          path: |
-            /tmp/greptimedb.log
-            java-tests/target/surefire-reports/
-            python-tests/.pytest_cache/
-            go-tests/*.log
-            **/test-output/
-          retention-days: 7
-
-      - name: Cleanup
-        if: always()
-        run: |
-          pkill -f greptime || true
.github/workflows/schedule.yml (vendored, 26 changes)

@@ -1,26 +0,0 @@
-name: Schedule Management
-on:
-  schedule:
-    - cron: '4 2 * * *'
-  workflow_dispatch:
-
-
-jobs:
-  maintenance:
-    name: Periodic Maintenance
-    runs-on: ubuntu-latest
-    permissions:
-      contents: read
-      issues: write
-      pull-requests: write
-    if: ${{ github.repository == 'GreptimeTeam/greptimedb' }}
-    steps:
-      - uses: actions/checkout@v4
-        with:
-          persist-credentials: false
-      - uses: ./.github/actions/setup-cyborg
-      - name: Do Maintenance
-        working-directory: cyborg
-        run: pnpm tsx bin/schedule.ts
-        env:
-          GITHUB_TOKEN: ${{ secrets.GH_PERSONAL_ACCESS_TOKEN }}
.github/workflows/semantic-pull-request.yml (vendored, 30 changes)

@@ -1,30 +0,0 @@
-name: "Semantic Pull Request"
-
-on:
-  pull_request_target:
-    types:
-      - opened
-      - reopened
-      - edited
-
-concurrency:
-  group: ${{ github.workflow }}-${{ github.head_ref || github.run_id }}
-  cancel-in-progress: true
-
-permissions:
-  contents: read
-  pull-requests: write
-  issues: write
-
-jobs:
-  check:
-    runs-on: ubuntu-latest
-    timeout-minutes: 10
-    steps:
-      - uses: actions/checkout@v4
-      - uses: ./.github/actions/setup-cyborg
-      - name: Check Pull Request
-        working-directory: cyborg
-        run: pnpm tsx bin/check-pull-request.ts
-        env:
-          GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
.github/workflows/size-label.yml (vendored, new file, 25 changes)

@@ -0,0 +1,25 @@
+name: size-labeler
+
+on: [pull_request_target]
+
+jobs:
+  labeler:
+    runs-on: ubuntu-latest
+    name: Label the PR size
+    permissions:
+      issues: write
+      pull-requests: write
+    steps:
+      - uses: codelytv/pr-size-labeler@v1
+        with:
+          GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
+          s_label: 'Size: S'
+          s_max_size: '100'
+          m_label: 'Size: M'
+          m_max_size: '500'
+          l_label: 'Size: L'
+          l_max_size: '1000'
+          xl_label: 'Size: XL'
+          fail_if_xl: 'false'
+          message_if_xl: ""
+          files_to_ignore: 'Cargo.lock'
.github/workflows/user-doc-label-checker.yml (vendored, new file, 19 changes)

@@ -0,0 +1,19 @@
+name: Check user doc labels
+on:
+  pull_request:
+    types:
+      - opened
+      - reopened
+      - labeled
+      - unlabeled
+
+jobs:
+
+  check_labels:
+    name: Check doc labels
+    runs-on: ubuntu-latest
+    steps:
+      - uses: docker://agilepathway/pull-request-label-checker:latest
+        with:
+          one_of: Doc update required,Doc not needed
+          repo_token: ${{ secrets.GITHUB_TOKEN }}
.gitignore (vendored, 21 changes)

@@ -28,7 +28,6 @@ debug/
 # Logs
 **/__unittest_logs
 logs/
-!grafana/dashboards/logs/

 # cpython's generated python byte code
 **/__pycache__/
@@ -47,23 +46,3 @@ benchmarks/data
 *.code-workspace

 venv/
-
-# Fuzz tests
-tests-fuzz/artifacts/
-tests-fuzz/corpus/
-
-# cargo-udeps reports
-udeps-report.json
-
-# Nix
-.direnv
-.envrc
-
-## default data home
-greptimedb_data
-
-# github
-!/.github
-
-# Claude code
-CLAUDE.md
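Note: the removed `!grafana/dashboards/logs/` line is a re-include that carves a subtree out of the broader `logs/` ignore pattern. `git check-ignore -v` is a quick way to see which rule wins for any given path; the two paths below are made up purely for illustration:

    # -v prints the ignore file, line number, and pattern that matched each path.
    git check-ignore -v logs/server.log grafana/dashboards/logs/overview.json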
.pre-commit-config.yaml

@@ -16,7 +16,6 @@ repos:
     hooks:
       - id: fmt
       - id: clippy
-        args: ["--workspace", "--all-targets", "--all-features", "--", "-D", "warnings"]
+        args: ["--workspace", "--all-targets", "--", "-D", "warnings", "-D", "clippy::print_stdout", "-D", "clippy::print_stderr"]
-        stages: [pre-push]
+        stages: [push]
      - id: cargo-check
-        args: ["--workspace", "--all-targets", "--all-features"]
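Note: either side of the `args` change above can be reproduced by hand from a workspace checkout. For example, the develop-side hook configuration is equivalent to:

    # Manual equivalent of the clippy hook on the develop side:
    # deny all warnings plus the stdout/stderr print lints.
    cargo clippy --workspace --all-targets -- \
      -D warnings -D clippy::print_stdout -D clippy::print_stderr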
AUTHOR.md (43 changes)

@@ -1,43 +0,0 @@
-# GreptimeDB Authors
-
-## Individual Committers (in alphabetical order)
-
-- [apdong2022](https://github.com/apdong2022)
-- [beryl678](https://github.com/beryl678)
-- [CookiePieWw](https://github.com/CookiePieWw)
-- [etolbakov](https://github.com/etolbakov)
-- [irenjj](https://github.com/irenjj)
-- [KKould](https://github.com/KKould)
-- [Lanqing Yang](https://github.com/lyang24)
-- [nicecui](https://github.com/nicecui)
-- [NiwakaDev](https://github.com/NiwakaDev)
-- [paomian](https://github.com/paomian)
-- [tisonkun](https://github.com/tisonkun)
-- [Wenjie0329](https://github.com/Wenjie0329)
-- [zhaoyingnan01](https://github.com/zhaoyingnan01)
-- [zhongzc](https://github.com/zhongzc)
-- [ZonaHex](https://github.com/ZonaHex)
-- [zyy17](https://github.com/zyy17)
-
-## Team Members (in alphabetical order)
-
-- [daviderli614](https://github.com/daviderli614)
-- [discord9](https://github.com/discord9)
-- [evenyag](https://github.com/evenyag)
-- [fengjiachun](https://github.com/fengjiachun)
-- [fengys1996](https://github.com/fengys1996)
-- [GrepTime](https://github.com/GrepTime)
-- [holalengyu](https://github.com/holalengyu)
-- [killme2008](https://github.com/killme2008)
-- [MichaelScofield](https://github.com/MichaelScofield)
-- [shuiyisong](https://github.com/shuiyisong)
-- [sunchanglong](https://github.com/sunchanglong)
-- [sunng87](https://github.com/sunng87)
-- [v0y4g3r](https://github.com/v0y4g3r)
-- [waynexia](https://github.com/waynexia)
-- [WenyXu](https://github.com/WenyXu)
-- [xtang](https://github.com/xtang)
-
-## All Contributors
-
-To see the full list of contributors, please visit our [Contributors page](https://github.com/GreptimeTeam/greptimedb/graphs/contributors)
CODE_OF_CONDUCT.md (new file, 132 changes)

@@ -0,0 +1,132 @@
+# Contributor Covenant Code of Conduct
+
+## Our Pledge
+
+We as members, contributors, and leaders pledge to make participation in our
+community a harassment-free experience for everyone, regardless of age, body
+size, visible or invisible disability, ethnicity, sex characteristics, gender
+identity and expression, level of experience, education, socio-economic status,
+nationality, personal appearance, race, caste, color, religion, or sexual
+identity and orientation.
+
+We pledge to act and interact in ways that contribute to an open, welcoming,
+diverse, inclusive, and healthy community.
+
+## Our Standards
+
+Examples of behavior that contributes to a positive environment for our
+community include:
+
+* Demonstrating empathy and kindness toward other people
+* Being respectful of differing opinions, viewpoints, and experiences
+* Giving and gracefully accepting constructive feedback
+* Accepting responsibility and apologizing to those affected by our mistakes,
+  and learning from the experience
+* Focusing on what is best not just for us as individuals, but for the overall
+  community
+
+Examples of unacceptable behavior include:
+
+* The use of sexualized language or imagery, and sexual attention or advances of
+  any kind
+* Trolling, insulting or derogatory comments, and personal or political attacks
+* Public or private harassment
+* Publishing others' private information, such as a physical or email address,
+  without their explicit permission
+* Other conduct which could reasonably be considered inappropriate in a
+  professional setting
+
+## Enforcement Responsibilities
+
+Community leaders are responsible for clarifying and enforcing our standards of
+acceptable behavior and will take appropriate and fair corrective action in
+response to any behavior that they deem inappropriate, threatening, offensive,
+or harmful.
+
+Community leaders have the right and responsibility to remove, edit, or reject
+comments, commits, code, wiki edits, issues, and other contributions that are
+not aligned to this Code of Conduct, and will communicate reasons for moderation
+decisions when appropriate.
+
+## Scope
+
+This Code of Conduct applies within all community spaces, and also applies when
+an individual is officially representing the community in public spaces.
+Examples of representing our community include using an official e-mail address,
+posting via an official social media account, or acting as an appointed
+representative at an online or offline event.
+
+## Enforcement
+
+Instances of abusive, harassing, or otherwise unacceptable behavior may be
+reported to the community leaders responsible for enforcement at
+info@greptime.com.
+All complaints will be reviewed and investigated promptly and fairly.
+
+All community leaders are obligated to respect the privacy and security of the
+reporter of any incident.
+
+## Enforcement Guidelines
+
+Community leaders will follow these Community Impact Guidelines in determining
+the consequences for any action they deem in violation of this Code of Conduct:
+
+### 1. Correction
+
+**Community Impact**: Use of inappropriate language or other behavior deemed
+unprofessional or unwelcome in the community.
+
+**Consequence**: A private, written warning from community leaders, providing
+clarity around the nature of the violation and an explanation of why the
+behavior was inappropriate. A public apology may be requested.
+
+### 2. Warning
+
+**Community Impact**: A violation through a single incident or series of
+actions.
+
+**Consequence**: A warning with consequences for continued behavior. No
+interaction with the people involved, including unsolicited interaction with
+those enforcing the Code of Conduct, for a specified period of time. This
+includes avoiding interactions in community spaces as well as external channels
+like social media. Violating these terms may lead to a temporary or permanent
+ban.
+
+### 3. Temporary Ban
+
+**Community Impact**: A serious violation of community standards, including
+sustained inappropriate behavior.
+
+**Consequence**: A temporary ban from any sort of interaction or public
+communication with the community for a specified period of time. No public or
+private interaction with the people involved, including unsolicited interaction
+with those enforcing the Code of Conduct, is allowed during this period.
+Violating these terms may lead to a permanent ban.
+
+### 4. Permanent Ban
+
+**Community Impact**: Demonstrating a pattern of violation of community
+standards, including sustained inappropriate behavior, harassment of an
+individual, or aggression toward or disparagement of classes of individuals.
+
+**Consequence**: A permanent ban from any sort of public interaction within the
+community.
+
+## Attribution
+
+This Code of Conduct is adapted from the [Contributor Covenant][homepage],
+version 2.1, available at
+[https://www.contributor-covenant.org/version/2/1/code_of_conduct.html][v2.1].
+
+Community Impact Guidelines were inspired by
+[Mozilla's code of conduct enforcement ladder][Mozilla CoC].
+
+For answers to common questions about this code of conduct, see the FAQ at
+[https://www.contributor-covenant.org/faq][FAQ]. Translations are available at
+[https://www.contributor-covenant.org/translations][translations].
+
+[homepage]: https://www.contributor-covenant.org
+[v2.1]: https://www.contributor-covenant.org/version/2/1/code_of_conduct.html
+[Mozilla CoC]: https://github.com/mozilla/diversity
+[FAQ]: https://www.contributor-covenant.org/faq
+[translations]: https://www.contributor-covenant.org/translations
@@ -2,11 +2,7 @@
|
|||||||
|
|
||||||
Thanks a lot for considering contributing to GreptimeDB. We believe people like you would make GreptimeDB a great product. We intend to build a community where individuals can have open talks, show respect for one another, and speak with true ❤️. Meanwhile, we are to keep transparency and make your effort count here.
|
Thanks a lot for considering contributing to GreptimeDB. We believe people like you would make GreptimeDB a great product. We intend to build a community where individuals can have open talks, show respect for one another, and speak with true ❤️. Meanwhile, we are to keep transparency and make your effort count here.
|
||||||
|
|
||||||
You can find our contributors at https://github.com/GreptimeTeam/greptimedb/graphs/contributors. When you dedicate to GreptimeDB for a few months and keep bringing high-quality contributions (code, docs, advocate, etc.), you will be a candidate of a committer.
|
Please read the guidelines, and they can help you get started. Communicate with respect to developers maintaining and developing the project. In return, they should reciprocate that respect by addressing your issue, reviewing changes, as well as helping finalize and merge your pull requests.
|
||||||
|
|
||||||
A committer will be granted both read & write access to GreptimeDB repos. Check the [AUTHOR.md](AUTHOR.md) file for all current individual committers.
|
|
||||||
|
|
||||||
Please read the guidelines, and they can help you get started. Communicate respectfully with the developers maintaining and developing the project. In return, they should reciprocate that respect by addressing your issue, reviewing changes, as well as helping finalize and merge your pull requests.
|
|
||||||
|
|
||||||
 Follow our [README](https://github.com/GreptimeTeam/greptimedb#readme) to get the whole picture of the project. To learn about the design of GreptimeDB, please refer to the [design docs](https://github.com/GreptimeTeam/docs).

@@ -14,7 +10,7 @@ Follow our [README](https://github.com/GreptimeTeam/greptimedb#readme) to get th

 It can feel intimidating to contribute to a complex project, but it can also be exciting and fun. These general notes will help everyone participate in this communal activity.

-- Follow the [Code of Conduct](https://github.com/GreptimeTeam/.github/blob/main/.github/CODE_OF_CONDUCT.md)
+- Follow the [Code of Conduct](https://github.com/GreptimeTeam/greptimedb/blob/develop/CODE_OF_CONDUCT.md)
 - Small changes make huge differences. We will happily accept a PR making a single-character change if it helps move forward. Don't wait to have everything working.
 - Check the closed issues before opening your issue.
 - Try to follow the existing style of the code.
@@ -30,7 +26,7 @@ Pull requests are great, but we accept all kinds of other help if you like. Such

 ## Code of Conduct

-Also, there are things that we are not looking for because they don't match the goals of the product or benefit the community. Please read the [Code of Conduct](https://github.com/GreptimeTeam/.github/blob/main/.github/CODE_OF_CONDUCT.md); we hope everyone can keep good manners and become an honored member.
+Also, there are things that we are not looking for because they don't match the goals of the product or benefit the community. Please read the [Code of Conduct](https://github.com/GreptimeTeam/greptimedb/blob/develop/CODE_OF_CONDUCT.md); we hope everyone can keep good manners and become an honored member.

 ## License

@@ -54,19 +50,15 @@ GreptimeDB uses the [Apache 2.0 license](https://github.com/GreptimeTeam/greptim

 - To ensure that the community is free and confident in its ability to use your contributions, please sign the Contributor License Agreement (CLA), which will be incorporated in the pull request process.
 - Make sure all files have a proper license header (run `docker run --rm -v $(pwd):/github/workspace ghcr.io/korandoru/hawkeye-native:v3 format` from the project root).
-- Make sure all your code is formatted and follows the [coding style](https://pingcap.github.io/style-guide/rust/) and [style guide](docs/style-guide.md).
+- Make sure all your code is formatted and follows the [coding style](https://pingcap.github.io/style-guide/rust/).
-- Make sure all unit tests pass, using [nextest](https://nexte.st/index.html): `cargo nextest run --workspace --features pg_kvbackend,mysql_kvbackend` or `make test`.
+- Make sure all unit tests pass (using `cargo test --workspace` or [nextest](https://nexte.st/index.html) `cargo nextest run`).
-- Make sure all clippy warnings are fixed (you can check locally by running `cargo clippy --workspace --all-targets -- -D warnings` or `make clippy`).
+- Make sure all clippy warnings are fixed (you can check locally by running `cargo clippy --workspace --all-targets -- -D warnings`).
-- Ensure there are no unused dependencies by running `make check-udeps` (clean them up with `make fix-udeps` if reported).
-- If you must keep a target-specific dependency (e.g. under `[target.'cfg(...)'.dev-dependencies]`), add a cargo-udeps ignore entry in the same `Cargo.toml`, for example:
-  `[package.metadata.cargo-udeps.ignore]` with `development = ["rexpect"]` (or `dependencies`/`build` as appropriate).
-- When modifying sample configuration files in `config/`, run `make config-docs` (which requires Docker to be installed) to update the configuration documentation and include it in your commit.
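An aside on the removed cargo-udeps guideline above: the ignore entry it describes lives in the affected crate's own `Cargo.toml`. A minimal sketch, assuming a Unix-only dev-dependency — the `cfg(unix)` gate and version number are illustrative, while `rexpect` is the example named in the removed line:

```toml
# Hypothetical member crate: a dev-dependency compiled only on some targets,
# which cargo-udeps would otherwise report as unused elsewhere.
[target.'cfg(unix)'.dev-dependencies]
rexpect = "0.5"

# Tell cargo-udeps to skip it; `development` covers dev-dependencies,
# and the other dependency kinds have analogous keys.
[package.metadata.cargo-udeps.ignore]
development = ["rexpect"]
```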

 #### `pre-commit` Hooks

 You could setup the [`pre-commit`](https://pre-commit.com/#plugins) hooks to run these checks on every commit automatically.

 1. Install `pre-commit`

        pip install pre-commit

@@ -74,7 +66,7 @@ You could setup the [`pre-commit`](https://pre-commit.com/#plugins) hooks to run

        brew install pre-commit

 2. Install the `pre-commit` hooks

        $ pre-commit install
        pre-commit installed at .git/hooks/pre-commit

@@ -112,7 +104,7 @@ of what you were trying to do and what went wrong. You can also reach for help i
 The core team will be thrilled if you would like to participate in any way you like. When you are stuck, try to ask for help by filing an issue, with a detailed description of what you were trying to do and what went wrong. If you have any questions or if you would like to get involved in our community, please check out:

 - [GreptimeDB Community Slack](https://greptime.com/slack)
-- [GreptimeDB GitHub Discussions](https://github.com/GreptimeTeam/greptimedb/discussions)
+- [GreptimeDB Github Discussions](https://github.com/GreptimeTeam/greptimedb/discussions)

 Also, see some extra GreptimeDB content:
Cargo.lock (generated, 11818 lines changed): file diff suppressed because it is too large.

Cargo.toml (258 lines changed):
@@ -1,263 +1,153 @@
 [workspace]
 members = [
+    "benchmarks",
     "src/api",
     "src/auth",
-    "src/cache",
     "src/catalog",
-    "src/cli",
     "src/client",
     "src/cmd",
     "src/common/base",
     "src/common/catalog",
     "src/common/config",
     "src/common/datasource",
-    "src/common/decimal",
     "src/common/error",
-    "src/common/event-recorder",
-    "src/common/frontend",
     "src/common/function",
+    "src/common/macro",
     "src/common/greptimedb-telemetry",
     "src/common/grpc",
     "src/common/grpc-expr",
-    "src/common/macro",
     "src/common/mem-prof",
     "src/common/meta",
-    "src/common/options",
-    "src/common/plugins",
-    "src/common/pprof",
     "src/common/procedure",
     "src/common/procedure-test",
     "src/common/query",
     "src/common/recordbatch",
     "src/common/runtime",
-    "src/common/session",
-    "src/common/sql",
-    "src/common/stat",
     "src/common/substrait",
     "src/common/telemetry",
     "src/common/test-util",
     "src/common/time",
+    "src/common/decimal",
     "src/common/version",
-    "src/common/wal",
-    "src/common/workload",
     "src/datanode",
     "src/datatypes",
     "src/file-engine",
-    "src/flow",
     "src/frontend",
-    "src/index",
-    "src/log-query",
     "src/log-store",
     "src/meta-client",
     "src/meta-srv",
     "src/metric-engine",
-    "src/mito-codec",
     "src/mito2",
     "src/object-store",
     "src/operator",
     "src/partition",
-    "src/pipeline",
     "src/plugins",
     "src/promql",
     "src/puffin",
     "src/query",
-    "src/standalone",
+    "src/script",
     "src/servers",
     "src/session",
     "src/sql",
     "src/store-api",
     "src/table",
-    "tests-fuzz",
+    "src/index",
     "tests-integration",
     "tests/runner",
 ]
 resolver = "2"

 [workspace.package]
-version = "1.0.0-beta.2"
+version = "0.5.0"
-edition = "2024"
+edition = "2021"
 license = "Apache-2.0"

-[workspace.lints]
-clippy.print_stdout = "warn"
-clippy.print_stderr = "warn"
-clippy.dbg_macro = "warn"
-clippy.implicit_clone = "warn"
-clippy.result_large_err = "allow"
-clippy.large_enum_variant = "allow"
-clippy.doc_overindented_list_items = "allow"
-clippy.uninlined_format_args = "allow"
-rust.unknown_lints = "deny"
-rust.unexpected_cfgs = { level = "warn", check-cfg = ['cfg(tokio_unstable)'] }
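A note on the `[workspace.lints]` table removed above: workspace lint tables are not applied automatically; each member crate opts in from its own `Cargo.toml`. A minimal sketch of the opt-in (the member path is illustrative):

```toml
# In a member crate, e.g. src/api/Cargo.toml:
[lints]
workspace = true  # inherit the clippy.* and rust.* settings from [workspace.lints]
```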

 [workspace.dependencies]
-# DO_NOT_REMOVE_THIS: BEGIN_OF_EXTERNAL_DEPENDENCIES
-# We turn off default-features for some dependencies here so the workspaces which inherit them can
-# selectively turn them on if needed, since we can override default-features = true (from false)
-# for the inherited dependency but cannot do the reverse (override from true to false).
-#
-# See for more details: https://github.com/rust-lang/cargo/issues/11329
 ahash = { version = "0.8", features = ["compile-time-rng"] }
-aquamarine = "0.6"
+aquamarine = "0.3"
-arrow = { version = "56.2", features = ["prettyprint"] }
+arrow = { version = "47.0" }
-arrow-array = { version = "56.2", default-features = false, features = ["chrono-tz"] }
+arrow-array = "47.0"
-arrow-buffer = "56.2"
+arrow-flight = "47.0"
-arrow-flight = "56.2"
+arrow-schema = { version = "47.0", features = ["serde"] }
-arrow-ipc = { version = "56.2", default-features = false, features = ["lz4", "zstd"] }
-arrow-schema = { version = "56.2", features = ["serde"] }
 async-stream = "0.3"
 async-trait = "0.1"
-# Remember to update axum-extra, axum-macros when updating axum
+base64 = "0.21"
-axum = "0.8"
-axum-extra = "0.10"
-axum-macros = "0.5"
-backon = "1"
-base64 = "0.22"
 bigdecimal = "0.4.2"
 bitflags = "2.4.1"
 bytemuck = "1.12"
-bytes = { version = "1.7", features = ["serde"] }
+bytes = { version = "1.5", features = ["serde"] }
 chrono = { version = "0.4", features = ["serde"] }
-chrono-tz = { version = "0.10.1", features = ["case-insensitive"] }
+dashmap = "5.4"
-clap = { version = "4.4", features = ["derive"] }
+datafusion = { git = "https://github.com/apache/arrow-datafusion.git", rev = "26e43acac3a96cec8dd4c8365f22dfb1a84306e9" }
-config = "0.13.0"
+datafusion-common = { git = "https://github.com/apache/arrow-datafusion.git", rev = "26e43acac3a96cec8dd4c8365f22dfb1a84306e9" }
-const_format = "0.2"
+datafusion-expr = { git = "https://github.com/apache/arrow-datafusion.git", rev = "26e43acac3a96cec8dd4c8365f22dfb1a84306e9" }
-crossbeam-utils = "0.8"
+datafusion-optimizer = { git = "https://github.com/apache/arrow-datafusion.git", rev = "26e43acac3a96cec8dd4c8365f22dfb1a84306e9" }
-dashmap = "6.1"
+datafusion-physical-expr = { git = "https://github.com/apache/arrow-datafusion.git", rev = "26e43acac3a96cec8dd4c8365f22dfb1a84306e9" }
-datafusion = "50"
+datafusion-sql = { git = "https://github.com/apache/arrow-datafusion.git", rev = "26e43acac3a96cec8dd4c8365f22dfb1a84306e9" }
-datafusion-common = "50"
+datafusion-substrait = { git = "https://github.com/apache/arrow-datafusion.git", rev = "26e43acac3a96cec8dd4c8365f22dfb1a84306e9" }
-datafusion-expr = "50"
+derive_builder = "0.12"
-datafusion-functions = "50"
+etcd-client = "0.12"
-datafusion-functions-aggregate-common = "50"
-datafusion-optimizer = "50"
-datafusion-orc = "0.5"
-datafusion-pg-catalog = "0.12.2"
-datafusion-physical-expr = "50"
-datafusion-physical-plan = "50"
-datafusion-sql = "50"
-datafusion-substrait = "50"
-deadpool = "0.12"
-deadpool-postgres = "0.14"
-derive_builder = "0.20"
-dotenv = "0.15"
-either = "1.15"
-etcd-client = { git = "https://github.com/GreptimeTeam/etcd-client", rev = "f62df834f0cffda355eba96691fe1a9a332b75a7", features = [
-    "tls",
-    "tls-roots",
-] }
 fst = "0.4.7"
 futures = "0.3"
 futures-util = "0.3"
-greptime-proto = { git = "https://github.com/GreptimeTeam/greptime-proto.git", rev = "0df99f09f1d6785055b2d9da96fc4ecc2bdf6803" }
+greptime-proto = { git = "https://github.com/GreptimeTeam/greptime-proto.git", rev = "a31ea166fc015ea7ff111ac94e26c3a5d64364d2" }
-hex = "0.4"
-http = "1"
-humantime = "2.1"
 humantime-serde = "1.1"
-hyper = "1.1"
+itertools = "0.10"
-hyper-util = "0.1"
-itertools = "0.14"
-jsonb = { git = "https://github.com/databendlabs/jsonb.git", rev = "8c8d2fc294a39f3ff08909d60f718639cfba3875", default-features = false }
 lazy_static = "1.4"
-local-ip-address = "0.6"
+meter-core = { git = "https://github.com/GreptimeTeam/greptime-meter.git", rev = "abbd357c1e193cd270ea65ee7652334a150b628f" }
-loki-proto = { git = "https://github.com/GreptimeTeam/loki-proto.git", rev = "3b7cd33234358b18ece977bf689dc6fb760f29ab" }
+mockall = "0.11.4"
-meter-core = { git = "https://github.com/GreptimeTeam/greptime-meter.git", rev = "5618e779cf2bb4755b499c630fba4c35e91898cb" }
-mockall = "0.13"
 moka = "0.12"
-nalgebra = "0.33"
-nix = { version = "0.30.1", default-features = false, features = ["event", "fs", "process"] }
-notify = "8.0"
-num_cpus = "1.16"
-object_store_opendal = "0.54"
 once_cell = "1.18"
-opentelemetry-proto = { version = "0.30", features = [
+opentelemetry-proto = { git = "https://github.com/waynexia/opentelemetry-rust.git", rev = "33841b38dda79b15f2024952be5f32533325ca02", features = [
     "gen-tonic",
     "metrics",
     "trace",
-    "with-serde",
-    "logs",
 ] }
-ordered-float = { version = "4.3", features = ["serde"] }
+parquet = "47.0"
-otel-arrow-rust = { git = "https://github.com/GreptimeTeam/otel-arrow", rev = "2d64b7c0fa95642028a8205b36fe9ea0b023ec59", features = [
-    "server",
-] }
-parking_lot = "0.12"
-parquet = { version = "56.2", default-features = false, features = ["arrow", "async", "object_store"] }
 paste = "1.0"
 pin-project = "1.0"
-pretty_assertions = "1.4.0"
 prometheus = { version = "0.13.3", features = ["process"] }
-promql-parser = { version = "0.6", features = ["ser"] }
+prost = "0.12"
-prost = { version = "0.13", features = ["no-recursion-limit"] }
+raft-engine = { git = "https://github.com/tikv/raft-engine.git", rev = "22dfb426cd994602b57725ef080287d3e53db479" }
-prost-types = "0.13"
+rand = "0.8"
-raft-engine = { version = "0.4.1", default-features = false }
+regex = "1.8"
-rand = "0.9"
+regex-automata = { version = "0.1", features = ["transducer"] }
-ratelimit = "0.10"
+reqwest = { version = "0.11", default-features = false, features = [
-regex = "1.12"
-regex-automata = "0.4"
-reqwest = { version = "0.12", default-features = false, features = [
     "json",
     "rustls-tls-native-roots",
     "stream",
-    "multipart",
 ] }
-rskafka = { git = "https://github.com/WenyXu/rskafka.git", rev = "7b0f31ed39db049b4ee2e5f1e95b5a30be9baf76", features = [
+rskafka = "0.5"
-    "transport-tls",
-] }
-rstest = "0.25"
-rstest_reuse = "0.7"
 rust_decimal = "1.33"
-rustc-hash = "2.0"
-# It is worth noting that we should try to avoid using aws-lc-rs until it can be compiled on various platforms.
-hostname = "0.4.0"
-rustls = { version = "0.23.25", default-features = false }
-sea-query = "0.32"
 serde = { version = "1.0", features = ["derive"] }
-serde_json = { version = "1.0", features = ["float_roundtrip"] }
+serde_json = "1.0"
-serde_with = "3"
+smallvec = "1"
-simd-json = "0.15"
+snafu = "0.7"
-similar-asserts = "1.6.0"
+# on branch v0.38.x
-smallvec = { version = "1", features = ["serde"] }
+sqlparser = { git = "https://github.com/GreptimeTeam/sqlparser-rs.git", rev = "6a93567ae38d42be5c8d08b13c8ff4dde26502ef", features = [
-snafu = "0.8"
+    "visitor",
-sqlparser = { version = "0.58.0", default-features = false, features = ["std", "visitor", "serde"] }
+] }
-sqlx = { version = "0.8", default-features = false, features = ["any", "macros", "json", "runtime-tokio-rustls"] }
+strum = { version = "0.25", features = ["derive"] }
-strum = { version = "0.27", features = ["derive"] }
-sysinfo = "0.33"
 tempfile = "3"
-tokio = { version = "1.47", features = ["full"] }
+tokio = { version = "1.28", features = ["full"] }
-tokio-postgres = "0.7"
+tokio-stream = { version = "0.1" }
-tokio-rustls = { version = "0.26.2", default-features = false }
-tokio-stream = "0.1"
 tokio-util = { version = "0.7", features = ["io-util", "compat"] }
 toml = "0.8.8"
-tonic = { version = "0.13", features = ["tls-ring", "gzip", "zstd"] }
+tonic = { version = "0.10", features = ["tls"] }
-tower = "0.5"
+uuid = { version = "1", features = ["serde", "v4", "fast-rng"] }
-tower-http = "0.6"
-tracing = "0.1"
-tracing-appender = "0.2"
-tracing-opentelemetry = "0.31.0"
-tracing-subscriber = { version = "0.3", features = ["env-filter", "json", "fmt"] }
-typetag = "0.2"
-uuid = { version = "1.17", features = ["serde", "v4", "fast-rng"] }
-vrl = "0.25"
-zstd = "0.13"
-# DO_NOT_REMOVE_THIS: END_OF_EXTERNAL_DEPENDENCIES

 ## workspaces members
 api = { path = "src/api" }
 auth = { path = "src/auth" }
-cache = { path = "src/cache" }
 catalog = { path = "src/catalog" }
-cli = { path = "src/cli" }
 client = { path = "src/client" }
-cmd = { path = "src/cmd", default-features = false }
+cmd = { path = "src/cmd" }
 common-base = { path = "src/common/base" }
 common-catalog = { path = "src/common/catalog" }
 common-config = { path = "src/common/config" }
 common-datasource = { path = "src/common/datasource" }
 common-decimal = { path = "src/common/decimal" }
 common-error = { path = "src/common/error" }
-common-event-recorder = { path = "src/common/event-recorder" }
-common-frontend = { path = "src/common/frontend" }
 common-function = { path = "src/common/function" }
 common-greptimedb-telemetry = { path = "src/common/greptimedb-telemetry" }
 common-grpc = { path = "src/common/grpc" }
@@ -265,89 +155,49 @@ common-grpc-expr = { path = "src/common/grpc-expr" }
 common-macro = { path = "src/common/macro" }
 common-mem-prof = { path = "src/common/mem-prof" }
 common-meta = { path = "src/common/meta" }
-common-options = { path = "src/common/options" }
-common-plugins = { path = "src/common/plugins" }
 common-pprof = { path = "src/common/pprof" }
 common-procedure = { path = "src/common/procedure" }
 common-procedure-test = { path = "src/common/procedure-test" }
 common-query = { path = "src/common/query" }
 common-recordbatch = { path = "src/common/recordbatch" }
 common-runtime = { path = "src/common/runtime" }
-common-session = { path = "src/common/session" }
-common-sql = { path = "src/common/sql" }
-common-stat = { path = "src/common/stat" }
 common-telemetry = { path = "src/common/telemetry" }
 common-test-util = { path = "src/common/test-util" }
 common-time = { path = "src/common/time" }
 common-version = { path = "src/common/version" }
-common-wal = { path = "src/common/wal" }
-common-workload = { path = "src/common/workload" }
 datanode = { path = "src/datanode" }
 datatypes = { path = "src/datatypes" }
 file-engine = { path = "src/file-engine" }
-flow = { path = "src/flow" }
+frontend = { path = "src/frontend" }
-frontend = { path = "src/frontend", default-features = false }
-index = { path = "src/index" }
-log-query = { path = "src/log-query" }
 log-store = { path = "src/log-store" }
 meta-client = { path = "src/meta-client" }
 meta-srv = { path = "src/meta-srv" }
 metric-engine = { path = "src/metric-engine" }
-mito-codec = { path = "src/mito-codec" }
 mito2 = { path = "src/mito2" }
 object-store = { path = "src/object-store" }
 operator = { path = "src/operator" }
 partition = { path = "src/partition" }
-pipeline = { path = "src/pipeline" }
 plugins = { path = "src/plugins" }
 promql = { path = "src/promql" }
-puffin = { path = "src/puffin" }
 query = { path = "src/query" }
+script = { path = "src/script" }
 servers = { path = "src/servers" }
 session = { path = "src/session" }
 sql = { path = "src/sql" }
-standalone = { path = "src/standalone" }
 store-api = { path = "src/store-api" }
 substrait = { path = "src/common/substrait" }
 table = { path = "src/table" }

 [workspace.dependencies.meter-macros]
 git = "https://github.com/GreptimeTeam/greptime-meter.git"
-rev = "5618e779cf2bb4755b499c630fba4c35e91898cb"
+rev = "abbd357c1e193cd270ea65ee7652334a150b628f"

-[patch.crates-io]
-datafusion = { git = "https://github.com/GreptimeTeam/datafusion.git", rev = "fd4b2abcf3c3e43e94951bda452c9fd35243aab0" }
-datafusion-common = { git = "https://github.com/GreptimeTeam/datafusion.git", rev = "fd4b2abcf3c3e43e94951bda452c9fd35243aab0" }
-datafusion-expr = { git = "https://github.com/GreptimeTeam/datafusion.git", rev = "fd4b2abcf3c3e43e94951bda452c9fd35243aab0" }
-datafusion-functions = { git = "https://github.com/GreptimeTeam/datafusion.git", rev = "fd4b2abcf3c3e43e94951bda452c9fd35243aab0" }
-datafusion-functions-aggregate-common = { git = "https://github.com/GreptimeTeam/datafusion.git", rev = "fd4b2abcf3c3e43e94951bda452c9fd35243aab0" }
-datafusion-optimizer = { git = "https://github.com/GreptimeTeam/datafusion.git", rev = "fd4b2abcf3c3e43e94951bda452c9fd35243aab0" }
-datafusion-physical-expr = { git = "https://github.com/GreptimeTeam/datafusion.git", rev = "fd4b2abcf3c3e43e94951bda452c9fd35243aab0" }
-datafusion-physical-expr-common = { git = "https://github.com/GreptimeTeam/datafusion.git", rev = "fd4b2abcf3c3e43e94951bda452c9fd35243aab0" }
-datafusion-physical-plan = { git = "https://github.com/GreptimeTeam/datafusion.git", rev = "fd4b2abcf3c3e43e94951bda452c9fd35243aab0" }
-datafusion-datasource = { git = "https://github.com/GreptimeTeam/datafusion.git", rev = "fd4b2abcf3c3e43e94951bda452c9fd35243aab0" }
-datafusion-sql = { git = "https://github.com/GreptimeTeam/datafusion.git", rev = "fd4b2abcf3c3e43e94951bda452c9fd35243aab0" }
-datafusion-substrait = { git = "https://github.com/GreptimeTeam/datafusion.git", rev = "fd4b2abcf3c3e43e94951bda452c9fd35243aab0" }
-sqlparser = { git = "https://github.com/GreptimeTeam/sqlparser-rs.git", rev = "4b519a5caa95472cc3988f5556813a583dd35af1" } # branch = "v0.58.x"

 [profile.release]
 debug = 1

 [profile.nightly]
 inherits = "release"
-strip = "debuginfo"
+strip = true
 lto = "thin"
 debug = false
 incremental = false

-[profile.ci]
-inherits = "dev"
-strip = true

-[profile.dev.package.sqlness-runner]
-debug = false
-strip = true

-[profile.dev.package.tests-fuzz]
-debug = false
-strip = true
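One more note, on the `# We turn off default-features ...` comment near the top of this file's diff: Cargo lets a member crate re-enable default features that the workspace root disabled, but not the reverse. A minimal sketch with a placeholder dependency (`some-dep` and its `extra` feature are not part of this workspace):

```toml
# --- workspace root Cargo.toml ---
[workspace.dependencies]
some-dep = { version = "1", default-features = false }

# --- a member crate's Cargo.toml ---
[dependencies]
# Overriding false -> true works; if the root had said `default-features = true`,
# writing `false` here would be ignored (see rust-lang/cargo#11329).
some-dep = { workspace = true, default-features = true, features = ["extra"] }
```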
Cross-compilation settings (file name not preserved in this listing):

@@ -1,6 +1,3 @@
-[target.aarch64-unknown-linux-gnu]
-image = "ghcr.io/cross-rs/aarch64-unknown-linux-gnu:0.2.5"

 [build]
 pre-build = [
     "dpkg --add-architecture $CROSS_DEB_ARCH",
@@ -8,8 +5,3 @@ pre-build = [
     "curl -LO https://github.com/protocolbuffers/protobuf/releases/download/v3.15.8/protoc-3.15.8-linux-x86_64.zip && unzip protoc-3.15.8-linux-x86_64.zip -d /usr/",
     "chmod a+x /usr/bin/protoc && chmod -R a+rx /usr/include/google",
 ]

-[build.env]
-passthrough = [
-    "JEMALLOC_SYS_WITH_LG_PAGE",
-]
Makefile (98 lines changed):
@@ -3,12 +3,10 @@ CARGO_PROFILE ?=
 FEATURES ?=
 TARGET_DIR ?=
 TARGET ?=
-BUILD_BIN ?= greptime
 CARGO_BUILD_OPTS := --locked
 IMAGE_REGISTRY ?= docker.io
 IMAGE_NAMESPACE ?= greptime
 IMAGE_TAG ?= latest
-DEV_BUILDER_IMAGE_TAG ?= 2025-10-01-8fe17d43-20251011080129
 BUILDX_MULTI_PLATFORM_BUILD ?= false
 BUILDX_BUILDER_NAME ?= gtbuilder
 BASE_IMAGE ?= ubuntu
@@ -16,15 +14,12 @@ RUST_TOOLCHAIN ?= $(shell cat rust-toolchain.toml | grep channel | cut -d'"' -f2
 CARGO_REGISTRY_CACHE ?= ${HOME}/.cargo/registry
 ARCH := $(shell uname -m | sed 's/x86_64/amd64/' | sed 's/aarch64/arm64/')
 OUTPUT_DIR := $(shell if [ "$(RELEASE)" = "true" ]; then echo "release"; elif [ ! -z "$(CARGO_PROFILE)" ]; then echo "$(CARGO_PROFILE)" ; else echo "debug"; fi)
-SQLNESS_OPTS ?=
-EXTRA_BUILD_ENVS ?=
-ASSEMBLED_EXTRA_BUILD_ENV := $(foreach var,$(EXTRA_BUILD_ENVS),-e $(var))

 # The arguments for running integration tests.
 ETCD_VERSION ?= v3.5.9
 ETCD_IMAGE ?= quay.io/coreos/etcd:${ETCD_VERSION}
 RETRY_COUNT ?= 3
-NEXTEST_OPTS := --retries ${RETRY_COUNT} --features pg_kvbackend,mysql_kvbackend
+NEXTEST_OPTS := --retries ${RETRY_COUNT}
 BUILD_JOBS ?= $(shell which nproc 1>/dev/null && expr $$(nproc) / 2) # If nproc is not available, we don't set the build jobs.
 ifeq ($(BUILD_JOBS), 0) # If the number of cores is less than 2, set the build jobs to 1.
 BUILD_JOBS := 1
@@ -34,10 +29,6 @@ ifneq ($(strip $(BUILD_JOBS)),)
 NEXTEST_OPTS += --build-jobs=${BUILD_JOBS}
 endif

-ifneq ($(strip $(BUILD_JOBS)),)
-SQLNESS_OPTS += --jobs ${BUILD_JOBS}
-endif

 ifneq ($(strip $(CARGO_PROFILE)),)
 CARGO_BUILD_OPTS += --profile ${CARGO_PROFILE}
 endif
@@ -54,20 +45,12 @@ ifneq ($(strip $(TARGET)),)
 CARGO_BUILD_OPTS += --target ${TARGET}
 endif

-ifneq ($(strip $(BUILD_BIN)),)
-CARGO_BUILD_OPTS += --bin ${BUILD_BIN}
-endif

 ifneq ($(strip $(RELEASE)),)
 CARGO_BUILD_OPTS += --release
 endif

-ifeq ($(BUILDX_MULTI_PLATFORM_BUILD), all)
+ifeq ($(BUILDX_MULTI_PLATFORM_BUILD), true)
 BUILDX_MULTI_PLATFORM_BUILD_OPTS := --platform linux/amd64,linux/arm64 --push
-else ifeq ($(BUILDX_MULTI_PLATFORM_BUILD), amd64)
-BUILDX_MULTI_PLATFORM_BUILD_OPTS := --platform linux/amd64 --push
-else ifeq ($(BUILDX_MULTI_PLATFORM_BUILD), arm64)
-BUILDX_MULTI_PLATFORM_BUILD_OPTS := --platform linux/arm64 --push
 else
 BUILDX_MULTI_PLATFORM_BUILD_OPTS := -o type=docker
 endif
@@ -82,12 +65,11 @@ endif
 build: ## Build debug version greptime.
 	cargo ${CARGO_EXTENSION} build ${CARGO_BUILD_OPTS}

-.PHONY: build-by-dev-builder
+.POHNY: build-by-dev-builder
 build-by-dev-builder: ## Build greptime by dev-builder.
 	docker run --network=host \
-	${ASSEMBLED_EXTRA_BUILD_ENV} \
 	-v ${PWD}:/greptimedb -v ${CARGO_REGISTRY_CACHE}:/root/.cargo/registry \
-	-w /greptimedb ${IMAGE_REGISTRY}/${IMAGE_NAMESPACE}/dev-builder-${BASE_IMAGE}:${DEV_BUILDER_IMAGE_TAG} \
+	-w /greptimedb ${IMAGE_REGISTRY}/${IMAGE_NAMESPACE}/dev-builder-${BASE_IMAGE}:latest \
 	make build \
 	CARGO_EXTENSION="${CARGO_EXTENSION}" \
 	CARGO_PROFILE=${CARGO_PROFILE} \
@@ -101,7 +83,7 @@ build-by-dev-builder: ## Build greptime by dev-builder.
 build-android-bin: ## Build greptime binary for android.
 	docker run --network=host \
 	-v ${PWD}:/greptimedb -v ${CARGO_REGISTRY_CACHE}:/root/.cargo/registry \
-	-w /greptimedb ${IMAGE_REGISTRY}/${IMAGE_NAMESPACE}/dev-builder-android:${DEV_BUILDER_IMAGE_TAG} \
+	-w /greptimedb ${IMAGE_REGISTRY}/${IMAGE_NAMESPACE}/dev-builder-android:latest \
 	make build \
 	CARGO_EXTENSION="ndk --platform 23 -t aarch64-linux-android" \
 	CARGO_PROFILE=release \
@@ -115,8 +97,8 @@ build-android-bin: ## Build greptime binary for android.
 strip-android-bin: build-android-bin ## Strip greptime binary for android.
 	docker run --network=host \
 	-v ${PWD}:/greptimedb \
-	-w /greptimedb ${IMAGE_REGISTRY}/${IMAGE_NAMESPACE}/dev-builder-android:${DEV_BUILDER_IMAGE_TAG} \
+	-w /greptimedb ${IMAGE_REGISTRY}/${IMAGE_NAMESPACE}/dev-builder-android:latest \
-	bash -c '$${NDK_ROOT}/toolchains/llvm/prebuilt/linux-x86_64/bin/llvm-strip --strip-debug /greptimedb/target/aarch64-linux-android/release/greptime'
+	bash -c '$${NDK_ROOT}/toolchains/llvm/prebuilt/linux-x86_64/bin/llvm-strip /greptimedb/target/aarch64-linux-android/release/greptime'

 .PHONY: clean
 clean: ## Clean the project.
@@ -155,34 +137,23 @@ dev-builder: multi-platform-buildx ## Build dev-builder image.
 	docker buildx build --builder ${BUILDX_BUILDER_NAME} \
 	--build-arg="RUST_TOOLCHAIN=${RUST_TOOLCHAIN}" \
 	-f docker/dev-builder/${BASE_IMAGE}/Dockerfile \
-	-t ${IMAGE_REGISTRY}/${IMAGE_NAMESPACE}/dev-builder-${BASE_IMAGE}:${DEV_BUILDER_IMAGE_TAG} ${BUILDX_MULTI_PLATFORM_BUILD_OPTS} .
+	-t ${IMAGE_REGISTRY}/${IMAGE_NAMESPACE}/dev-builder-${BASE_IMAGE}:${IMAGE_TAG} ${BUILDX_MULTI_PLATFORM_BUILD_OPTS} .

 .PHONY: multi-platform-buildx
 multi-platform-buildx: ## Create buildx multi-platform builder.
 	docker buildx inspect ${BUILDX_BUILDER_NAME} || docker buildx create --name ${BUILDX_BUILDER_NAME} --driver docker-container --bootstrap --use

 ##@ Test
-.PHONY: test
 test: nextest ## Run unit and integration tests.
 	cargo nextest run ${NEXTEST_OPTS}

-.PHONY: nextest
+.PHONY: nextest ## Install nextest tools.
-nextest: ## Install nextest tools.
+nextest:
 	cargo --list | grep nextest || cargo install cargo-nextest --locked

 .PHONY: sqlness-test
 sqlness-test: ## Run sqlness test.
-	cargo sqlness bare ${SQLNESS_OPTS}
+	cargo sqlness

-RUNS ?= 1
-FUZZ_TARGET ?= fuzz_alter_table
-.PHONY: fuzz
-fuzz: ## Run fuzz test ${FUZZ_TARGET}.
-	cargo fuzz run ${FUZZ_TARGET} --fuzz-dir tests-fuzz -D -s none -- -runs=${RUNS}

-.PHONY: fuzz-ls
-fuzz-ls: ## List all fuzz targets.
-	cargo fuzz list --fuzz-dir tests-fuzz

 .PHONY: check
 check: ## Cargo check all the targets.
@@ -192,26 +163,9 @@ check: ## Cargo check all the targets.
 clippy: ## Check clippy rules.
 	cargo clippy --workspace --all-targets --all-features -- -D warnings

-.PHONY: fix-clippy
-fix-clippy: ## Fix clippy violations.
-	cargo clippy --workspace --all-targets --all-features --fix

-.PHONY: check-udeps
-check-udeps: ## Check unused dependencies.
-	cargo udeps --workspace --all-targets

-.PHONY: fix-udeps
-fix-udeps: ## Remove unused dependencies automatically.
-	@echo "Running cargo-udeps to find unused dependencies..."
-	@cargo udeps --workspace --all-targets --output json > udeps-report.json || true
-	@echo "Removing unused dependencies..."
-	@python3 scripts/fix-udeps.py udeps-report.json

 .PHONY: fmt-check
 fmt-check: ## Check code format.
 	cargo fmt --all -- --check
-	python3 scripts/check-snafu.py
-	python3 scripts/check-super-imports.py

 .PHONY: start-etcd
 start-etcd: ## Start single node etcd for testing purpose.
@@ -225,37 +179,9 @@ stop-etcd: ## Stop single node etcd for testing purpose.
 run-it-in-container: start-etcd ## Run integration tests in dev-builder.
 	docker run --network=host \
 	-v ${PWD}:/greptimedb -v ${CARGO_REGISTRY_CACHE}:/root/.cargo/registry -v /tmp:/tmp \
-	-w /greptimedb ${IMAGE_REGISTRY}/${IMAGE_NAMESPACE}/dev-builder-${BASE_IMAGE}:${DEV_BUILDER_IMAGE_TAG} \
+	-w /greptimedb ${IMAGE_REGISTRY}/${IMAGE_NAMESPACE}/dev-builder-${BASE_IMAGE}:latest \
 	make test sqlness-test BUILD_JOBS=${BUILD_JOBS}

-.PHONY: start-cluster
-start-cluster: ## Start the greptimedb cluster with etcd by using docker compose.
-	docker compose -f ./docker/docker-compose/cluster-with-etcd.yaml up

-.PHONY: stop-cluster
-stop-cluster: ## Stop the greptimedb cluster that created by docker compose.
-	docker compose -f ./docker/docker-compose/cluster-with-etcd.yaml stop

-##@ Grafana

-.PHONY: check-dashboards
-check-dashboards: ## Check the Grafana dashboards.
-	@./grafana/scripts/check.sh

-.PHONY: dashboards
-dashboards: ## Generate the Grafana dashboards for standalone mode and intermediate dashboards.
-	@./grafana/scripts/gen-dashboards.sh

-##@ Docs
-config-docs: ## Generate configuration documentation from toml files.
-	docker run --rm \
-		-v ${PWD}:/greptimedb \
-		-w /greptimedb/config \
-		toml2docs/toml2docs:v0.1.3 \
-		-p '##' \
-		-t ./config-docs-template.md \
-		-o ./config.md

 ##@ General

 # The help target prints out all targets with their descriptions organized
README.md (315 lines changed):
@@ -1,226 +1,185 @@
|
|||||||
<p align="center">
|
<p align="center">
|
||||||
<picture>
|
<picture>
|
||||||
<source media="(prefers-color-scheme: light)" srcset="https://cdn.jsdelivr.net/gh/GreptimeTeam/greptimedb@main/docs/logo-text-padding.png">
|
<source media="(prefers-color-scheme: light)" srcset="https://cdn.jsdelivr.net/gh/GreptimeTeam/greptimedb@develop/docs/logo-text-padding.png">
|
||||||
<source media="(prefers-color-scheme: dark)" srcset="https://cdn.jsdelivr.net/gh/GreptimeTeam/greptimedb@main/docs/logo-text-padding-dark.png">
|
<source media="(prefers-color-scheme: dark)" srcset="https://cdn.jsdelivr.net/gh/GreptimeTeam/greptimedb@develop/docs/logo-text-padding-dark.png">
|
||||||
<img alt="GreptimeDB Logo" src="https://cdn.jsdelivr.net/gh/GreptimeTeam/greptimedb@main/docs/logo-text-padding.png" width="400px">
|
<img alt="GreptimeDB Logo" src="https://cdn.jsdelivr.net/gh/GreptimeTeam/greptimedb@develop/docs/logo-text-padding.png" width="400px">
|
||||||
</picture>
|
</picture>
|
||||||
</p>
|
</p>
|
||||||
|
|
||||||
<h2 align="center">Real-Time & Cloud-Native Observability Database<br/>for metrics, logs, and traces</h2>
|
|
||||||
|
|
||||||
> Delivers sub-second querying at PB scale and exceptional cost efficiency from edge to cloud.
|
|
||||||
|
|
||||||
<div align="center">
|
|
||||||
<h3 align="center">
|
<h3 align="center">
|
||||||
<a href="https://docs.greptime.com/user-guide/overview/">User Guide</a> |
|
The next-generation hybrid time-series/analytics processing database in the cloud
|
||||||
<a href="https://greptimedb.rs/">API Docs</a> |
|
</h3>
|
||||||
<a href="https://github.com/GreptimeTeam/greptimedb/issues/5446">Roadmap 2025</a>
|
|
||||||
</h4>
|
|
||||||
|
|
||||||
<a href="https://github.com/GreptimeTeam/greptimedb/releases/latest">
|
<p align="center">
|
||||||
<img src="https://img.shields.io/github/v/release/GreptimeTeam/greptimedb.svg" alt="Version"/>
|
<a href="https://codecov.io/gh/GrepTimeTeam/greptimedb"><img src="https://codecov.io/gh/GrepTimeTeam/greptimedb/branch/develop/graph/badge.svg?token=FITFDI3J3C"></img></a>
|
||||||
</a>
|
|
||||||
<a href="https://github.com/GreptimeTeam/greptimedb/releases/latest">
|
<a href="https://github.com/GreptimeTeam/greptimedb/actions/workflows/develop.yml"><img src="https://github.com/GreptimeTeam/greptimedb/actions/workflows/develop.yml/badge.svg" alt="CI"></img></a>
|
||||||
<img src="https://img.shields.io/github/release-date/GreptimeTeam/greptimedb.svg" alt="Releases"/>
|
|
||||||
</a>
|
<a href="https://github.com/greptimeTeam/greptimedb/blob/develop/LICENSE"><img src="https://img.shields.io/github/license/greptimeTeam/greptimedb"></a>
|
||||||
<a href="https://hub.docker.com/r/greptime/greptimedb/">
|
</p>
|
||||||
<img src="https://img.shields.io/docker/pulls/greptime/greptimedb.svg" alt="Docker Pulls"/>
|
|
||||||
</a>
|
|
||||||
<a href="https://github.com/GreptimeTeam/greptimedb/actions/workflows/develop.yml">
|
|
||||||
<img src="https://github.com/GreptimeTeam/greptimedb/actions/workflows/develop.yml/badge.svg" alt="GitHub Actions"/>
|
|
||||||
</a>
|
|
||||||
<a href="https://codecov.io/gh/GrepTimeTeam/greptimedb">
|
|
||||||
<img src="https://codecov.io/gh/GrepTimeTeam/greptimedb/branch/main/graph/badge.svg?token=FITFDI3J3C" alt="Codecov"/>
|
|
||||||
</a>
|
|
||||||
<a href="https://github.com/greptimeTeam/greptimedb/blob/main/LICENSE">
|
|
||||||
<img src="https://img.shields.io/github/license/greptimeTeam/greptimedb" alt="License"/>
|
|
||||||
</a>
|
|
||||||
|
|
||||||
<br/>
|
<p align="center">
|
||||||
|
<a href="https://twitter.com/greptime"><img src="https://img.shields.io/badge/twitter-follow_us-1d9bf0.svg"></a>
|
||||||
|
|
||||||
|
<a href="https://www.linkedin.com/company/greptime/"><img src="https://img.shields.io/badge/linkedin-connect_with_us-0a66c2.svg"></a>
|
||||||
|
|
||||||
|
<a href="https://greptime.com/slack"><img src="https://img.shields.io/badge/slack-GreptimeDB-0abd59?logo=slack" alt="slack" /></a>
|
||||||
|
</p>
|
||||||
|
|
||||||
<a href="https://greptime.com/slack">
|
> [!WARNING]
|
||||||
<img src="https://img.shields.io/badge/slack-GreptimeDB-0abd59?logo=slack&style=for-the-badge" alt="Slack"/>
|
> Our default branch has changed from `develop` to `main` (issue [#3025](https://github.com/GreptimeTeam/greptimedb/issues/3025)). Please update your local repository to use the `main` branch.
|
||||||
</a>
|
|
||||||
<a href="https://twitter.com/greptime">
|
|
||||||
<img src="https://img.shields.io/badge/twitter-follow_us-1d9bf0.svg?style=for-the-badge" alt="Twitter"/>
|
|
||||||
</a>
|
|
||||||
<a href="https://www.linkedin.com/company/greptime/">
|
|
||||||
<img src="https://img.shields.io/badge/linkedin-connect_with_us-0a66c2.svg?style=for-the-badge" alt="LinkedIn"/>
|
|
||||||
</a>
|
|
||||||
</div>
|
|
||||||
|
|
||||||
- [Introduction](#introduction)
|
## What is GreptimeDB
|
||||||
- [⭐ Key Features](#features)
|
|
||||||
- [Quick Comparison](#quick-comparison)
|
|
||||||
- [Architecture](#architecture)
|
|
||||||
- [Try GreptimeDB](#try-greptimedb)
|
|
||||||
- [Getting Started](#getting-started)
|
|
||||||
- [Build From Source](#build-from-source)
|
|
||||||
- [Tools & Extensions](#tools--extensions)
|
|
||||||
- [Project Status](#project-status)
|
|
||||||
- [Community](#community)
|
|
||||||
- [License](#license)
|
|
||||||
- [Commercial Support](#commercial-support)
|
|
||||||
- [Contributing](#contributing)
|
|
||||||
- [Acknowledgement](#acknowledgement)
|
|
||||||
|
|
||||||
## Introduction
|
GreptimeDB is an open-source time-series database with a special focus on
|
||||||
|
scalability, analytical capabilities and efficiency. It's designed to work on
|
||||||
|
infrastructure of the cloud era, and users benefit from its elasticity and commodity
|
||||||
|
storage.
|
||||||
|
|
||||||
**GreptimeDB** is an open-source, cloud-native database that unifies metrics, logs, and traces, enabling real-time observability at any scale — across edge, cloud, and hybrid environments.
|
Our core developers have been building time-series data platform
|
||||||
|
for years. Based on their best-practices, GreptimeDB is born to give you:
|
||||||
|
|
||||||
## Features
|
- A standalone binary that scales to highly-available distributed cluster, providing a transparent experience for cluster users
|
||||||
|
- Optimized columnar layout for handling time-series data; compacted, compressed, and stored on various storage backends
|
||||||
|
- Flexible indexes, tackling high cardinality issues down
|
||||||
|
- Distributed, parallel query execution, leveraging elastic computing resource
|
||||||
|
- Native SQL, and Python scripting for advanced analytical scenarios
|
||||||
|
- Widely adopted database protocols and APIs, native PromQL supports
|
||||||
|
- Extensible table engine architecture for extensive workloads
|
||||||
|
|
||||||
| Feature | Description |
|
## Quick Start
|
||||||
| --------- | ----------- |
|
|
||||||
| [All-in-One Observability](https://docs.greptime.com/user-guide/concepts/why-greptimedb) | OpenTelemetry-native platform unifying metrics, logs, and traces. Query via [SQL](https://docs.greptime.com/user-guide/query-data/sql), [PromQL](https://docs.greptime.com/user-guide/query-data/promql), and [Flow](https://docs.greptime.com/user-guide/flow-computation/overview). |
|
|
||||||
| [High Performance](https://docs.greptime.com/user-guide/manage-data/data-index) | Written in Rust with [rich indexing](https://docs.greptime.com/user-guide/manage-data/data-index) (inverted, fulltext, skipping, vector), delivering sub-second responses at PB scale. |
|
|
||||||
| [Cost Efficiency](https://docs.greptime.com/user-guide/concepts/architecture) | 50x lower operational and storage costs with compute-storage separation and native object storage (S3, Azure Blob, etc.). |
|
|
||||||
| [Cloud-Native & Scalable](https://docs.greptime.com/user-guide/deployments-administration/deploy-on-kubernetes/greptimedb-operator-management) | Purpose-built for [Kubernetes](https://docs.greptime.com/user-guide/deployments-administration/deploy-on-kubernetes/greptimedb-operator-management) with unlimited cross-cloud scaling, handling hundreds of thousands of concurrent requests. |
|
|
||||||
| [Developer-Friendly](https://docs.greptime.com/user-guide/protocols/overview) | SQL/PromQL interfaces, built-in web dashboard, REST API, MySQL/PostgreSQL protocol compatibility, and native [OpenTelemetry](https://docs.greptime.com/user-guide/ingest-data/for-observability/opentelemetry/) support. |
|
|
||||||
| [Flexible Deployment](https://docs.greptime.com/user-guide/deployments-administration/overview) | Deploy anywhere from ARM-based edge devices (including [Android](https://docs.greptime.com/user-guide/deployments-administration/run-on-android)) to cloud, with unified APIs and efficient data sync. |
|
|
||||||
|
|
||||||
✅ **Perfect for:**
|
### [GreptimePlay](https://greptime.com/playground)
|
||||||
- Unified observability stack replacing Prometheus + Loki + Tempo
|
|
||||||
- Large-scale metrics with high cardinality (millions to billions of time series)
|
|
||||||
- Large-scale observability platform requiring cost efficiency and scalability
|
|
||||||
- IoT and edge computing with resource and bandwidth constraints
|
|
||||||
|
|
||||||
Learn more in [Why GreptimeDB](https://docs.greptime.com/user-guide/concepts/why-greptimedb) and [Observability 2.0 and the Database for It](https://greptime.com/blogs/2025-04-25-greptimedb-observability2-new-database).
|
Try out the features of GreptimeDB right from your browser.
|
||||||
|
|
||||||
## Quick Comparison
|
### Build
|
||||||
|
|
||||||
| Feature | GreptimeDB | Traditional TSDB | Log Stores |
|
#### Build from Source
|
||||||
|----------------------------------|-----------------------|--------------------|-----------------|
|
|
||||||
| Data Types | Metrics, Logs, Traces | Metrics only | Logs only |
|
|
||||||
| Query Language | SQL, PromQL | Custom/PromQL | Custom/DSL |
|
|
||||||
| Deployment | Edge + Cloud | Cloud/On-prem | Mostly central |
|
|
||||||
| Indexing & Performance | PB-Scale, Sub-second | Varies | Varies |
|
|
||||||
| Integration | REST API, SQL, Common protocols | Varies | Varies |
|
|
||||||
|
|
||||||
**Performance:**
|
To compile GreptimeDB from source, you'll need:
|
||||||
* [GreptimeDB tops JSONBench's billion-record cold run test!](https://greptime.com/blogs/2025-03-18-jsonbench-greptimedb-performance)
|
|
||||||
* [TSBS Benchmark](https://github.com/GreptimeTeam/greptimedb/tree/main/docs/benchmarks/tsbs)
|
|
||||||
|
|
||||||
Read [more benchmark reports](https://docs.greptime.com/user-guide/concepts/features-that-you-concern#how-is-greptimedbs-performance-compared-to-other-solutions).
|
- C/C++ Toolchain: provides basic tools for compiling and linking. This is
|
||||||
|
available as `build-essential` on ubuntu and similar name on other platforms.
|
||||||
|
- Rust: the easiest way to install Rust is to use
|
||||||
|
[`rustup`](https://rustup.rs/), which will check our `rust-toolchain` file and
|
||||||
|
install correct Rust version for you.
|
||||||
|
- Protobuf: `protoc` is required for compiling `.proto` files. `protobuf` is
|
||||||
|
available from major package manager on macos and linux distributions. You can
|
||||||
|
find an installation instructions [here](https://grpc.io/docs/protoc-installation/).
|
||||||
|
**Note that `protoc` version needs to be >= 3.15** because we have used the `optional`
|
||||||
|
keyword. You can check it with `protoc --version`.
|
||||||
|
- python3-dev or python3-devel(Optional feature, only needed if you want to run scripts
|
||||||
|
in CPython, and also need to enable `pyo3_backend` feature when compiling(by `cargo run -F pyo3_backend` or add `pyo3_backend` to src/script/Cargo.toml 's `features.default` like `default = ["python", "pyo3_backend]`)): this install a Python shared library required for running Python
|
||||||
|
scripting engine(In CPython Mode). This is available as `python3-dev` on
|
||||||
|
ubuntu, you can install it with `sudo apt install python3-dev`, or
|
||||||
|
`python3-devel` on RPM based distributions (e.g. Fedora, Red Hat, SuSE). Mac's
|
||||||
|
`Python3` package should have this shared library by default. More detail for compiling with PyO3 can be found in [PyO3](https://pyo3.rs/v0.18.1/building_and_distribution#configuring-the-python-version)'s documentation.
|
||||||
|
|
||||||
## Architecture
|
#### Build with Docker
|
||||||
|
|
||||||
GreptimeDB can run in two modes:
|
A docker image with necessary dependencies is provided:
|
||||||
* **Standalone Mode** - Single binary for development and small deployments
|
|
||||||
* **Distributed Mode** - Separate components for production scale:
|
|
||||||
- Frontend: Query processing and protocol handling
|
|
||||||
- Datanode: Data storage and retrieval
|
|
||||||
- Metasrv: Metadata management and coordination
|
|
||||||
|
|
||||||
Read the [architecture](https://docs.greptime.com/contributor-guide/overview/#architecture) document. [DeepWiki](https://deepwiki.com/GreptimeTeam/greptimedb/1-overview) provides an in-depth look at GreptimeDB:
|
|
||||||
<img alt="GreptimeDB System Overview" src="docs/architecture.png">
|
|
||||||
|
|
||||||
## Try GreptimeDB
|
```
|
||||||
|
docker build --network host -f docker/Dockerfile -t greptimedb .
|
||||||
```shell
|
|
||||||
docker pull greptime/greptimedb
|
|
||||||
```
|
```
|
||||||
|
|
||||||
```shell
|
### Run
|
||||||
docker run -p 127.0.0.1:4000-4003:4000-4003 \
|
|
||||||
-v "$(pwd)/greptimedb_data:/greptimedb_data" \
|
Start GreptimeDB from source code, in standalone mode:
|
||||||
--name greptime --rm \
|
|
||||||
greptime/greptimedb:latest standalone start \
|
|
||||||
--http-addr 0.0.0.0:4000 \
|
|
||||||
--rpc-bind-addr 0.0.0.0:4001 \
|
|
||||||
--mysql-addr 0.0.0.0:4002 \
|
|
||||||
--postgres-addr 0.0.0.0:4003
|
|
||||||
```
|
```
|
||||||
Dashboard: [http://localhost:4000/dashboard](http://localhost:4000/dashboard)
|
|
||||||
|
|
||||||
Read more in the [full Install Guide](https://docs.greptime.com/getting-started/installation/overview).
|
|
||||||
|
|
||||||
**Troubleshooting:**
|
|
||||||
* Cannot connect to the database? Ensure that ports `4000`, `4001`, `4002`, and `4003` are not blocked by a firewall or used by other services.
|
|
||||||
* Failed to start? Check the container logs with `docker logs greptime` for further details.
|
|
||||||
|
|
||||||
## Getting Started
|
|
||||||
|
|
||||||
- [Quickstart](https://docs.greptime.com/getting-started/quick-start)
|
|
||||||
- [User Guide](https://docs.greptime.com/user-guide/overview)
|
|
||||||
- [Demo Scenes](https://github.com/GreptimeTeam/demo-scene)
|
|
||||||
- [FAQ](https://docs.greptime.com/faq-and-others/faq)
|
|
||||||
|
|
||||||
## Build From Source
|
|
||||||
|
|
||||||
**Prerequisites:**
|
|
||||||
* [Rust toolchain](https://www.rust-lang.org/tools/install) (nightly)
|
|
||||||
* [Protobuf compiler](https://grpc.io/docs/protoc-installation/) (>= 3.15)
|
|
||||||
* C/C++ building essentials, including `gcc`/`g++`/`autoconf` and glibc library (eg. `libc6-dev` on Ubuntu and `glibc-devel` on Fedora)
|
|
||||||
* Python toolchain (optional): Required only if using some test scripts.
|
|
||||||
|
|
||||||
**Build and Run:**
|
|
||||||
```bash
|
|
||||||
make
|
|
||||||
cargo run -- standalone start
|
cargo run -- standalone start
|
||||||
```
|
```
|
||||||
|
|
||||||
## Tools & Extensions

- **Kubernetes**: [GreptimeDB Operator](https://github.com/GrepTimeTeam/greptimedb-operator)
- **Helm Charts**: [Greptime Helm Charts](https://github.com/GreptimeTeam/helm-charts)
- **Dashboard**: [Web UI](https://github.com/GreptimeTeam/dashboard)
- **gRPC Ingester**: [Go](https://github.com/GreptimeTeam/greptimedb-ingester-go), [Java](https://github.com/GreptimeTeam/greptimedb-ingester-java), [C++](https://github.com/GreptimeTeam/greptimedb-ingester-cpp), [Erlang](https://github.com/GreptimeTeam/greptimedb-ingester-erl), [Rust](https://github.com/GreptimeTeam/greptimedb-ingester-rust)
- **Grafana Data Source**: [GreptimeDB Grafana data source plugin](https://github.com/GreptimeTeam/greptimedb-grafana-datasource)
- **Grafana Dashboard**: [Official Dashboard for monitoring](https://github.com/GreptimeTeam/greptimedb/blob/main/grafana/README.md)

## Resources

### Installation

- [Pre-built Binaries](https://greptime.com/download):
  For Linux and macOS, you can easily download pre-built binaries, including official releases and nightly builds, that are ready to use.
  In most cases, downloading the version without PyO3 is sufficient. However, if you plan to run scripts in CPython (and use Python packages like NumPy and Pandas), you will need to download the version with PyO3 and install a Python with the same version as the Python in the PyO3 version.
  We recommend using virtualenv for the installation process to manage multiple Python versions (see the sketch after this list).
- [Docker Images](https://hub.docker.com/r/greptime/greptimedb) (**recommended**): pre-built
  Docker images; this is the easiest way to try GreptimeDB. By default it runs CPython scripts with `pyo3_backend` enabled.
- [`gtctl`](https://github.com/GreptimeTeam/gtctl): the command-line tool for
  Kubernetes deployment
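
A minimal sketch of the virtualenv setup mentioned above; the Python 3.10 version here is an assumption, so match it to the Python version the PyO3 build you downloaded was linked against:

```shell
# Isolate a Python that matches the PyO3 build (3.10 is a placeholder).
virtualenv --python=python3.10 .venv
source .venv/bin/activate
python --version   # should match the PyO3 build's Python version
```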
### Documentation

- GreptimeDB [User Guide](https://docs.greptime.com/user-guide/concepts/overview)
- GreptimeDB [Developer Guide](https://docs.greptime.com/developer-guide/overview.html)
- GreptimeDB [internal code document](https://greptimedb.rs)

### Dashboard

- [The dashboard UI for GreptimeDB](https://github.com/GreptimeTeam/dashboard)

### SDK

- [GreptimeDB C++ Client](https://github.com/GreptimeTeam/greptimedb-client-cpp)
- [GreptimeDB Erlang Client](https://github.com/GreptimeTeam/greptimedb-client-erl)
- [GreptimeDB Go Client](https://github.com/GreptimeTeam/greptimedb-client-go)
- [GreptimeDB Java Client](https://github.com/GreptimeTeam/greptimedb-client-java)
- [GreptimeDB Python Client](https://github.com/GreptimeTeam/greptimedb-client-py) (WIP)
- [GreptimeDB Rust Client](https://github.com/GreptimeTeam/greptimedb-client-rust)
- [GreptimeDB JavaScript Client](https://github.com/GreptimeTeam/greptime-js-sdk)

## Project Status

> **Status:** Beta — marching toward v1.0 GA!
>
> **GA (v1.0):** January 10, 2026

- Deployed in production by open-source projects and commercial users
- Stable, actively maintained, with regular releases ([version info](https://docs.greptime.com/nightly/reference/about-greptimedb-version))
- Suitable for evaluation and pilot deployments

GreptimeDB v1.0 represents a major milestone toward maturity, marking stable APIs, production readiness, and proven performance. For production use, we recommend using the latest stable release.

**Roadmap:** Beta1 (Nov 10) → Beta2 (Nov 24) → RC1 (Dec 8) → GA (Jan 10, 2026); please read [v1.0 highlights and release plan](https://greptime.com/blogs/2025-11-05-greptimedb-v1-highlights) for details. For future plans, check out the [GreptimeDB roadmap](https://github.com/GreptimeTeam/greptimedb/issues/669).

[Star History](https://www.star-history.com/#GreptimeTeam/GreptimeDB&Date)

If you find this project useful, a ⭐ would mean a lot to us!

<img alt="Known Users" src="https://greptime.com/logo/img/users.png"/>

## Community

We invite you to engage and contribute! Our core team is thrilled to see you participate in any way you like. If you get stuck, ask for help by filing an issue with a detailed description of what you were trying to do and what went wrong. If you have any questions or would like to get involved in our community, please check out:

- [Slack](https://greptime.com/slack)
- [Discussions](https://github.com/GreptimeTeam/greptimedb/discussions)
- [Official Website](https://greptime.com/)
- [Blog](https://greptime.com/blogs/)
- [LinkedIn](https://www.linkedin.com/company/greptime/)
- [X (Twitter)](https://X.com/greptime)
- [YouTube](https://www.youtube.com/@greptime)

## License

GreptimeDB uses the [Apache License 2.0](https://apache.org/licenses/LICENSE-2.0.txt) to strike a balance between open contributions and allowing you to use the software however you want.

## Commercial Support

Running GreptimeDB in your organization? We offer enterprise add-ons, services, training, and consulting. [Contact us](https://greptime.com/contactus) for details.

## Contributing

- Read our [Contribution Guidelines](https://github.com/GreptimeTeam/greptimedb/blob/main/CONTRIBUTING.md).
- Explore [Internal Concepts](https://docs.greptime.com/contributor-guide/overview.html) and [DeepWiki](https://deepwiki.com/GreptimeTeam/greptimedb).
- Pick up a [good first issue](https://github.com/GreptimeTeam/greptimedb/issues?q=is%3Aissue+is%3Aopen+label%3A%22good+first+issue%22) and join the #contributors [Slack](https://greptime.com/slack) channel.

## Acknowledgement

- GreptimeDB uses [Apache Arrow™](https://arrow.apache.org/) as the memory model and [Apache Parquet™](https://parquet.apache.org/) as the persistent file format.
- GreptimeDB's query engine is powered by [Apache Arrow DataFusion™](https://github.com/apache/arrow-datafusion).
- [Apache OpenDAL™](https://opendal.apache.org/) gives GreptimeDB a very general and elegant data access abstraction layer.
- GreptimeDB's meta service is based on [etcd](https://etcd.io/).
- GreptimeDB uses [RustPython](https://github.com/RustPython/RustPython) for experimental embedded Python scripting.

Special thanks to all contributors! See [AUTHORS.md](https://github.com/GreptimeTeam/greptimedb/blob/main/AUTHOR.md).

benchmarks/Cargo.toml (new file, 16 lines)
@@ -0,0 +1,16 @@
[package]
name = "benchmarks"
version.workspace = true
edition.workspace = true
license.workspace = true

[dependencies]
arrow.workspace = true
chrono.workspace = true
clap = { version = "4.0", features = ["derive"] }
client.workspace = true
futures-util.workspace = true
indicatif = "0.17.1"
itertools.workspace = true
parquet.workspace = true
tokio.workspace = true

benchmarks/src/bin/nyc-taxi.rs (new file, 543 lines)
@@ -0,0 +1,543 @@
// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

//! Use the taxi trip records from New York City dataset to bench. You can download the dataset from
//! [here](https://www1.nyc.gov/site/tlc/about/tlc-trip-record-data.page).

#![allow(clippy::print_stdout)]

use std::collections::HashMap;
use std::path::{Path, PathBuf};
use std::time::Instant;

use arrow::array::{ArrayRef, PrimitiveArray, StringArray, TimestampMicrosecondArray};
use arrow::datatypes::{DataType, Float64Type, Int64Type};
use arrow::record_batch::RecordBatch;
use clap::Parser;
use client::api::v1::column::Values;
use client::api::v1::{
    Column, ColumnDataType, ColumnDef, CreateTableExpr, InsertRequest, InsertRequests, SemanticType,
};
use client::{Client, Database, Output, DEFAULT_CATALOG_NAME, DEFAULT_SCHEMA_NAME};
use futures_util::TryStreamExt;
use indicatif::{MultiProgress, ProgressBar, ProgressStyle};
use parquet::arrow::arrow_reader::ParquetRecordBatchReaderBuilder;
use tokio::task::JoinSet;

const CATALOG_NAME: &str = "greptime";
const SCHEMA_NAME: &str = "public";

#[derive(Parser)]
#[command(name = "NYC benchmark runner")]
struct Args {
    /// Path to the dataset
    #[arg(short, long)]
    path: Option<String>,

    /// Batch size of insert request.
    #[arg(short = 's', long = "batch-size", default_value_t = 4096)]
    batch_size: usize,

    /// Number of client threads on write (parallel on file level)
    #[arg(short = 't', long = "thread-num", default_value_t = 4)]
    thread_num: usize,

    /// Number of query iteration
    #[arg(short = 'i', long = "iter-num", default_value_t = 3)]
    iter_num: usize,

    #[arg(long = "skip-write")]
    skip_write: bool,

    #[arg(long = "skip-read")]
    skip_read: bool,

    #[arg(short, long, default_value_t = String::from("127.0.0.1:4001"))]
    endpoint: String,
}

fn get_file_list<P: AsRef<Path>>(path: P) -> Vec<PathBuf> {
    std::fs::read_dir(path)
        .unwrap()
        .map(|dir| dir.unwrap().path().canonicalize().unwrap())
        .collect()
}

fn new_table_name() -> String {
    format!("nyc_taxi_{}", chrono::Utc::now().timestamp())
}

async fn write_data(
    table_name: &str,
    batch_size: usize,
    db: &Database,
    path: PathBuf,
    mpb: MultiProgress,
    pb_style: ProgressStyle,
) -> u128 {
    let file = std::fs::File::open(&path).unwrap();
    let record_batch_reader_builder = ParquetRecordBatchReaderBuilder::try_new(file).unwrap();
    let row_num = record_batch_reader_builder
        .metadata()
        .file_metadata()
        .num_rows();
    let record_batch_reader = record_batch_reader_builder
        .with_batch_size(batch_size)
        .build()
        .unwrap();
    let progress_bar = mpb.add(ProgressBar::new(row_num as _));
    progress_bar.set_style(pb_style);
    progress_bar.set_message(format!("{path:?}"));

    let mut total_rpc_elapsed_ms = 0;

    for record_batch in record_batch_reader {
        let record_batch = record_batch.unwrap();
        if !is_record_batch_full(&record_batch) {
            continue;
        }
        let (columns, row_count) = convert_record_batch(record_batch);
        let request = InsertRequest {
            table_name: table_name.to_string(),
            columns,
            row_count,
        };
        let requests = InsertRequests {
            inserts: vec![request],
        };

        let now = Instant::now();
        db.insert(requests).await.unwrap();
        let elapsed = now.elapsed();
        total_rpc_elapsed_ms += elapsed.as_millis();
        progress_bar.inc(row_count as _);
    }

    progress_bar.finish_with_message(format!("file {path:?} done in {total_rpc_elapsed_ms}ms",));
    total_rpc_elapsed_ms
}

fn convert_record_batch(record_batch: RecordBatch) -> (Vec<Column>, u32) {
    let schema = record_batch.schema();
    let fields = schema.fields();
    let row_count = record_batch.num_rows();
    let mut columns = vec![];

    for (array, field) in record_batch.columns().iter().zip(fields.iter()) {
        let (values, datatype) = build_values(array);
        let semantic_type = match field.name().as_str() {
            "VendorID" => SemanticType::Tag,
            "tpep_pickup_datetime" => SemanticType::Timestamp,
            _ => SemanticType::Field,
        };

        let column = Column {
            column_name: field.name().clone(),
            values: Some(values),
            null_mask: array
                .to_data()
                .nulls()
                .map(|bitmap| bitmap.buffer().as_slice().to_vec())
                .unwrap_or_default(),
            datatype: datatype.into(),
            semantic_type: semantic_type as i32,
            ..Default::default()
        };
        columns.push(column);
    }

    (columns, row_count as _)
}

fn build_values(column: &ArrayRef) -> (Values, ColumnDataType) {
    match column.data_type() {
        DataType::Int64 => {
            let array = column
                .as_any()
                .downcast_ref::<PrimitiveArray<Int64Type>>()
                .unwrap();
            let values = array.values();
            (
                Values {
                    i64_values: values.to_vec(),
                    ..Default::default()
                },
                ColumnDataType::Int64,
            )
        }
        DataType::Float64 => {
            let array = column
                .as_any()
                .downcast_ref::<PrimitiveArray<Float64Type>>()
                .unwrap();
            let values = array.values();
            (
                Values {
                    f64_values: values.to_vec(),
                    ..Default::default()
                },
                ColumnDataType::Float64,
            )
        }
        DataType::Timestamp(_, _) => {
            let array = column
                .as_any()
                .downcast_ref::<TimestampMicrosecondArray>()
                .unwrap();
            let values = array.values();
            (
                Values {
                    timestamp_microsecond_values: values.to_vec(),
                    ..Default::default()
                },
                ColumnDataType::TimestampMicrosecond,
            )
        }
        DataType::Utf8 => {
            let array = column.as_any().downcast_ref::<StringArray>().unwrap();
            let values = array.iter().filter_map(|s| s.map(String::from)).collect();
            (
                Values {
                    string_values: values,
                    ..Default::default()
                },
                ColumnDataType::String,
            )
        }
        DataType::Null
        | DataType::Boolean
        | DataType::Int8
        | DataType::Int16
        | DataType::Int32
        | DataType::UInt8
        | DataType::UInt16
        | DataType::UInt32
        | DataType::UInt64
        | DataType::Float16
        | DataType::Float32
        | DataType::Date32
        | DataType::Date64
        | DataType::Time32(_)
        | DataType::Time64(_)
        | DataType::Duration(_)
        | DataType::Interval(_)
        | DataType::Binary
        | DataType::FixedSizeBinary(_)
        | DataType::LargeBinary
        | DataType::LargeUtf8
        | DataType::List(_)
        | DataType::FixedSizeList(_, _)
        | DataType::LargeList(_)
        | DataType::Struct(_)
        | DataType::Union(_, _)
        | DataType::Dictionary(_, _)
        | DataType::Decimal128(_, _)
        | DataType::Decimal256(_, _)
        | DataType::RunEndEncoded(_, _)
        | DataType::Map(_, _) => todo!(),
    }
}

fn is_record_batch_full(batch: &RecordBatch) -> bool {
    batch.columns().iter().all(|col| col.null_count() == 0)
}

fn create_table_expr(table_name: &str) -> CreateTableExpr {
    CreateTableExpr {
        catalog_name: CATALOG_NAME.to_string(),
        schema_name: SCHEMA_NAME.to_string(),
        table_name: table_name.to_string(),
        desc: "".to_string(),
        column_defs: vec![
            ColumnDef {
                name: "VendorID".to_string(),
                data_type: ColumnDataType::Int64 as i32,
                is_nullable: true,
                default_constraint: vec![],
                semantic_type: SemanticType::Tag as i32,
                comment: String::new(),
                ..Default::default()
            },
            ColumnDef {
                name: "tpep_pickup_datetime".to_string(),
                data_type: ColumnDataType::TimestampMicrosecond as i32,
                is_nullable: false,
                default_constraint: vec![],
                semantic_type: SemanticType::Timestamp as i32,
                comment: String::new(),
                ..Default::default()
            },
            ColumnDef {
                name: "tpep_dropoff_datetime".to_string(),
                data_type: ColumnDataType::TimestampMicrosecond as i32,
                is_nullable: true,
                default_constraint: vec![],
                semantic_type: SemanticType::Field as i32,
                comment: String::new(),
                ..Default::default()
            },
            ColumnDef {
                name: "passenger_count".to_string(),
                data_type: ColumnDataType::Float64 as i32,
                is_nullable: true,
                default_constraint: vec![],
                semantic_type: SemanticType::Field as i32,
                comment: String::new(),
                ..Default::default()
            },
            ColumnDef {
                name: "trip_distance".to_string(),
                data_type: ColumnDataType::Float64 as i32,
                is_nullable: true,
                default_constraint: vec![],
                semantic_type: SemanticType::Field as i32,
                comment: String::new(),
                ..Default::default()
            },
            ColumnDef {
                name: "RatecodeID".to_string(),
                data_type: ColumnDataType::Float64 as i32,
                is_nullable: true,
                default_constraint: vec![],
                semantic_type: SemanticType::Field as i32,
                comment: String::new(),
                ..Default::default()
            },
            ColumnDef {
                name: "store_and_fwd_flag".to_string(),
                data_type: ColumnDataType::String as i32,
                is_nullable: true,
                default_constraint: vec![],
                semantic_type: SemanticType::Field as i32,
                comment: String::new(),
                ..Default::default()
            },
            ColumnDef {
                name: "PULocationID".to_string(),
                data_type: ColumnDataType::Int64 as i32,
                is_nullable: true,
                default_constraint: vec![],
                semantic_type: SemanticType::Field as i32,
                comment: String::new(),
                ..Default::default()
            },
            ColumnDef {
                name: "DOLocationID".to_string(),
                data_type: ColumnDataType::Int64 as i32,
                is_nullable: true,
                default_constraint: vec![],
                semantic_type: SemanticType::Field as i32,
                comment: String::new(),
                ..Default::default()
            },
            ColumnDef {
                name: "payment_type".to_string(),
                data_type: ColumnDataType::Int64 as i32,
                is_nullable: true,
                default_constraint: vec![],
                semantic_type: SemanticType::Field as i32,
                comment: String::new(),
                ..Default::default()
            },
            ColumnDef {
                name: "fare_amount".to_string(),
                data_type: ColumnDataType::Float64 as i32,
                is_nullable: true,
                default_constraint: vec![],
                semantic_type: SemanticType::Field as i32,
                comment: String::new(),
                ..Default::default()
            },
            ColumnDef {
                name: "extra".to_string(),
                data_type: ColumnDataType::Float64 as i32,
                is_nullable: true,
                default_constraint: vec![],
                semantic_type: SemanticType::Field as i32,
                comment: String::new(),
                ..Default::default()
            },
            ColumnDef {
                name: "mta_tax".to_string(),
                data_type: ColumnDataType::Float64 as i32,
                is_nullable: true,
                default_constraint: vec![],
                semantic_type: SemanticType::Field as i32,
                comment: String::new(),
                ..Default::default()
            },
            ColumnDef {
                name: "tip_amount".to_string(),
                data_type: ColumnDataType::Float64 as i32,
                is_nullable: true,
                default_constraint: vec![],
                semantic_type: SemanticType::Field as i32,
                comment: String::new(),
                ..Default::default()
            },
            ColumnDef {
                name: "tolls_amount".to_string(),
                data_type: ColumnDataType::Float64 as i32,
                is_nullable: true,
                default_constraint: vec![],
                semantic_type: SemanticType::Field as i32,
                comment: String::new(),
                ..Default::default()
            },
            ColumnDef {
                name: "improvement_surcharge".to_string(),
                data_type: ColumnDataType::Float64 as i32,
                is_nullable: true,
                default_constraint: vec![],
                semantic_type: SemanticType::Field as i32,
                comment: String::new(),
                ..Default::default()
            },
            ColumnDef {
                name: "total_amount".to_string(),
                data_type: ColumnDataType::Float64 as i32,
                is_nullable: true,
                default_constraint: vec![],
                semantic_type: SemanticType::Field as i32,
                comment: String::new(),
                ..Default::default()
            },
            ColumnDef {
                name: "congestion_surcharge".to_string(),
                data_type: ColumnDataType::Float64 as i32,
                is_nullable: true,
                default_constraint: vec![],
                semantic_type: SemanticType::Field as i32,
                comment: String::new(),
                ..Default::default()
            },
            ColumnDef {
                name: "airport_fee".to_string(),
                data_type: ColumnDataType::Float64 as i32,
                is_nullable: true,
                default_constraint: vec![],
                semantic_type: SemanticType::Field as i32,
                comment: String::new(),
                ..Default::default()
            },
        ],
        time_index: "tpep_pickup_datetime".to_string(),
        primary_keys: vec!["VendorID".to_string()],
        create_if_not_exists: true,
        table_options: Default::default(),
        table_id: None,
        engine: "mito".to_string(),
    }
}

fn query_set(table_name: &str) -> HashMap<String, String> {
    HashMap::from([
        (
            "count_all".to_string(),
            format!("SELECT COUNT(*) FROM {table_name};"),
        ),
        (
            "fare_amt_by_passenger".to_string(),
            format!("SELECT passenger_count, MIN(fare_amount), MAX(fare_amount), SUM(fare_amount) FROM {table_name} GROUP BY passenger_count"),
        )
    ])
}

async fn do_write(args: &Args, db: &Database, table_name: &str) {
    let mut file_list = get_file_list(args.path.clone().expect("Specify data path in argument"));
    let mut write_jobs = JoinSet::new();

    let create_table_result = db.create(create_table_expr(table_name)).await;
    println!("Create table result: {create_table_result:?}");

    let progress_bar_style = ProgressStyle::with_template(
        "[{elapsed_precise}] {bar:60.cyan/blue} {pos:>7}/{len:7} {msg}",
    )
    .unwrap()
    .progress_chars("##-");
    let multi_progress_bar = MultiProgress::new();
    let file_progress = multi_progress_bar.add(ProgressBar::new(file_list.len() as _));
    file_progress.inc(0);

    let batch_size = args.batch_size;
    for _ in 0..args.thread_num {
        if let Some(path) = file_list.pop() {
            let db = db.clone();
            let mpb = multi_progress_bar.clone();
            let pb_style = progress_bar_style.clone();
            let table_name = table_name.to_string();
            let _ = write_jobs.spawn(async move {
                write_data(&table_name, batch_size, &db, path, mpb, pb_style).await
            });
        }
    }
    while write_jobs.join_next().await.is_some() {
        file_progress.inc(1);
        if let Some(path) = file_list.pop() {
            let db = db.clone();
            let mpb = multi_progress_bar.clone();
            let pb_style = progress_bar_style.clone();
            let table_name = table_name.to_string();
            let _ = write_jobs.spawn(async move {
                write_data(&table_name, batch_size, &db, path, mpb, pb_style).await
            });
        }
    }
}

async fn do_query(num_iter: usize, db: &Database, table_name: &str) {
    for (query_name, query) in query_set(table_name) {
        println!("Running query: {query}");
        for i in 0..num_iter {
            let now = Instant::now();
            let res = db.sql(&query).await.unwrap();
            match res {
                Output::AffectedRows(_) | Output::RecordBatches(_) => (),
                Output::Stream(stream) => {
                    stream.try_collect::<Vec<_>>().await.unwrap();
                }
            }
            let elapsed = now.elapsed();
            println!(
                "query {}, iteration {}: {}ms",
                query_name,
                i,
                elapsed.as_millis(),
            );
        }
    }
}

fn main() {
    let args = Args::parse();

    tokio::runtime::Builder::new_multi_thread()
        .worker_threads(args.thread_num)
        .enable_all()
        .build()
        .unwrap()
        .block_on(async {
            let client = Client::with_urls(vec![&args.endpoint]);
            let db = Database::new(DEFAULT_CATALOG_NAME, DEFAULT_SCHEMA_NAME, client);
            let table_name = new_table_name();

            if !args.skip_write {
                do_write(&args, &db, &table_name).await;
            }

            if !args.skip_read {
                do_query(args.iter_num, &db, &table_name).await;
            }
        })
}
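
A sketch of how this benchmark might be invoked, assuming the workspace exposes it as the `nyc-taxi` binary (per the file path above) and a GreptimeDB instance is listening on the default gRPC port; the dataset directory is a placeholder:

```shell
# Run the NYC taxi write/query benchmark against a local instance.
# /path/to/nyc-taxi-parquet is a placeholder for your dataset directory.
cargo run --release --bin nyc-taxi -- \
  --path /path/to/nyc-taxi-parquet \
  --batch-size 4096 \
  --thread-num 4 \
  --endpoint 127.0.0.1:4001
```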

cliff.toml (deleted file, 127 lines)
@@ -1,127 +0,0 @@
# https://git-cliff.org/docs/configuration

[remote.github]
owner = "GreptimeTeam"
repo = "greptimedb"

[changelog]
header = ""
footer = ""
# template for the changelog body
# https://keats.github.io/tera/docs/#introduction
body = """
# {{ version }}

Release date: {{ timestamp | date(format="%B %d, %Y") }}

{%- set breakings = commits | filter(attribute="breaking", value=true) -%}
{%- if breakings | length > 0 %}

## Breaking changes
{% for commit in breakings %}
* {{ commit.github.pr_title }}\
{% if commit.github.username %} by \
{% set author = commit.github.username -%}
[@{{ author }}](https://github.com/{{ author }})
{%- endif -%}
{% if commit.github.pr_number %} in \
{% set number = commit.github.pr_number -%}
[#{{ number }}]({{ self::remote_url() }}/pull/{{ number }})
{%- endif %}
{%- endfor %}
{%- endif -%}

{%- set grouped_commits = commits | filter(attribute="breaking", value=false) | group_by(attribute="group") -%}
{% for group, commits in grouped_commits %}

### {{ group | striptags | trim | upper_first }}
{% for commit in commits %}
* {{ commit.github.pr_title }}\
{% if commit.github.username %} by \
{% set author = commit.github.username -%}
[@{{ author }}](https://github.com/{{ author }})
{%- endif -%}
{% if commit.github.pr_number %} in \
{% set number = commit.github.pr_number -%}
[#{{ number }}]({{ self::remote_url() }}/pull/{{ number }})
{%- endif %}
{%- endfor -%}
{% endfor %}

{%- if github.contributors | filter(attribute="is_first_time", value=true) | length != 0 %}
{% raw %}\n{% endraw -%}
## New Contributors
{% endif -%}
{% for contributor in github.contributors | filter(attribute="is_first_time", value=true) %}
* [@{{ contributor.username }}](https://github.com/{{ contributor.username }}) made their first contribution
{%- if contributor.pr_number %} in \
[#{{ contributor.pr_number }}]({{ self::remote_url() }}/pull/{{ contributor.pr_number }}) \
{%- endif %}
{%- endfor -%}

{% if github.contributors | length != 0 %}
{% raw %}\n{% endraw -%}
## All Contributors

We would like to thank the following contributors from the GreptimeDB community:

{%- set contributors = github.contributors | sort(attribute="username") | map(attribute="username") -%}
{%- set bots = ['dependabot[bot]'] %}

{% for contributor in contributors %}
{%- if bots is containing(contributor) -%}{% continue %}{%- endif -%}
{%- if loop.first -%}
[@{{ contributor }}](https://github.com/{{ contributor }})
{%- else -%}
, [@{{ contributor }}](https://github.com/{{ contributor }})
{%- endif -%}
{%- endfor %}
{%- endif %}
{% raw %}\n{% endraw %}

{%- macro remote_url() -%}
https://github.com/{{ remote.github.owner }}/{{ remote.github.repo }}
{%- endmacro -%}
"""
trim = true

[git]
# parse the commits based on https://www.conventionalcommits.org
conventional_commits = true
# filter out the commits that are not conventional
filter_unconventional = true
# process each line of a commit as an individual commit
split_commits = false
# regex for parsing and grouping commits
commit_parsers = [
  { message = "^feat", group = "<!-- 0 -->🚀 Features" },
  { message = "^fix", group = "<!-- 1 -->🐛 Bug Fixes" },
  { message = "^doc", group = "<!-- 3 -->📚 Documentation" },
  { message = "^perf", group = "<!-- 4 -->⚡ Performance" },
  { message = "^refactor", group = "<!-- 2 -->🚜 Refactor" },
  { message = "^style", group = "<!-- 5 -->🎨 Styling" },
  { message = "^test", group = "<!-- 6 -->🧪 Testing" },
  { message = "^chore\\(release\\): prepare for", skip = true },
  { message = "^chore\\(deps.*\\)", skip = true },
  { message = "^chore\\(pr\\)", skip = true },
  { message = "^chore\\(pull\\)", skip = true },
  { message = "^chore|^ci", group = "<!-- 7 -->⚙️ Miscellaneous Tasks" },
  { body = ".*security", group = "<!-- 8 -->🛡️ Security" },
  { message = "^revert", group = "<!-- 9 -->◀️ Revert" },
]
# protect breaking changes from being skipped due to matching a skipping commit_parser
protect_breaking_commits = false
# filter out the commits that are not matched by commit parsers
filter_commits = false
# regex for matching git tags
# tag_pattern = "v[0-9].*"
# regex for skipping tags
# skip_tags = ""
# regex for ignoring tags
ignore_tags = ".*-nightly-.*"
# sort the tags topologically
topo_order = false
# sort the commits inside sections by oldest/newest order
sort_commits = "oldest"
# limit the number of commits included in the changelog.
# limit_commits = 42
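
This file drives [git-cliff](https://git-cliff.org), which renders the changelog from conventional commits. A minimal sketch of an invocation; the exact flags are assumptions, so check the git-cliff docs:

```shell
# Render release notes for the latest tag using this configuration.
git cliff --config cliff.toml --latest
```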
@@ -8,6 +8,5 @@ coverage:
 ignore:
   - "**/error*.rs" # ignore all error.rs files
   - "tests/runner/*.rs" # ignore integration test runner
-  - "tests-integration/**/*.rs" # ignore integration tests
 comment: # this is a top-level key
   layout: "diff"

@@ -1,31 +0,0 @@
# Configurations

- [Configurations](#configurations)
  - [Standalone Mode](#standalone-mode)
  - [Distributed Mode](#distributed-mode)
    - [Frontend](#frontend)
    - [Metasrv](#metasrv)
    - [Datanode](#datanode)
    - [Flownode](#flownode)

## Standalone Mode

{{ toml2docs "./standalone.example.toml" }}

## Distributed Mode

### Frontend

{{ toml2docs "./frontend.example.toml" }}

### Metasrv

{{ toml2docs "./metasrv.example.toml" }}

### Datanode

{{ toml2docs "./datanode.example.toml" }}

### Flownode

{{ toml2docs "./flownode.example.toml" }}
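
These templates render per-component reference docs from the example TOML files. A sketch of picking one up at startup; the `-c` flag is an assumption, so check `greptime standalone start --help`:

```shell
# Start a standalone instance from the example configuration file.
greptime standalone start -c config/standalone.example.toml
```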

config/config.md (deleted file, 657 lines)
@@ -1,657 +0,0 @@
# Configurations

- [Configurations](#configurations)
  - [Standalone Mode](#standalone-mode)
  - [Distributed Mode](#distributed-mode)
    - [Frontend](#frontend)
    - [Metasrv](#metasrv)
    - [Datanode](#datanode)
    - [Flownode](#flownode)

## Standalone Mode

| Key | Type | Default | Descriptions |
| --- | ---- | ------- | ------------ |
| `default_timezone` | String | Unset | The default timezone of the server. |
| `default_column_prefix` | String | Unset | The default column prefix for auto-created time index and value columns. |
| `init_regions_in_background` | Bool | `false` | Initialize all regions in the background during the startup.<br/>By default, it provides services after all regions have been initialized. |
| `init_regions_parallelism` | Integer | `16` | Parallelism of initializing regions. |
| `max_concurrent_queries` | Integer | `0` | The maximum number of concurrent queries allowed to be executed. Zero means unlimited.<br/>NOTE: This setting affects scan_memory_limit's privileged tier allocation.<br/>When set, 70% of queries get privileged memory access (full scan_memory_limit).<br/>The remaining 30% get standard tier access (70% of scan_memory_limit). |
| `enable_telemetry` | Bool | `true` | Enable telemetry to collect anonymous usage data. Enabled by default. |
| `max_in_flight_write_bytes` | String | Unset | The maximum in-flight write bytes. |
| `runtime` | -- | -- | The runtime options. |
| `runtime.global_rt_size` | Integer | `8` | The number of threads to execute the runtime for global read operations. |
| `runtime.compact_rt_size` | Integer | `4` | The number of threads to execute the runtime for global write operations. |
| `http` | -- | -- | The HTTP server options. |
| `http.addr` | String | `127.0.0.1:4000` | The address to bind the HTTP server. |
| `http.timeout` | String | `0s` | HTTP request timeout. Set to 0 to disable timeout. |
| `http.body_limit` | String | `64MB` | HTTP request body limit.<br/>The following units are supported: `B`, `KB`, `KiB`, `MB`, `MiB`, `GB`, `GiB`, `TB`, `TiB`, `PB`, `PiB`.<br/>Set to 0 to disable limit. |
| `http.max_total_body_memory` | String | Unset | Maximum total memory for all concurrent HTTP request bodies.<br/>Set to 0 to disable the limit. Default: "0" (unlimited) |
| `http.enable_cors` | Bool | `true` | HTTP CORS support, it's turned on by default.<br/>This allows browsers to access HTTP APIs without CORS restrictions. |
| `http.cors_allowed_origins` | Array | Unset | Customize allowed origins for HTTP CORS. |
| `http.prom_validation_mode` | String | `strict` | Whether to enable validation for Prometheus remote write requests.<br/>Available options:<br/>- strict: deny invalid UTF-8 strings (default).<br/>- lossy: allow invalid UTF-8 strings, replace invalid characters with REPLACEMENT_CHARACTER(U+FFFD).<br/>- unchecked: do not validate strings. |
| `grpc` | -- | -- | The gRPC server options. |
| `grpc.bind_addr` | String | `127.0.0.1:4001` | The address to bind the gRPC server. |
| `grpc.runtime_size` | Integer | `8` | The number of server worker threads. |
| `grpc.max_total_message_memory` | String | Unset | Maximum total memory for all concurrent gRPC request messages.<br/>Set to 0 to disable the limit. Default: "0" (unlimited) |
| `grpc.max_connection_age` | String | Unset | The maximum connection age for gRPC connection.<br/>The value can be a human-readable time string. For example: `10m` for ten minutes or `1h` for one hour.<br/>Refer to https://grpc.io/docs/guides/keepalive/ for more details. |
| `grpc.tls` | -- | -- | gRPC server TLS options, see `mysql.tls` section. |
| `grpc.tls.mode` | String | `disable` | TLS mode. |
| `grpc.tls.cert_path` | String | Unset | Certificate file path. |
| `grpc.tls.key_path` | String | Unset | Private key file path. |
| `grpc.tls.watch` | Bool | `false` | Watch for certificate and key file changes and auto reload.<br/>For now, gRPC TLS config does not support auto reload. |
| `mysql` | -- | -- | MySQL server options. |
| `mysql.enable` | Bool | `true` | Whether to enable. |
| `mysql.addr` | String | `127.0.0.1:4002` | The address to bind the MySQL server. |
| `mysql.runtime_size` | Integer | `2` | The number of server worker threads. |
| `mysql.keep_alive` | String | `0s` | Server-side keep-alive time.<br/>Set to 0 (default) to disable. |
| `mysql.prepared_stmt_cache_size` | Integer | `10000` | Maximum entries in the MySQL prepared statement cache; default is 10,000. |
| `mysql.tls` | -- | -- | -- |
| `mysql.tls.mode` | String | `disable` | TLS mode, refer to https://www.postgresql.org/docs/current/libpq-ssl.html<br/>- `disable` (default value)<br/>- `prefer`<br/>- `require`<br/>- `verify-ca`<br/>- `verify-full` |
| `mysql.tls.cert_path` | String | Unset | Certificate file path. |
| `mysql.tls.key_path` | String | Unset | Private key file path. |
| `mysql.tls.watch` | Bool | `false` | Watch for certificate and key file changes and auto reload. |
| `postgres` | -- | -- | PostgreSQL server options. |
| `postgres.enable` | Bool | `true` | Whether to enable. |
| `postgres.addr` | String | `127.0.0.1:4003` | The address to bind the PostgreSQL server. |
| `postgres.runtime_size` | Integer | `2` | The number of server worker threads. |
| `postgres.keep_alive` | String | `0s` | Server-side keep-alive time.<br/>Set to 0 (default) to disable. |
| `postgres.tls` | -- | -- | PostgreSQL server TLS options, see `mysql.tls` section. |
| `postgres.tls.mode` | String | `disable` | TLS mode. |
| `postgres.tls.cert_path` | String | Unset | Certificate file path. |
| `postgres.tls.key_path` | String | Unset | Private key file path. |
| `postgres.tls.watch` | Bool | `false` | Watch for certificate and key file changes and auto reload. |
| `opentsdb` | -- | -- | OpenTSDB protocol options. |
| `opentsdb.enable` | Bool | `true` | Whether to enable OpenTSDB put in HTTP API. |
| `influxdb` | -- | -- | InfluxDB protocol options. |
| `influxdb.enable` | Bool | `true` | Whether to enable InfluxDB protocol in HTTP API. |
| `jaeger` | -- | -- | Jaeger protocol options. |
| `jaeger.enable` | Bool | `true` | Whether to enable Jaeger protocol in HTTP API. |
| `prom_store` | -- | -- | Prometheus remote storage options. |
| `prom_store.enable` | Bool | `true` | Whether to enable Prometheus remote write and read in HTTP API. |
| `prom_store.with_metric_engine` | Bool | `true` | Whether to store the data from Prometheus remote write in metric engine. |
| `wal` | -- | -- | The WAL options. |
| `wal.provider` | String | `raft_engine` | The provider of the WAL.<br/>- `raft_engine`: the WAL is stored in the local file system by raft-engine.<br/>- `kafka`: it's remote WAL whose data is stored in Kafka. |
| `wal.dir` | String | Unset | The directory to store the WAL files.<br/>**It's only used when the provider is `raft_engine`**. |
| `wal.file_size` | String | `128MB` | The size of the WAL segment file.<br/>**It's only used when the provider is `raft_engine`**. |
| `wal.purge_threshold` | String | `1GB` | The threshold of the WAL size to trigger a purge.<br/>**It's only used when the provider is `raft_engine`**. |
| `wal.purge_interval` | String | `1m` | The interval to trigger a purge.<br/>**It's only used when the provider is `raft_engine`**. |
| `wal.read_batch_size` | Integer | `128` | The read batch size.<br/>**It's only used when the provider is `raft_engine`**. |
| `wal.sync_write` | Bool | `false` | Whether to use sync write.<br/>**It's only used when the provider is `raft_engine`**. |
| `wal.enable_log_recycle` | Bool | `true` | Whether to reuse logically truncated log files.<br/>**It's only used when the provider is `raft_engine`**. |
| `wal.prefill_log_files` | Bool | `false` | Whether to pre-create log files on start up.<br/>**It's only used when the provider is `raft_engine`**. |
| `wal.sync_period` | String | `10s` | Duration for fsyncing log files.<br/>**It's only used when the provider is `raft_engine`**. |
| `wal.recovery_parallelism` | Integer | `2` | Parallelism during WAL recovery. |
| `wal.broker_endpoints` | Array | -- | The Kafka broker endpoints.<br/>**It's only used when the provider is `kafka`**. |
| `wal.auto_create_topics` | Bool | `true` | Automatically create topics for WAL.<br/>Set to `true` to automatically create topics for WAL.<br/>Otherwise, use topics named `topic_name_prefix_[0..num_topics)` |
| `wal.num_topics` | Integer | `64` | Number of topics.<br/>**It's only used when the provider is `kafka`**. |
| `wal.selector_type` | String | `round_robin` | Topic selector type.<br/>Available selector types:<br/>- `round_robin` (default)<br/>**It's only used when the provider is `kafka`**. |
| `wal.topic_name_prefix` | String | `greptimedb_wal_topic` | A Kafka topic is constructed by concatenating `topic_name_prefix` and `topic_id`.<br/>e.g., greptimedb_wal_topic_0, greptimedb_wal_topic_1.<br/>**It's only used when the provider is `kafka`**. |
| `wal.replication_factor` | Integer | `1` | Expected number of replicas of each partition.<br/>**It's only used when the provider is `kafka`**. |
| `wal.create_topic_timeout` | String | `30s` | Above which a topic creation operation will be cancelled.<br/>**It's only used when the provider is `kafka`**. |
| `wal.max_batch_bytes` | String | `1MB` | The max size of a single producer batch.<br/>Warning: Kafka has a default limit of 1MB per message in a topic.<br/>**It's only used when the provider is `kafka`**. |
| `wal.consumer_wait_timeout` | String | `100ms` | The consumer wait timeout.<br/>**It's only used when the provider is `kafka`**. |
| `wal.overwrite_entry_start_id` | Bool | `false` | Ignore missing entries when reading the WAL.<br/>**It's only used when the provider is `kafka`**.<br/><br/>This option ensures that when Kafka messages are deleted, the system<br/>can still successfully replay memtable data without throwing an<br/>out-of-range error.<br/>However, enabling this option might lead to unexpected data loss,<br/>as the system will skip over missing entries instead of treating<br/>them as critical errors. |
| `metadata_store` | -- | -- | Metadata storage options. |
| `metadata_store.file_size` | String | `64MB` | The size of the metadata store log file. |
| `metadata_store.purge_threshold` | String | `256MB` | The threshold of the metadata store size to trigger a purge. |
| `metadata_store.purge_interval` | String | `1m` | The interval of the metadata store to trigger a purge. |
| `procedure` | -- | -- | Procedure storage options. |
| `procedure.max_retry_times` | Integer | `3` | Procedure max retry time. |
| `procedure.retry_delay` | String | `500ms` | Initial retry delay of procedures, increases exponentially. |
| `procedure.max_running_procedures` | Integer | `128` | Max running procedures.<br/>The maximum number of procedures that can be running at the same time.<br/>If the number of running procedures exceeds this limit, the procedure will be rejected. |
| `flow` | -- | -- | Flow engine options. |
| `flow.num_workers` | Integer | `0` | The number of flow workers in flownode.<br/>Not setting (or setting to 0) this value will use the number of CPU cores divided by 2. |
| `query` | -- | -- | The query engine options. |
| `query.parallelism` | Integer | `0` | Parallelism of the query engine.<br/>Default to 0, which means the number of CPU cores. |
| `query.memory_pool_size` | String | `50%` | Memory pool size for query execution operators (aggregation, sorting, join).<br/>Supports absolute size (e.g., "2GB", "4GB") or percentage of system memory (e.g., "20%").<br/>Setting it to 0 disables the limit (unbounded, default behavior).<br/>When this limit is reached, queries will fail with ResourceExhausted error.<br/>NOTE: This does NOT limit memory used by table scans. |
| `storage` | -- | -- | The data storage options. |
| `storage.data_home` | String | `./greptimedb_data` | The working home directory. |
| `storage.type` | String | `File` | The storage type used to store the data.<br/>- `File`: the data is stored in the local file system.<br/>- `S3`: the data is stored in the S3 object storage.<br/>- `Gcs`: the data is stored in the Google Cloud Storage.<br/>- `Azblob`: the data is stored in the Azure Blob Storage.<br/>- `Oss`: the data is stored in the Aliyun OSS. |
| `storage.enable_read_cache` | Bool | `true` | Whether to enable read cache. If not set, the read cache will be enabled by default when using object storage. |
| `storage.cache_path` | String | Unset | Read cache configuration for object storage such as 'S3' etc; it's configured by default when using object storage. It is recommended to configure it when using object storage for better performance.<br/>A local file directory, defaults to `{data_home}`. An empty string means disabling. |
| `storage.cache_capacity` | String | Unset | The local file cache capacity in bytes. If your disk space is sufficient, it is recommended to set it larger. |
| `storage.bucket` | String | Unset | The S3 bucket name.<br/>**It's only used when the storage type is `S3`, `Oss` and `Gcs`**. |
| `storage.root` | String | Unset | The S3 data will be stored in the specified prefix, for example, `s3://${bucket}/${root}`.<br/>**It's only used when the storage type is `S3`, `Oss` and `Azblob`**. |
| `storage.access_key_id` | String | Unset | The access key id of the aws account.<br/>It's **highly recommended** to use AWS IAM roles instead of hardcoding the access key id and secret key.<br/>**It's only used when the storage type is `S3` and `Oss`**. |
| `storage.secret_access_key` | String | Unset | The secret access key of the aws account.<br/>It's **highly recommended** to use AWS IAM roles instead of hardcoding the access key id and secret key.<br/>**It's only used when the storage type is `S3`**. |
| `storage.access_key_secret` | String | Unset | The secret access key of the aliyun account.<br/>**It's only used when the storage type is `Oss`**. |
| `storage.account_name` | String | Unset | The account name of the azure account.<br/>**It's only used when the storage type is `Azblob`**. |
| `storage.account_key` | String | Unset | The account key of the azure account.<br/>**It's only used when the storage type is `Azblob`**. |
| `storage.scope` | String | Unset | The scope of the google cloud storage.<br/>**It's only used when the storage type is `Gcs`**. |
| `storage.credential_path` | String | Unset | The credential path of the google cloud storage.<br/>**It's only used when the storage type is `Gcs`**. |
| `storage.credential` | String | Unset | The credential of the google cloud storage.<br/>**It's only used when the storage type is `Gcs`**. |
| `storage.container` | String | Unset | The container of the azure account.<br/>**It's only used when the storage type is `Azblob`**. |
| `storage.sas_token` | String | Unset | The sas token of the azure account.<br/>**It's only used when the storage type is `Azblob`**. |
| `storage.endpoint` | String | Unset | The endpoint of the S3 service.<br/>**It's only used when the storage type is `S3`, `Oss`, `Gcs` and `Azblob`**. |
| `storage.region` | String | Unset | The region of the S3 service.<br/>**It's only used when the storage type is `S3`, `Oss`, `Gcs` and `Azblob`**. |
| `storage.http_client` | -- | -- | The http client options to the storage.<br/>**It's only used when the storage type is `S3`, `Oss`, `Gcs` and `Azblob`**. |
| `storage.http_client.pool_max_idle_per_host` | Integer | `1024` | The maximum idle connection per host allowed in the pool. |
| `storage.http_client.connect_timeout` | String | `30s` | The timeout for only the connect phase of a http client. |
| `storage.http_client.timeout` | String | `30s` | The total request timeout, applied from when the request starts connecting until the response body has finished.<br/>Also considered a total deadline. |
| `storage.http_client.pool_idle_timeout` | String | `90s` | The timeout for idle sockets being kept-alive. |
| `storage.http_client.skip_ssl_validation` | Bool | `false` | To skip the ssl verification.<br/>**Security Notice**: Setting `skip_ssl_validation = true` disables certificate verification, making connections vulnerable to man-in-the-middle attacks. Only use this in development or trusted private networks. |
| `[[region_engine]]` | -- | -- | The region engine options. You can configure multiple region engines. |
| `region_engine.mito` | -- | -- | The Mito engine options. |
| `region_engine.mito.num_workers` | Integer | `8` | Number of region workers. |
| `region_engine.mito.worker_channel_size` | Integer | `128` | Request channel size of each worker. |
| `region_engine.mito.worker_request_batch_size` | Integer | `64` | Max batch size for a worker to handle requests. |
| `region_engine.mito.manifest_checkpoint_distance` | Integer | `10` | Number of meta action updated to trigger a new checkpoint for the manifest. |
| `region_engine.mito.compress_manifest` | Bool | `false` | Whether to compress manifest and checkpoint file by gzip (default false). |
| `region_engine.mito.max_background_flushes` | Integer | Auto | Max number of running background flush jobs (default: 1/2 of cpu cores). |
| `region_engine.mito.max_background_compactions` | Integer | Auto | Max number of running background compaction jobs (default: 1/4 of cpu cores). |
| `region_engine.mito.max_background_purges` | Integer | Auto | Max number of running background purge jobs (default: number of cpu cores). |
| `region_engine.mito.auto_flush_interval` | String | `1h` | Interval to auto flush a region if it has not flushed yet. |
| `region_engine.mito.global_write_buffer_size` | String | Auto | Global write buffer size for all regions. If not set, it's default to 1/8 of OS memory with a max limitation of 1GB. |
| `region_engine.mito.global_write_buffer_reject_size` | String | Auto | Global write buffer size threshold to reject write requests. If not set, it's default to 2 times of `global_write_buffer_size`. |
| `region_engine.mito.sst_meta_cache_size` | String | Auto | Cache size for SST metadata. Setting it to 0 to disable the cache.<br/>If not set, it's default to 1/32 of OS memory with a max limitation of 128MB. |
| `region_engine.mito.vector_cache_size` | String | Auto | Cache size for vectors and arrow arrays. Setting it to 0 to disable the cache.<br/>If not set, it's default to 1/16 of OS memory with a max limitation of 512MB. |
| `region_engine.mito.page_cache_size` | String | Auto | Cache size for pages of SST row groups. Setting it to 0 to disable the cache.<br/>If not set, it's default to 1/8 of OS memory. |
| `region_engine.mito.selector_result_cache_size` | String | Auto | Cache size for time series selector (e.g. `last_value()`). Setting it to 0 to disable the cache.<br/>If not set, it's default to 1/16 of OS memory with a max limitation of 512MB. |
| `region_engine.mito.enable_write_cache` | Bool | `false` | Whether to enable the write cache; it's enabled by default when using object storage. It is recommended to enable it when using object storage for better performance. |
| `region_engine.mito.write_cache_path` | String | `""` | File system path for write cache, defaults to `{data_home}`. |
| `region_engine.mito.write_cache_size` | String | `5GiB` | Capacity for write cache. If your disk space is sufficient, it is recommended to set it larger. |
| `region_engine.mito.write_cache_ttl` | String | Unset | TTL for write cache. |
| `region_engine.mito.preload_index_cache` | Bool | `true` | Preload index (puffin) files into cache on region open (default: true).<br/>When enabled, index files are loaded into the write cache during region initialization,<br/>which can improve query performance at the cost of longer startup times. |
| `region_engine.mito.index_cache_percent` | Integer | `20` | Percentage of write cache capacity allocated for index (puffin) files (default: 20).<br/>The remaining capacity is used for data (parquet) files.<br/>Must be between 0 and 100 (exclusive). For example, with a 5GiB write cache and 20% allocation,<br/>1GiB is reserved for index files and 4GiB for data files. |
| `region_engine.mito.sst_write_buffer_size` | String | `8MB` | Buffer size for SST writing. |
| `region_engine.mito.parallel_scan_channel_size` | Integer | `32` | Capacity of the channel to send data from parallel scan tasks to the main task. |
| `region_engine.mito.max_concurrent_scan_files` | Integer | `384` | Maximum number of SST files to scan concurrently. |
| `region_engine.mito.allow_stale_entries` | Bool | `false` | Whether to allow stale WAL entries read during replay. |
| `region_engine.mito.scan_memory_limit` | String | `50%` | Memory limit for table scans across all queries.<br/>Supports absolute size (e.g., "2GB") or percentage of system memory (e.g., "20%").<br/>Setting it to 0 disables the limit.<br/>NOTE: Works with max_concurrent_queries for tiered memory allocation.<br/>- If max_concurrent_queries is set: 70% of queries get full access, 30% get 70% access.<br/>- If max_concurrent_queries is 0 (unlimited): first 20 queries get full access, rest get 70% access. |
| `region_engine.mito.min_compaction_interval` | String | `0m` | Minimum time interval between two compactions.<br/>To align with the old behavior, the default value is 0 (no restrictions). |
| `region_engine.mito.default_experimental_flat_format` | Bool | `false` | Whether to enable experimental flat format as the default format. |
| `region_engine.mito.index` | -- | -- | The options for index in Mito engine. |
| `region_engine.mito.index.aux_path` | String | `""` | Auxiliary directory path for the index in filesystem, used to store intermediate files for<br/>creating the index and staging files for searching the index, defaults to `{data_home}/index_intermediate`.<br/>The default name for this directory is `index_intermediate` for backward compatibility.<br/><br/>This path contains two subdirectories:<br/>- `__intm`: for storing intermediate files used during creating index.<br/>- `staging`: for storing staging files used during searching index. |
| `region_engine.mito.index.staging_size` | String | `2GB` | The max capacity of the staging directory. |
| `region_engine.mito.index.staging_ttl` | String | `7d` | The TTL of the staging directory.<br/>Defaults to 7 days.<br/>Setting it to "0s" to disable TTL. |
|
|
||||||
| `region_engine.mito.index.metadata_cache_size` | String | `64MiB` | Cache size for inverted index metadata. |
|
|
||||||
| `region_engine.mito.index.content_cache_size` | String | `128MiB` | Cache size for inverted index content. |
|
|
||||||
| `region_engine.mito.index.content_cache_page_size` | String | `64KiB` | Page size for inverted index content cache. |
|
|
||||||
| `region_engine.mito.index.result_cache_size` | String | `128MiB` | Cache size for index result. |
|
|
||||||
| `region_engine.mito.inverted_index` | -- | -- | The options for inverted index in Mito engine. |
|
|
||||||
| `region_engine.mito.inverted_index.create_on_flush` | String | `auto` | Whether to create the index on flush.<br/>- `auto`: automatically (default)<br/>- `disable`: never |
|
|
||||||
| `region_engine.mito.inverted_index.create_on_compaction` | String | `auto` | Whether to create the index on compaction.<br/>- `auto`: automatically (default)<br/>- `disable`: never |
|
|
||||||
| `region_engine.mito.inverted_index.apply_on_query` | String | `auto` | Whether to apply the index on query<br/>- `auto`: automatically (default)<br/>- `disable`: never |
|
|
||||||
| `region_engine.mito.inverted_index.mem_threshold_on_create` | String | `auto` | Memory threshold for performing an external sort during index creation.<br/>- `auto`: automatically determine the threshold based on the system memory size (default)<br/>- `unlimited`: no memory limit<br/>- `[size]` e.g. `64MB`: fixed memory threshold |
|
|
||||||
| `region_engine.mito.inverted_index.intermediate_path` | String | `""` | Deprecated, use `region_engine.mito.index.aux_path` instead. |
|
|
||||||
| `region_engine.mito.fulltext_index` | -- | -- | The options for full-text index in Mito engine. |
|
|
||||||
| `region_engine.mito.fulltext_index.create_on_flush` | String | `auto` | Whether to create the index on flush.<br/>- `auto`: automatically (default)<br/>- `disable`: never |
|
|
||||||
| `region_engine.mito.fulltext_index.create_on_compaction` | String | `auto` | Whether to create the index on compaction.<br/>- `auto`: automatically (default)<br/>- `disable`: never |
|
|
||||||
| `region_engine.mito.fulltext_index.apply_on_query` | String | `auto` | Whether to apply the index on query<br/>- `auto`: automatically (default)<br/>- `disable`: never |
|
|
||||||
| `region_engine.mito.fulltext_index.mem_threshold_on_create` | String | `auto` | Memory threshold for index creation.<br/>- `auto`: automatically determine the threshold based on the system memory size (default)<br/>- `unlimited`: no memory limit<br/>- `[size]` e.g. `64MB`: fixed memory threshold |
|
|
||||||
| `region_engine.mito.bloom_filter_index` | -- | -- | The options for bloom filter in Mito engine. |
|
|
||||||
| `region_engine.mito.bloom_filter_index.create_on_flush` | String | `auto` | Whether to create the bloom filter on flush.<br/>- `auto`: automatically (default)<br/>- `disable`: never |
|
|
||||||
| `region_engine.mito.bloom_filter_index.create_on_compaction` | String | `auto` | Whether to create the bloom filter on compaction.<br/>- `auto`: automatically (default)<br/>- `disable`: never |
|
|
||||||
| `region_engine.mito.bloom_filter_index.apply_on_query` | String | `auto` | Whether to apply the bloom filter on query<br/>- `auto`: automatically (default)<br/>- `disable`: never |
|
|
||||||
| `region_engine.mito.bloom_filter_index.mem_threshold_on_create` | String | `auto` | Memory threshold for bloom filter creation.<br/>- `auto`: automatically determine the threshold based on the system memory size (default)<br/>- `unlimited`: no memory limit<br/>- `[size]` e.g. `64MB`: fixed memory threshold |
|
|
||||||
| `region_engine.mito.memtable` | -- | -- | -- |
|
|
||||||
| `region_engine.mito.memtable.type` | String | `time_series` | Memtable type.<br/>- `time_series`: time-series memtable<br/>- `partition_tree`: partition tree memtable (experimental) |
|
|
||||||
| `region_engine.mito.memtable.index_max_keys_per_shard` | Integer | `8192` | The max number of keys in one shard.<br/>Only available for `partition_tree` memtable. |
|
|
||||||
| `region_engine.mito.memtable.data_freeze_threshold` | Integer | `32768` | The max rows of data inside the actively writing buffer in one shard.<br/>Only available for `partition_tree` memtable. |
|
|
||||||
| `region_engine.mito.memtable.fork_dictionary_bytes` | String | `1GiB` | Max dictionary bytes.<br/>Only available for `partition_tree` memtable. |
|
|
||||||
| `region_engine.file` | -- | -- | Enable the file engine. |
|
|
||||||
| `region_engine.metric` | -- | -- | Metric engine options. |
|
|
||||||
| `region_engine.metric.sparse_primary_key_encoding` | Bool | `true` | Whether to use sparse primary key encoding. |
|
|
||||||
| `logging` | -- | -- | The logging options. |
|
|
||||||
| `logging.dir` | String | `./greptimedb_data/logs` | The directory to store the log files. If set to empty, logs will not be written to files. |
|
|
||||||
| `logging.level` | String | Unset | The log level. Can be `info`/`debug`/`warn`/`error`. |
|
|
||||||
| `logging.enable_otlp_tracing` | Bool | `false` | Enable OTLP tracing. |
|
|
||||||
| `logging.otlp_endpoint` | String | `http://localhost:4318/v1/traces` | The OTLP tracing endpoint. |
|
|
||||||
| `logging.append_stdout` | Bool | `true` | Whether to append logs to stdout. |
|
|
||||||
| `logging.log_format` | String | `text` | The log format. Can be `text`/`json`. |
|
|
||||||
| `logging.max_log_files` | Integer | `720` | The maximum amount of log files. |
|
|
||||||
| `logging.otlp_export_protocol` | String | `http` | The OTLP tracing export protocol. Can be `grpc`/`http`. |
|
|
||||||
| `logging.otlp_headers` | -- | -- | Additional OTLP headers, only valid when using OTLP http |
|
|
||||||
| `logging.tracing_sample_ratio` | -- | Unset | The percentage of tracing will be sampled and exported.<br/>Valid range `[0, 1]`, 1 means all traces are sampled, 0 means all traces are not sampled, the default value is 1.<br/>ratio > 1 are treated as 1. Fractions < 0 are treated as 0 |
|
|
||||||
| `logging.tracing_sample_ratio.default_ratio` | Float | `1.0` | -- |
|
|
||||||
| `slow_query` | -- | -- | The slow query log options. |
|
|
||||||
| `slow_query.enable` | Bool | `false` | Whether to enable slow query log. |
|
|
||||||
| `slow_query.record_type` | String | Unset | The record type of slow queries. It can be `system_table` or `log`. |
|
|
||||||
| `slow_query.threshold` | String | Unset | The threshold of slow query. |
|
|
||||||
| `slow_query.sample_ratio` | Float | Unset | The sampling ratio of slow query log. The value should be in the range of (0, 1]. |
|
|
||||||
| `tracing` | -- | -- | The tracing options. Only effect when compiled with `tokio-console` feature. |
|
|
||||||
| `tracing.tokio_console_addr` | String | Unset | The tokio console address. |
|
|
||||||
| `memory` | -- | -- | The memory options. |
|
|
||||||
| `memory.enable_heap_profiling` | Bool | `true` | Whether to enable heap profiling activation during startup.<br/>When enabled, heap profiling will be activated if the `MALLOC_CONF` environment variable<br/>is set to "prof:true,prof_active:false". The official image adds this env variable.<br/>Default is true. |
|
|
||||||
|
|
||||||
|
|
||||||
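
The Mito options above compose into a single TOML section. Below is a minimal sketch of a write-cache and index tuning block; the values are illustrative placeholders, not recommendations:

```toml
[[region_engine]]

[region_engine.mito]
num_workers = 8
# The write cache is recommended when data lives on object storage.
enable_write_cache = true
write_cache_size = "5GiB"
# 20% of the write cache (1GiB here) is reserved for puffin index files.
index_cache_percent = 20

[region_engine.mito.index]
# Intermediate and staging files default to `{data_home}/index_intermediate`.
staging_size = "2GB"
staging_ttl = "7d"

[region_engine.mito.inverted_index]
create_on_flush = "auto"
apply_on_query = "auto"
```
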
## Distributed Mode
### Frontend
| Key | Type | Default | Descriptions |
| --- | -----| ------- | ----------- |
| `default_timezone` | String | Unset | The default timezone of the server. |
| `default_column_prefix` | String | Unset | The default column prefix for auto-created time index and value columns. |
| `max_in_flight_write_bytes` | String | Unset | The maximum in-flight write bytes. |
| `runtime` | -- | -- | The runtime options. |
| `runtime.global_rt_size` | Integer | `8` | The number of threads to execute the runtime for global read operations. |
| `runtime.compact_rt_size` | Integer | `4` | The number of threads to execute the runtime for global write operations. |
| `heartbeat` | -- | -- | The heartbeat options. |
| `heartbeat.interval` | String | `18s` | Interval for sending heartbeat messages to the metasrv. |
| `heartbeat.retry_interval` | String | `3s` | Interval for retrying to send heartbeat messages to the metasrv. |
| `http` | -- | -- | The HTTP server options. |
| `http.addr` | String | `127.0.0.1:4000` | The address to bind the HTTP server. |
| `http.timeout` | String | `0s` | HTTP request timeout. Set to 0 to disable timeout. |
| `http.body_limit` | String | `64MB` | HTTP request body limit.<br/>The following units are supported: `B`, `KB`, `KiB`, `MB`, `MiB`, `GB`, `GiB`, `TB`, `TiB`, `PB`, `PiB`.<br/>Set to 0 to disable limit. |
| `http.max_total_body_memory` | String | Unset | Maximum total memory for all concurrent HTTP request bodies.<br/>Set to 0 to disable the limit. Default: "0" (unlimited). |
| `http.enable_cors` | Bool | `true` | HTTP CORS support; it is turned on by default.<br/>This allows browsers to access HTTP APIs without CORS restrictions. |
| `http.cors_allowed_origins` | Array | Unset | Customize allowed origins for HTTP CORS. |
| `http.prom_validation_mode` | String | `strict` | Whether to enable validation for Prometheus remote write requests.<br/>Available options:<br/>- strict: deny invalid UTF-8 strings (default).<br/>- lossy: allow invalid UTF-8 strings, replace invalid characters with REPLACEMENT_CHARACTER(U+FFFD).<br/>- unchecked: do not validate strings. |
| `grpc` | -- | -- | The gRPC server options. |
| `grpc.bind_addr` | String | `127.0.0.1:4001` | The address to bind the gRPC server. |
| `grpc.server_addr` | String | `127.0.0.1:4001` | The address advertised to the metasrv, and used for connections from outside the host.<br/>If left empty or unset, the server will automatically use the IP address of the first network interface<br/>on the host, with the same port number as the one specified in `grpc.bind_addr`. |
| `grpc.runtime_size` | Integer | `8` | The number of server worker threads. |
| `grpc.max_total_message_memory` | String | Unset | Maximum total memory for all concurrent gRPC request messages.<br/>Set to 0 to disable the limit. Default: "0" (unlimited). |
| `grpc.flight_compression` | String | `arrow_ipc` | Compression mode for frontend side Arrow IPC service. Available options:<br/>- `none`: disable all compression<br/>- `transport`: only enable gRPC transport compression (zstd)<br/>- `arrow_ipc`: only enable Arrow IPC compression (lz4)<br/>- `all`: enable all compression. |
| `grpc.max_connection_age` | String | Unset | The maximum connection age for gRPC connection.<br/>The value can be a human-readable time string. For example: `10m` for ten minutes or `1h` for one hour.<br/>Refer to https://grpc.io/docs/guides/keepalive/ for more details. |
| `grpc.tls` | -- | -- | gRPC server TLS options, see `mysql.tls` section. |
| `grpc.tls.mode` | String | `disable` | TLS mode. |
| `grpc.tls.cert_path` | String | Unset | Certificate file path. |
| `grpc.tls.key_path` | String | Unset | Private key file path. |
| `grpc.tls.watch` | Bool | `false` | Watch for certificate and key file changes and automatically reload them.<br/>For now, the gRPC TLS config does not support auto reload. |
| `internal_grpc` | -- | -- | The internal gRPC server options. The internal gRPC port for nodes inside the cluster to access the frontend. |
| `internal_grpc.bind_addr` | String | `127.0.0.1:4010` | The address to bind the gRPC server. |
| `internal_grpc.server_addr` | String | `127.0.0.1:4010` | The address advertised to the metasrv, and used for connections from outside the host.<br/>If left empty or unset, the server will automatically use the IP address of the first network interface<br/>on the host, with the same port number as the one specified in `internal_grpc.bind_addr`. |
| `internal_grpc.runtime_size` | Integer | `8` | The number of server worker threads. |
| `internal_grpc.flight_compression` | String | `arrow_ipc` | Compression mode for frontend side Arrow IPC service. Available options:<br/>- `none`: disable all compression<br/>- `transport`: only enable gRPC transport compression (zstd)<br/>- `arrow_ipc`: only enable Arrow IPC compression (lz4)<br/>- `all`: enable all compression. |
| `internal_grpc.tls` | -- | -- | Internal gRPC server TLS options, see `mysql.tls` section. |
| `internal_grpc.tls.mode` | String | `disable` | TLS mode. |
| `internal_grpc.tls.cert_path` | String | Unset | Certificate file path. |
| `internal_grpc.tls.key_path` | String | Unset | Private key file path. |
| `internal_grpc.tls.watch` | Bool | `false` | Watch for certificate and key file changes and automatically reload them.<br/>For now, the gRPC TLS config does not support auto reload. |
| `mysql` | -- | -- | MySQL server options. |
| `mysql.enable` | Bool | `true` | Whether to enable the MySQL server. |
| `mysql.addr` | String | `127.0.0.1:4002` | The address to bind the MySQL server. |
| `mysql.runtime_size` | Integer | `2` | The number of server worker threads. |
| `mysql.keep_alive` | String | `0s` | Server-side keep-alive time.<br/>Set to 0 (default) to disable. |
| `mysql.prepared_stmt_cache_size` | Integer | `10000` | Maximum entries in the MySQL prepared statement cache; default is 10,000. |
| `mysql.tls` | -- | -- | -- |
| `mysql.tls.mode` | String | `disable` | TLS mode, refer to https://www.postgresql.org/docs/current/libpq-ssl.html<br/>- `disable` (default value)<br/>- `prefer`<br/>- `require`<br/>- `verify-ca`<br/>- `verify-full` |
| `mysql.tls.cert_path` | String | Unset | Certificate file path. |
| `mysql.tls.key_path` | String | Unset | Private key file path. |
| `mysql.tls.watch` | Bool | `false` | Watch for certificate and key file changes and automatically reload them. |
| `postgres` | -- | -- | PostgreSQL server options. |
| `postgres.enable` | Bool | `true` | Whether to enable the PostgreSQL server. |
| `postgres.addr` | String | `127.0.0.1:4003` | The address to bind the PostgreSQL server. |
| `postgres.runtime_size` | Integer | `2` | The number of server worker threads. |
| `postgres.keep_alive` | String | `0s` | Server-side keep-alive time.<br/>Set to 0 (default) to disable. |
| `postgres.tls` | -- | -- | PostgreSQL server TLS options, see `mysql.tls` section. |
| `postgres.tls.mode` | String | `disable` | TLS mode. |
| `postgres.tls.cert_path` | String | Unset | Certificate file path. |
| `postgres.tls.key_path` | String | Unset | Private key file path. |
| `postgres.tls.watch` | Bool | `false` | Watch for certificate and key file changes and automatically reload them. |
| `opentsdb` | -- | -- | OpenTSDB protocol options. |
| `opentsdb.enable` | Bool | `true` | Whether to enable OpenTSDB put in HTTP API. |
| `influxdb` | -- | -- | InfluxDB protocol options. |
| `influxdb.enable` | Bool | `true` | Whether to enable InfluxDB protocol in HTTP API. |
| `jaeger` | -- | -- | Jaeger protocol options. |
| `jaeger.enable` | Bool | `true` | Whether to enable Jaeger protocol in HTTP API. |
| `prom_store` | -- | -- | Prometheus remote storage options. |
| `prom_store.enable` | Bool | `true` | Whether to enable Prometheus remote write and read in HTTP API. |
| `prom_store.with_metric_engine` | Bool | `true` | Whether to store the data from Prometheus remote write in metric engine. |
| `meta_client` | -- | -- | The metasrv client options. |
| `meta_client.metasrv_addrs` | Array | -- | The addresses of the metasrv. |
| `meta_client.timeout` | String | `3s` | Operation timeout. |
| `meta_client.ddl_timeout` | String | `10s` | DDL timeout. |
| `meta_client.connect_timeout` | String | `1s` | Connect server timeout. |
| `meta_client.tcp_nodelay` | Bool | `true` | `TCP_NODELAY` option for accepted connections. |
| `meta_client.metadata_cache_max_capacity` | Integer | `100000` | The maximum capacity of the metadata cache. |
| `meta_client.metadata_cache_ttl` | String | `10m` | TTL of the metadata cache. |
| `meta_client.metadata_cache_tti` | String | `5m` | TTI (time-to-idle) of the metadata cache. |
| `query` | -- | -- | The query engine options. |
| `query.parallelism` | Integer | `0` | Parallelism of the query engine.<br/>Defaults to 0, which means the number of CPU cores. |
| `query.allow_query_fallback` | Bool | `false` | Whether to allow query fallback when pushdown optimization fails.<br/>Defaults to false, meaning an error message is returned when pushdown optimization fails. |
| `query.memory_pool_size` | String | `50%` | Memory pool size for query execution operators (aggregation, sorting, join).<br/>Supports absolute size (e.g., "4GB", "8GB") or percentage of system memory (e.g., "30%").<br/>Setting it to 0 disables the limit (unbounded).<br/>When this limit is reached, queries will fail with ResourceExhausted error.<br/>NOTE: This does NOT limit memory used by table scans (only applies to datanodes). |
| `datanode` | -- | -- | Datanode options. |
| `datanode.client` | -- | -- | Datanode client options. |
| `datanode.client.connect_timeout` | String | `10s` | Connect server timeout. |
| `datanode.client.tcp_nodelay` | Bool | `true` | `TCP_NODELAY` option for accepted connections. |
| `logging` | -- | -- | The logging options. |
| `logging.dir` | String | `./greptimedb_data/logs` | The directory to store the log files. If set to empty, logs will not be written to files. |
| `logging.level` | String | Unset | The log level. Can be `info`/`debug`/`warn`/`error`. |
| `logging.enable_otlp_tracing` | Bool | `false` | Enable OTLP tracing. |
| `logging.otlp_endpoint` | String | `http://localhost:4318/v1/traces` | The OTLP tracing endpoint. |
| `logging.append_stdout` | Bool | `true` | Whether to append logs to stdout. |
| `logging.log_format` | String | `text` | The log format. Can be `text`/`json`. |
| `logging.max_log_files` | Integer | `720` | The maximum number of log files. |
| `logging.otlp_export_protocol` | String | `http` | The OTLP tracing export protocol. Can be `grpc`/`http`. |
| `logging.otlp_headers` | -- | -- | Additional OTLP headers; only valid when using OTLP over HTTP. |
| `logging.tracing_sample_ratio` | -- | Unset | The percentage of traces that will be sampled and exported.<br/>Valid range `[0, 1]`: 1 means all traces are sampled, 0 means no traces are sampled; the default value is 1.<br/>Ratios > 1 are treated as 1; ratios < 0 are treated as 0. |
| `logging.tracing_sample_ratio.default_ratio` | Float | `1.0` | -- |
| `slow_query` | -- | -- | The slow query log options. |
| `slow_query.enable` | Bool | `true` | Whether to enable slow query log. |
| `slow_query.record_type` | String | `system_table` | The record type of slow queries. It can be `system_table` or `log`.<br/>If `system_table` is selected, the slow queries will be recorded in a system table `greptime_private.slow_queries`.<br/>If `log` is selected, the slow queries will be logged in a log file `greptimedb-slow-queries.*`. |
| `slow_query.threshold` | String | `30s` | The threshold of slow query. It can be a human-readable time string, for example: `10s`, `100ms`, `1s`. |
| `slow_query.sample_ratio` | Float | `1.0` | The sampling ratio of slow query log. The value should be in the range of (0, 1]. For example, `0.1` means 10% of the slow queries will be logged and `1.0` means all slow queries will be logged. |
| `slow_query.ttl` | String | `90d` | The TTL of the `slow_queries` system table. Default is `90d` when `record_type` is `system_table`. |
| `tracing` | -- | -- | The tracing options. Only effective when compiled with the `tokio-console` feature. |
| `tracing.tokio_console_addr` | String | Unset | The tokio console address. |
| `memory` | -- | -- | The memory options. |
| `memory.enable_heap_profiling` | Bool | `true` | Whether to enable heap profiling activation during startup.<br/>When enabled, heap profiling will be activated if the `MALLOC_CONF` environment variable<br/>is set to "prof:true,prof_active:false". The official image adds this env variable.<br/>Default is true. |
| `event_recorder` | -- | -- | Configuration options for the event recorder. |
| `event_recorder.ttl` | String | `90d` | TTL for the events table that will be used to store the events. Default is `90d`. |
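
As a quick reference, here is a minimal frontend TOML sketch that combines the metasrv client, MySQL TLS, and slow query options above; the addresses, certificate paths, and thresholds are placeholders, not recommendations:

```toml
[meta_client]
metasrv_addrs = ["127.0.0.1:3002"]
timeout = "3s"

[mysql]
enable = true
addr = "127.0.0.1:4002"

[mysql.tls]
# `require` enforces TLS for all MySQL connections.
mode = "require"
cert_path = "/path/to/server.crt"
key_path = "/path/to/server.key"
watch = true

[slow_query]
enable = true
record_type = "system_table"
threshold = "30s"
sample_ratio = 1.0
```
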
### Metasrv
| Key | Type | Default | Descriptions |
| --- | -----| ------- | ----------- |
| `data_home` | String | `./greptimedb_data` | The working home directory. |
| `store_addrs` | Array | -- | Store server address(es). The format depends on the selected backend.<br/><br/>For etcd: a list of "host:port" endpoints.<br/>e.g. ["192.168.1.1:2379", "192.168.1.2:2379"]<br/><br/>For PostgreSQL: a connection string in libpq format or URI.<br/>e.g.<br/>- "host=localhost port=5432 user=postgres password=<PASSWORD> dbname=postgres"<br/>- "postgresql://user:password@localhost:5432/mydb?connect_timeout=10"<br/>For details, see: https://docs.rs/tokio-postgres/latest/tokio_postgres/config/struct.Config.html<br/><br/>For MySQL: a connection URL.<br/>e.g. "mysql://user:password@localhost:3306/greptime_meta?ssl-mode=VERIFY_CA&ssl-ca=/path/to/ca.pem" |
| `store_key_prefix` | String | `""` | If it's not empty, the metasrv will store all data with this key prefix. |
| `backend` | String | `etcd_store` | The datastore for meta server.<br/>Available values:<br/>- `etcd_store` (default value)<br/>- `memory_store`<br/>- `postgres_store`<br/>- `mysql_store` |
| `meta_table_name` | String | `greptime_metakv` | Table name in the RDS to store metadata. Effective when using an RDS kv-backend.<br/>**Only used when backend is `postgres_store`.** |
| `meta_schema_name` | String | `greptime_schema` | Optional PostgreSQL schema for metadata table and election table name qualification.<br/>When the PostgreSQL public schema is not writable (e.g., PostgreSQL 15+ with restricted public),<br/>set this to a writable schema. GreptimeDB will use `meta_schema_name`.`meta_table_name`.<br/>GreptimeDB will NOT create the schema automatically; please ensure it exists or the user has permission.<br/>**Only used when backend is `postgres_store`.** |
| `meta_election_lock_id` | Integer | `1` | Advisory lock ID in PostgreSQL for election. Effective when using PostgreSQL as the kv-backend.<br/>Only used when backend is `postgres_store`. |
| `selector` | String | `round_robin` | Datanode selector type.<br/>- `round_robin` (default value)<br/>- `lease_based`<br/>- `load_based`<br/>For details, please see "https://docs.greptime.com/developer-guide/metasrv/selector". |
| `use_memory_store` | Bool | `false` | Store data in memory. |
| `enable_region_failover` | Bool | `false` | Whether to enable region failover.<br/>This feature is only available on GreptimeDB running in cluster mode and<br/>- using remote WAL<br/>- using shared storage (e.g., S3). |
| `region_failure_detector_initialization_delay` | String | `10m` | The delay before starting region failure detection.<br/>This delay helps prevent Metasrv from triggering unnecessary region failovers before all Datanodes are fully started.<br/>Especially useful when the cluster is not deployed with GreptimeDB Operator and maintenance mode is not enabled. |
| `allow_region_failover_on_local_wal` | Bool | `false` | Whether to allow region failover on local WAL.<br/>**This option is not recommended to be set to true, because it may lead to data loss during failover.** |
| `node_max_idle_time` | String | `24hours` | Max allowed idle time before removing node info from metasrv memory. |
| `enable_telemetry` | Bool | `true` | Whether to enable greptimedb telemetry. Enabled by default. |
| `runtime` | -- | -- | The runtime options. |
| `runtime.global_rt_size` | Integer | `8` | The number of threads to execute the runtime for global read operations. |
| `runtime.compact_rt_size` | Integer | `4` | The number of threads to execute the runtime for global write operations. |
| `backend_tls` | -- | -- | TLS configuration for the kv store backend (applicable for etcd, PostgreSQL, and MySQL backends).<br/>When using etcd, PostgreSQL, or MySQL as the metadata store, you can configure TLS here.<br/><br/>Note: if TLS is configured in both this section and the `store_addrs` connection string, the<br/>settings here will override the TLS settings in `store_addrs`. |
| `backend_tls.mode` | String | `prefer` | TLS mode, refer to https://www.postgresql.org/docs/current/libpq-ssl.html<br/>- "disable" - No TLS<br/>- "prefer" (default) - Try TLS, fallback to plain<br/>- "require" - Require TLS<br/>- "verify_ca" - Require TLS and verify CA<br/>- "verify_full" - Require TLS and verify hostname |
| `backend_tls.cert_path` | String | `""` | Path to the client certificate file (for client authentication),<br/>e.g. "/path/to/client.crt" |
| `backend_tls.key_path` | String | `""` | Path to the client private key file (for client authentication),<br/>e.g. "/path/to/client.key" |
| `backend_tls.ca_cert_path` | String | `""` | Path to the CA certificate file (for server certificate verification).<br/>Required when using custom CAs or self-signed certificates.<br/>Leave empty to use system root certificates only,<br/>e.g. "/path/to/ca.crt" |
| `grpc` | -- | -- | The gRPC server options. |
| `grpc.bind_addr` | String | `127.0.0.1:3002` | The address to bind the gRPC server. |
| `grpc.server_addr` | String | `127.0.0.1:3002` | The communication server address for the frontend and datanode to connect to metasrv.<br/>If left empty or unset, the server will automatically use the IP address of the first network interface<br/>on the host, with the same port number as the one specified in `bind_addr`. |
| `grpc.runtime_size` | Integer | `8` | The number of server worker threads. |
| `grpc.max_recv_message_size` | String | `512MB` | The maximum receive message size for gRPC server. |
| `grpc.max_send_message_size` | String | `512MB` | The maximum send message size for gRPC server. |
| `http` | -- | -- | The HTTP server options. |
| `http.addr` | String | `127.0.0.1:4000` | The address to bind the HTTP server. |
| `http.timeout` | String | `0s` | HTTP request timeout. Set to 0 to disable timeout. |
| `http.body_limit` | String | `64MB` | HTTP request body limit.<br/>The following units are supported: `B`, `KB`, `KiB`, `MB`, `MiB`, `GB`, `GiB`, `TB`, `TiB`, `PB`, `PiB`.<br/>Set to 0 to disable limit. |
| `procedure` | -- | -- | Procedure storage options. |
| `procedure.max_retry_times` | Integer | `12` | Procedure max retry time. |
| `procedure.retry_delay` | String | `500ms` | Initial retry delay of procedures; it increases exponentially. |
| `procedure.max_metadata_value_size` | String | `1500KiB` | Automatically split large values.<br/>GreptimeDB procedures use etcd as the default metadata storage backend.<br/>etcd's maximum size for any request is 1.5 MiB.<br/>1500KiB = 1536KiB (1.5MiB) - 36KiB (reserved size of key).<br/>Comment out `max_metadata_value_size` to not split large values (no limit). |
| `procedure.max_running_procedures` | Integer | `128` | Max running procedures.<br/>The maximum number of procedures that can be running at the same time.<br/>If the number of running procedures exceeds this limit, the procedure will be rejected. |
| `failure_detector` | -- | -- | -- |
| `failure_detector.threshold` | Float | `8.0` | Maximum acceptable φ before the peer is treated as failed.<br/>Lower values react faster but yield more false positives. |
| `failure_detector.min_std_deviation` | String | `100ms` | The minimum standard deviation of the heartbeat intervals.<br/>So tiny variations don’t make φ explode. Prevents hypersensitivity when heartbeat intervals barely vary. |
| `failure_detector.acceptable_heartbeat_pause` | String | `10000ms` | The acceptable pause duration between heartbeats.<br/>An extra grace period added to the learned mean interval before φ rises, absorbing temporary network hiccups or GC pauses. |
| `datanode` | -- | -- | Datanode options. |
| `datanode.client` | -- | -- | Datanode client options. |
| `datanode.client.timeout` | String | `10s` | Operation timeout. |
| `datanode.client.connect_timeout` | String | `10s` | Connect server timeout. |
| `datanode.client.tcp_nodelay` | Bool | `true` | `TCP_NODELAY` option for accepted connections. |
| `wal` | -- | -- | -- |
| `wal.provider` | String | `raft_engine` | -- |
| `wal.broker_endpoints` | Array | -- | The broker endpoints of the Kafka cluster.<br/><br/>**It's only used when the provider is `kafka`**. |
| `wal.auto_create_topics` | Bool | `true` | Automatically create topics for WAL.<br/>Set to `true` to automatically create topics for WAL.<br/>Otherwise, use topics named `topic_name_prefix_[0..num_topics)`.<br/>**It's only used when the provider is `kafka`**. |
| `wal.auto_prune_interval` | String | `30m` | Interval of automatic WAL pruning.<br/>Set to `0s` to disable automatic WAL pruning, which periodically deletes unused remote WAL entries.<br/>**It's only used when the provider is `kafka`**. |
| `wal.flush_trigger_size` | String | `512MB` | Estimated size threshold to trigger a flush when using Kafka remote WAL.<br/>Since multiple regions may share a Kafka topic, the estimated size is calculated as:<br/> (latest_entry_id - flushed_entry_id) * avg_record_size<br/>MetaSrv triggers a flush for a region when this estimated size exceeds `flush_trigger_size`.<br/>- `latest_entry_id`: The latest entry ID in the topic.<br/>- `flushed_entry_id`: The last flushed entry ID for the region.<br/>Set to "0" to let the system decide the flush trigger size.<br/>**It's only used when the provider is `kafka`**. |
| `wal.checkpoint_trigger_size` | String | `128MB` | Estimated size threshold to trigger a checkpoint when using Kafka remote WAL.<br/>The estimated size is calculated as:<br/> (latest_entry_id - last_checkpoint_entry_id) * avg_record_size<br/>MetaSrv triggers a checkpoint for a region when this estimated size exceeds `checkpoint_trigger_size`.<br/>Set to "0" to let the system decide the checkpoint trigger size.<br/>**It's only used when the provider is `kafka`**. |
| `wal.auto_prune_parallelism` | Integer | `10` | Concurrent task limit for automatic WAL pruning.<br/>**It's only used when the provider is `kafka`**. |
| `wal.num_topics` | Integer | `64` | Number of topics used for remote WAL.<br/>**It's only used when the provider is `kafka`**. |
| `wal.selector_type` | String | `round_robin` | Topic selector type.<br/>Available selector types:<br/>- `round_robin` (default)<br/>**It's only used when the provider is `kafka`**. |
| `wal.topic_name_prefix` | String | `greptimedb_wal_topic` | A Kafka topic is constructed by concatenating `topic_name_prefix` and `topic_id`.<br/>Only accepts strings that match the following regular expression pattern:<br/>[a-zA-Z_:-][a-zA-Z0-9_:\-\.@#]*<br/>e.g., greptimedb_wal_topic_0, greptimedb_wal_topic_1.<br/>**It's only used when the provider is `kafka`**. |
| `wal.replication_factor` | Integer | `1` | Expected number of replicas of each partition.<br/>**It's only used when the provider is `kafka`**. |
| `wal.create_topic_timeout` | String | `30s` | The timeout for creating a Kafka topic.<br/>**It's only used when the provider is `kafka`**. |
| `event_recorder` | -- | -- | Configuration options for the event recorder. |
| `event_recorder.ttl` | String | `90d` | TTL for the events table that will be used to store the events. Default is `90d`. |
| `stats_persistence` | -- | -- | Configuration options for the stats persistence. |
| `stats_persistence.ttl` | String | `0s` | TTL for the stats table that will be used to store the stats.<br/>Set to `0s` to disable stats persistence.<br/>Default is `0s`.<br/>If you want to enable stats persistence, set the TTL to a value greater than 0.<br/>It is recommended to set a small value, e.g., `3h`. |
| `stats_persistence.interval` | String | `10m` | The interval to persist the stats. Default is `10m`.<br/>The minimum value is `10m`; if the value is less than `10m`, it will be overridden to `10m`. |
| `logging` | -- | -- | The logging options. |
| `logging.dir` | String | `./greptimedb_data/logs` | The directory to store the log files. If set to empty, logs will not be written to files. |
| `logging.level` | String | Unset | The log level. Can be `info`/`debug`/`warn`/`error`. |
| `logging.enable_otlp_tracing` | Bool | `false` | Enable OTLP tracing. |
| `logging.otlp_endpoint` | String | `http://localhost:4318/v1/traces` | The OTLP tracing endpoint. |
| `logging.append_stdout` | Bool | `true` | Whether to append logs to stdout. |
| `logging.log_format` | String | `text` | The log format. Can be `text`/`json`. |
| `logging.max_log_files` | Integer | `720` | The maximum number of log files. |
| `logging.otlp_export_protocol` | String | `http` | The OTLP tracing export protocol. Can be `grpc`/`http`. |
| `logging.otlp_headers` | -- | -- | Additional OTLP headers; only valid when using OTLP over HTTP. |
| `logging.tracing_sample_ratio` | -- | Unset | The percentage of traces that will be sampled and exported.<br/>Valid range `[0, 1]`: 1 means all traces are sampled, 0 means no traces are sampled; the default value is 1.<br/>Ratios > 1 are treated as 1; ratios < 0 are treated as 0. |
| `logging.tracing_sample_ratio.default_ratio` | Float | `1.0` | -- |
| `tracing` | -- | -- | The tracing options. Only effective when compiled with the `tokio-console` feature. |
| `tracing.tokio_console_addr` | String | Unset | The tokio console address. |
| `memory` | -- | -- | The memory options. |
| `memory.enable_heap_profiling` | Bool | `true` | Whether to enable heap profiling activation during startup.<br/>When enabled, heap profiling will be activated if the `MALLOC_CONF` environment variable<br/>is set to "prof:true,prof_active:false". The official image adds this env variable.<br/>Default is true. |
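
For example, here is a metasrv TOML sketch that pairs a PostgreSQL metadata backend with a Kafka remote WAL; the connection string, schema names, and paths are placeholders:

```toml
backend = "postgres_store"
store_addrs = ["postgresql://user:password@localhost:5432/mydb"]
meta_table_name = "greptime_metakv"
# Set when the PostgreSQL public schema is not writable.
meta_schema_name = "greptime_schema"

[backend_tls]
mode = "verify_full"
ca_cert_path = "/path/to/ca.crt"

[wal]
provider = "kafka"
broker_endpoints = ["localhost:9092"]
num_topics = 64
auto_prune_interval = "30m"
```
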
### Datanode
| Key | Type | Default | Descriptions |
| --- | -----| ------- | ----------- |
| `node_id` | Integer | Unset | The datanode identifier, which should be unique in the cluster. |
| `default_column_prefix` | String | Unset | The default column prefix for auto-created time index and value columns. |
| `require_lease_before_startup` | Bool | `false` | Start services after regions have obtained leases.<br/>It will block the datanode from starting if it can't receive leases in the heartbeat from metasrv. |
| `init_regions_in_background` | Bool | `false` | Initialize all regions in the background during the startup.<br/>By default, it provides services after all regions have been initialized. |
| `init_regions_parallelism` | Integer | `16` | Parallelism of initializing regions. |
| `max_concurrent_queries` | Integer | `0` | The maximum number of concurrent queries allowed to be executed. Zero means unlimited.<br/>NOTE: This setting affects `scan_memory_limit`'s privileged tier allocation.<br/>When set, 70% of queries get privileged memory access (the full `scan_memory_limit`).<br/>The remaining 30% get standard tier access (70% of `scan_memory_limit`). |
| `enable_telemetry` | Bool | `true` | Enable telemetry to collect anonymous usage data. Enabled by default. |
| `http` | -- | -- | The HTTP server options. |
| `http.addr` | String | `127.0.0.1:4000` | The address to bind the HTTP server. |
| `http.timeout` | String | `0s` | HTTP request timeout. Set to 0 to disable timeout. |
| `http.body_limit` | String | `64MB` | HTTP request body limit.<br/>The following units are supported: `B`, `KB`, `KiB`, `MB`, `MiB`, `GB`, `GiB`, `TB`, `TiB`, `PB`, `PiB`.<br/>Set to 0 to disable limit. |
| `grpc` | -- | -- | The gRPC server options. |
| `grpc.bind_addr` | String | `127.0.0.1:3001` | The address to bind the gRPC server. |
| `grpc.server_addr` | String | `127.0.0.1:3001` | The address advertised to the metasrv, and used for connections from outside the host.<br/>If left empty or unset, the server will automatically use the IP address of the first network interface<br/>on the host, with the same port number as the one specified in `grpc.bind_addr`. |
| `grpc.runtime_size` | Integer | `8` | The number of server worker threads. |
| `grpc.max_recv_message_size` | String | `512MB` | The maximum receive message size for gRPC server. |
| `grpc.max_send_message_size` | String | `512MB` | The maximum send message size for gRPC server. |
| `grpc.flight_compression` | String | `arrow_ipc` | Compression mode for datanode side Arrow IPC service. Available options:<br/>- `none`: disable all compression<br/>- `transport`: only enable gRPC transport compression (zstd)<br/>- `arrow_ipc`: only enable Arrow IPC compression (lz4)<br/>- `all`: enable all compression. |
| `grpc.tls` | -- | -- | gRPC server TLS options, see `mysql.tls` section. |
| `grpc.tls.mode` | String | `disable` | TLS mode. |
| `grpc.tls.cert_path` | String | Unset | Certificate file path. |
| `grpc.tls.key_path` | String | Unset | Private key file path. |
| `grpc.tls.watch` | Bool | `false` | Watch for certificate and key file changes and automatically reload them.<br/>For now, the gRPC TLS config does not support auto reload. |
| `runtime` | -- | -- | The runtime options. |
| `runtime.global_rt_size` | Integer | `8` | The number of threads to execute the runtime for global read operations. |
| `runtime.compact_rt_size` | Integer | `4` | The number of threads to execute the runtime for global write operations. |
| `heartbeat` | -- | -- | The heartbeat options. |
| `heartbeat.interval` | String | `3s` | Interval for sending heartbeat messages to the metasrv. |
| `heartbeat.retry_interval` | String | `3s` | Interval for retrying to send heartbeat messages to the metasrv. |
| `meta_client` | -- | -- | The metasrv client options. |
| `meta_client.metasrv_addrs` | Array | -- | The addresses of the metasrv. |
| `meta_client.timeout` | String | `3s` | Operation timeout. |
| `meta_client.ddl_timeout` | String | `10s` | DDL timeout. |
| `meta_client.connect_timeout` | String | `1s` | Connect server timeout. |
| `meta_client.tcp_nodelay` | Bool | `true` | `TCP_NODELAY` option for accepted connections. |
| `meta_client.metadata_cache_max_capacity` | Integer | `100000` | The maximum capacity of the metadata cache. |
| `meta_client.metadata_cache_ttl` | String | `10m` | TTL of the metadata cache. |
| `meta_client.metadata_cache_tti` | String | `5m` | TTI (time-to-idle) of the metadata cache. |
| `wal` | -- | -- | The WAL options. |
| `wal.provider` | String | `raft_engine` | The provider of the WAL.<br/>- `raft_engine`: the WAL is stored in the local file system by raft-engine.<br/>- `kafka`: remote WAL where data is stored in Kafka.<br/>- `noop`: a no-op WAL provider that does not store any WAL data.<br/>**Note: any unflushed data will be lost when the datanode is shut down.** |
| `wal.dir` | String | Unset | The directory to store the WAL files.<br/>**It's only used when the provider is `raft_engine`**. |
| `wal.file_size` | String | `128MB` | The size of the WAL segment file.<br/>**It's only used when the provider is `raft_engine`**. |
| `wal.purge_threshold` | String | `1GB` | The threshold of the WAL size to trigger a purge.<br/>**It's only used when the provider is `raft_engine`**. |
| `wal.purge_interval` | String | `1m` | The interval to trigger a purge.<br/>**It's only used when the provider is `raft_engine`**. |
| `wal.read_batch_size` | Integer | `128` | The read batch size.<br/>**It's only used when the provider is `raft_engine`**. |
| `wal.sync_write` | Bool | `false` | Whether to use sync write.<br/>**It's only used when the provider is `raft_engine`**. |
| `wal.enable_log_recycle` | Bool | `true` | Whether to reuse logically truncated log files.<br/>**It's only used when the provider is `raft_engine`**. |
| `wal.prefill_log_files` | Bool | `false` | Whether to pre-create log files on startup.<br/>**It's only used when the provider is `raft_engine`**. |
| `wal.sync_period` | String | `10s` | Duration for fsyncing log files.<br/>**It's only used when the provider is `raft_engine`**. |
| `wal.recovery_parallelism` | Integer | `2` | Parallelism during WAL recovery. |
| `wal.broker_endpoints` | Array | -- | The Kafka broker endpoints.<br/>**It's only used when the provider is `kafka`**. |
| `wal.max_batch_bytes` | String | `1MB` | The max size of a single producer batch.<br/>Warning: Kafka has a default limit of 1MB per message in a topic.<br/>**It's only used when the provider is `kafka`**. |
| `wal.consumer_wait_timeout` | String | `100ms` | The consumer wait timeout.<br/>**It's only used when the provider is `kafka`**. |
| `wal.create_index` | Bool | `true` | Whether to enable WAL index creation.<br/>**It's only used when the provider is `kafka`**. |
| `wal.dump_index_interval` | String | `60s` | The interval for dumping WAL indexes.<br/>**It's only used when the provider is `kafka`**. |
| `wal.overwrite_entry_start_id` | Bool | `false` | Ignore missing entries when reading the WAL.<br/>**It's only used when the provider is `kafka`**.<br/><br/>This option ensures that when Kafka messages are deleted, the system<br/>can still successfully replay memtable data without throwing an<br/>out-of-range error.<br/>However, enabling this option might lead to unexpected data loss,<br/>as the system will skip over missing entries instead of treating<br/>them as critical errors. |
| `query` | -- | -- | The query engine options. |
| `query.parallelism` | Integer | `0` | Parallelism of the query engine.<br/>Defaults to 0, which means the number of CPU cores. |
| `query.memory_pool_size` | String | `50%` | Memory pool size for query execution operators (aggregation, sorting, join).<br/>Supports absolute size (e.g., "2GB", "4GB") or percentage of system memory (e.g., "20%").<br/>Setting it to 0 disables the limit (unbounded).<br/>When this limit is reached, queries will fail with ResourceExhausted error.<br/>NOTE: This does NOT limit memory used by table scans. |
| `storage` | -- | -- | The data storage options. |
| `storage.data_home` | String | `./greptimedb_data` | The working home directory. |
| `storage.type` | String | `File` | The storage type used to store the data.<br/>- `File`: the data is stored in the local file system.<br/>- `S3`: the data is stored in the S3 object storage.<br/>- `Gcs`: the data is stored in the Google Cloud Storage.<br/>- `Azblob`: the data is stored in the Azure Blob Storage.<br/>- `Oss`: the data is stored in the Aliyun OSS. |
| `storage.cache_path` | String | Unset | Read cache configuration for object storage such as `S3`; it is configured by default when using object storage and is recommended for better performance.<br/>A local file directory, defaults to `{data_home}`. An empty string disables it. |
| `storage.enable_read_cache` | Bool | `true` | Whether to enable read cache. If not set, the read cache will be enabled by default when using object storage. |
| `storage.cache_capacity` | String | Unset | The local file cache capacity in bytes. If your disk space is sufficient, it is recommended to set it larger. |
| `storage.bucket` | String | Unset | The S3 bucket name.<br/>**It's only used when the storage type is `S3`, `Oss` and `Gcs`**. |
| `storage.root` | String | Unset | The S3 data will be stored in the specified prefix, for example, `s3://${bucket}/${root}`.<br/>**It's only used when the storage type is `S3`, `Oss` and `Azblob`**. |
| `storage.access_key_id` | String | Unset | The access key id of the aws account.<br/>It's **highly recommended** to use AWS IAM roles instead of hardcoding the access key id and secret key.<br/>**It's only used when the storage type is `S3` and `Oss`**. |
| `storage.secret_access_key` | String | Unset | The secret access key of the aws account.<br/>It's **highly recommended** to use AWS IAM roles instead of hardcoding the access key id and secret key.<br/>**It's only used when the storage type is `S3`**. |
| `storage.access_key_secret` | String | Unset | The secret access key of the aliyun account.<br/>**It's only used when the storage type is `Oss`**. |
| `storage.account_name` | String | Unset | The account name of the azure account.<br/>**It's only used when the storage type is `Azblob`**. |
| `storage.account_key` | String | Unset | The account key of the azure account.<br/>**It's only used when the storage type is `Azblob`**. |
| `storage.scope` | String | Unset | The scope of the google cloud storage.<br/>**It's only used when the storage type is `Gcs`**. |
| `storage.credential_path` | String | Unset | The credential path of the google cloud storage.<br/>**It's only used when the storage type is `Gcs`**. |
| `storage.credential` | String | Unset | The credential of the google cloud storage.<br/>**It's only used when the storage type is `Gcs`**. |
| `storage.container` | String | Unset | The container of the azure account.<br/>**It's only used when the storage type is `Azblob`**. |
| `storage.sas_token` | String | Unset | The SAS token of the azure account.<br/>**It's only used when the storage type is `Azblob`**. |
| `storage.endpoint` | String | Unset | The endpoint of the S3 service.<br/>**It's only used when the storage type is `S3`, `Oss`, `Gcs` and `Azblob`**. |
| `storage.region` | String | Unset | The region of the S3 service.<br/>**It's only used when the storage type is `S3`, `Oss`, `Gcs` and `Azblob`**. |
| `storage.http_client` | -- | -- | The HTTP client options for the storage.<br/>**It's only used when the storage type is `S3`, `Oss`, `Gcs` and `Azblob`**. |
| `storage.http_client.pool_max_idle_per_host` | Integer | `1024` | The maximum number of idle connections per host allowed in the pool. |
| `storage.http_client.connect_timeout` | String | `30s` | The timeout for only the connect phase of an HTTP client. |
| `storage.http_client.timeout` | String | `30s` | The total request timeout, applied from when the request starts connecting until the response body has finished.<br/>Also considered a total deadline. |
| `storage.http_client.pool_idle_timeout` | String | `90s` | The timeout for idle sockets being kept alive. |
| `storage.http_client.skip_ssl_validation` | Bool | `false` | Whether to skip SSL verification.<br/>**Security Notice**: Setting `skip_ssl_validation = true` disables certificate verification, making connections vulnerable to man-in-the-middle attacks. Only use this in development or trusted private networks. |
| `[[region_engine]]` | -- | -- | The region engine options. You can configure multiple region engines. |
| `region_engine.mito` | -- | -- | The Mito engine options. |
| `region_engine.mito.num_workers` | Integer | `8` | Number of region workers. |
| `region_engine.mito.worker_channel_size` | Integer | `128` | Request channel size of each worker. |
| `region_engine.mito.worker_request_batch_size` | Integer | `64` | Max batch size for a worker to handle requests. |
| `region_engine.mito.manifest_checkpoint_distance` | Integer | `10` | Number of meta action updates that trigger a new checkpoint for the manifest. |
| `region_engine.mito.experimental_manifest_keep_removed_file_count` | Integer | `256` | Number of removed files to keep in the manifest's `removed_files` field before they are also<br/>removed from `removed_files`. Mostly for debugging purposes.<br/>If set to 0, only `keep_removed_file_ttl` is used to decide when to remove files<br/>from the `removed_files` field. |
| `region_engine.mito.experimental_manifest_keep_removed_file_ttl` | String | `1h` | How long to keep removed files in the `removed_files` field of the manifest<br/>after they are removed from the manifest.<br/>Files are only removed from the `removed_files` field<br/>when both `keep_removed_file_count` and `keep_removed_file_ttl` are reached. |
| `region_engine.mito.compress_manifest` | Bool | `false` | Whether to compress manifest and checkpoint files with gzip (default false). |
| `region_engine.mito.max_background_flushes` | Integer | Auto | Max number of running background flush jobs (default: 1/2 of cpu cores). |
| `region_engine.mito.max_background_compactions` | Integer | Auto | Max number of running background compaction jobs (default: 1/4 of cpu cores). |
| `region_engine.mito.max_background_purges` | Integer | Auto | Max number of running background purge jobs (default: number of cpu cores). |
| `region_engine.mito.auto_flush_interval` | String | `1h` | Interval to auto flush a region if it has not flushed yet. |
| `region_engine.mito.global_write_buffer_size` | String | Auto | Global write buffer size for all regions. If not set, it defaults to 1/8 of OS memory with a maximum limit of 1GB. |
| `region_engine.mito.global_write_buffer_reject_size` | String | Auto | Global write buffer size threshold to reject write requests. If not set, it defaults to twice `global_write_buffer_size`. |
| `region_engine.mito.sst_meta_cache_size` | String | Auto | Cache size for SST metadata. Set it to 0 to disable the cache.<br/>If not set, it defaults to 1/32 of OS memory with a maximum limit of 128MB. |
| `region_engine.mito.vector_cache_size` | String | Auto | Cache size for vectors and arrow arrays. Set it to 0 to disable the cache.<br/>If not set, it defaults to 1/16 of OS memory with a maximum limit of 512MB. |
| `region_engine.mito.page_cache_size` | String | Auto | Cache size for pages of SST row groups. Set it to 0 to disable the cache.<br/>If not set, it defaults to 1/8 of OS memory. |
| `region_engine.mito.selector_result_cache_size` | String | Auto | Cache size for time series selectors (e.g. `last_value()`). Set it to 0 to disable the cache.<br/>If not set, it defaults to 1/16 of OS memory with a maximum limit of 512MB. |
| `region_engine.mito.enable_write_cache` | Bool | `false` | Whether to enable the write cache. It is enabled by default when using object storage and is recommended for better performance. |
| `region_engine.mito.write_cache_path` | String | `""` | File system path for the write cache, defaults to `{data_home}`. |
| `region_engine.mito.write_cache_size` | String | `5GiB` | Capacity of the write cache. If your disk space is sufficient, it is recommended to set it larger. |
| `region_engine.mito.write_cache_ttl` | String | Unset | TTL for the write cache. |
| `region_engine.mito.preload_index_cache` | Bool | `true` | Preload index (puffin) files into cache on region open (default: true).<br/>When enabled, index files are loaded into the write cache during region initialization,<br/>which can improve query performance at the cost of longer startup times. |
| `region_engine.mito.index_cache_percent` | Integer | `20` | Percentage of write cache capacity allocated for index (puffin) files (default: 20).<br/>The remaining capacity is used for data (parquet) files.<br/>Must be between 0 and 100 (exclusive). For example, with a 5GiB write cache and 20% allocation,<br/>1GiB is reserved for index files and 4GiB for data files. |
| `region_engine.mito.sst_write_buffer_size` | String | `8MB` | Buffer size for SST writing. |
| `region_engine.mito.parallel_scan_channel_size` | Integer | `32` | Capacity of the channel to send data from parallel scan tasks to the main task. |
| `region_engine.mito.max_concurrent_scan_files` | Integer | `384` | Maximum number of SST files to scan concurrently. |
| `region_engine.mito.allow_stale_entries` | Bool | `false` | Whether to allow reading stale WAL entries during replay. |
| `region_engine.mito.scan_memory_limit` | String | `50%` | Memory limit for table scans across all queries.<br/>Supports absolute size (e.g., "2GB") or percentage of system memory (e.g., "20%").<br/>Setting it to 0 disables the limit.<br/>NOTE: Works with `max_concurrent_queries` for tiered memory allocation.<br/>- If `max_concurrent_queries` is set: 70% of queries get full access, 30% get 70% access.<br/>- If `max_concurrent_queries` is 0 (unlimited): the first 20 queries get full access, the rest get 70% access. |
| `region_engine.mito.min_compaction_interval` | String | `0m` | Minimum time interval between two compactions.<br/>To align with the old behavior, the default value is 0 (no restrictions). |
| `region_engine.mito.default_experimental_flat_format` | Bool | `false` | Whether to enable experimental flat format as the default format. |
| `region_engine.mito.index` | -- | -- | The options for index in Mito engine. |
| `region_engine.mito.index.aux_path` | String | `""` | Auxiliary directory path for the index in the filesystem, used to store intermediate files for<br/>creating the index and staging files for searching the index; defaults to `{data_home}/index_intermediate`.<br/>The default name for this directory is `index_intermediate` for backward compatibility.<br/><br/>This path contains two subdirectories:<br/>- `__intm`: stores intermediate files used while creating the index.<br/>- `staging`: stores staging files used while searching the index. |
| `region_engine.mito.index.staging_size` | String | `2GB` | The max capacity of the staging directory. |
| `region_engine.mito.index.staging_ttl` | String | `7d` | The TTL of the staging directory.<br/>Defaults to 7 days.<br/>Set it to "0s" to disable the TTL. |
| `region_engine.mito.index.metadata_cache_size` | String | `64MiB` | Cache size for inverted index metadata. |
| `region_engine.mito.index.content_cache_size` | String | `128MiB` | Cache size for inverted index content. |
| `region_engine.mito.index.content_cache_page_size` | String | `64KiB` | Page size for inverted index content cache. |
| `region_engine.mito.index.result_cache_size` | String | `128MiB` | Cache size for index results. |
| `region_engine.mito.inverted_index` | -- | -- | The options for inverted index in Mito engine. |
|
|
||||||
| `region_engine.mito.inverted_index.create_on_flush` | String | `auto` | Whether to create the index on flush.<br/>- `auto`: automatically (default)<br/>- `disable`: never |
|
|
||||||
| `region_engine.mito.inverted_index.create_on_compaction` | String | `auto` | Whether to create the index on compaction.<br/>- `auto`: automatically (default)<br/>- `disable`: never |
|
|
||||||
| `region_engine.mito.inverted_index.apply_on_query` | String | `auto` | Whether to apply the index on query<br/>- `auto`: automatically (default)<br/>- `disable`: never |
|
|
||||||
| `region_engine.mito.inverted_index.mem_threshold_on_create` | String | `auto` | Memory threshold for performing an external sort during index creation.<br/>- `auto`: automatically determine the threshold based on the system memory size (default)<br/>- `unlimited`: no memory limit<br/>- `[size]` e.g. `64MB`: fixed memory threshold |
|
|
||||||
| `region_engine.mito.inverted_index.intermediate_path` | String | `""` | Deprecated, use `region_engine.mito.index.aux_path` instead. |
|
|
||||||
| `region_engine.mito.fulltext_index` | -- | -- | The options for full-text index in Mito engine. |
|
|
||||||
| `region_engine.mito.fulltext_index.create_on_flush` | String | `auto` | Whether to create the index on flush.<br/>- `auto`: automatically (default)<br/>- `disable`: never |
|
|
||||||
| `region_engine.mito.fulltext_index.create_on_compaction` | String | `auto` | Whether to create the index on compaction.<br/>- `auto`: automatically (default)<br/>- `disable`: never |
|
|
||||||
| `region_engine.mito.fulltext_index.apply_on_query` | String | `auto` | Whether to apply the index on query<br/>- `auto`: automatically (default)<br/>- `disable`: never |
|
|
||||||
| `region_engine.mito.fulltext_index.mem_threshold_on_create` | String | `auto` | Memory threshold for index creation.<br/>- `auto`: automatically determine the threshold based on the system memory size (default)<br/>- `unlimited`: no memory limit<br/>- `[size]` e.g. `64MB`: fixed memory threshold |
|
|
||||||
| `region_engine.mito.bloom_filter_index` | -- | -- | The options for bloom filter index in Mito engine. |
|
|
||||||
| `region_engine.mito.bloom_filter_index.create_on_flush` | String | `auto` | Whether to create the index on flush.<br/>- `auto`: automatically (default)<br/>- `disable`: never |
|
|
||||||
| `region_engine.mito.bloom_filter_index.create_on_compaction` | String | `auto` | Whether to create the index on compaction.<br/>- `auto`: automatically (default)<br/>- `disable`: never |
|
|
||||||
| `region_engine.mito.bloom_filter_index.apply_on_query` | String | `auto` | Whether to apply the index on query<br/>- `auto`: automatically (default)<br/>- `disable`: never |
|
|
||||||
| `region_engine.mito.bloom_filter_index.mem_threshold_on_create` | String | `auto` | Memory threshold for the index creation.<br/>- `auto`: automatically determine the threshold based on the system memory size (default)<br/>- `unlimited`: no memory limit<br/>- `[size]` e.g. `64MB`: fixed memory threshold |
|
|
||||||
| `region_engine.mito.memtable` | -- | -- | -- |
|
|
||||||
| `region_engine.mito.memtable.type` | String | `time_series` | Memtable type.<br/>- `time_series`: time-series memtable<br/>- `partition_tree`: partition tree memtable (experimental) |
|
|
||||||
| `region_engine.mito.memtable.index_max_keys_per_shard` | Integer | `8192` | The max number of keys in one shard.<br/>Only available for `partition_tree` memtable. |
|
|
||||||
| `region_engine.mito.memtable.data_freeze_threshold` | Integer | `32768` | The max rows of data inside the actively writing buffer in one shard.<br/>Only available for `partition_tree` memtable. |
|
|
||||||
| `region_engine.mito.memtable.fork_dictionary_bytes` | String | `1GiB` | Max dictionary bytes.<br/>Only available for `partition_tree` memtable. |
|
|
||||||
| `region_engine.file` | -- | -- | Enable the file engine. |
|
|
||||||
| `region_engine.metric` | -- | -- | Metric engine options. |
|
|
||||||
| `region_engine.metric.sparse_primary_key_encoding` | Bool | `true` | Whether to use sparse primary key encoding. |
|
|
||||||
| `logging` | -- | -- | The logging options. |
|
|
||||||
| `logging.dir` | String | `./greptimedb_data/logs` | The directory to store the log files. If set to empty, logs will not be written to files. |
|
|
||||||
| `logging.level` | String | Unset | The log level. Can be `info`/`debug`/`warn`/`error`. |
|
|
||||||
| `logging.enable_otlp_tracing` | Bool | `false` | Enable OTLP tracing. |
|
|
||||||
| `logging.otlp_endpoint` | String | `http://localhost:4318/v1/traces` | The OTLP tracing endpoint. |
|
|
||||||
| `logging.append_stdout` | Bool | `true` | Whether to append logs to stdout. |
|
|
||||||
| `logging.log_format` | String | `text` | The log format. Can be `text`/`json`. |
|
|
||||||
| `logging.max_log_files` | Integer | `720` | The maximum amount of log files. |
|
|
||||||
| `logging.otlp_export_protocol` | String | `http` | The OTLP tracing export protocol. Can be `grpc`/`http`. |
|
|
||||||
| `logging.otlp_headers` | -- | -- | Additional OTLP headers, only valid when using OTLP http |
|
|
||||||
| `logging.tracing_sample_ratio` | -- | Unset | The percentage of tracing will be sampled and exported.<br/>Valid range `[0, 1]`, 1 means all traces are sampled, 0 means all traces are not sampled, the default value is 1.<br/>ratio > 1 are treated as 1. Fractions < 0 are treated as 0 |
|
|
||||||
| `logging.tracing_sample_ratio.default_ratio` | Float | `1.0` | -- |
|
|
||||||
| `tracing` | -- | -- | The tracing options. Only effect when compiled with `tokio-console` feature. |
|
|
||||||
| `tracing.tokio_console_addr` | String | Unset | The tokio console address. |
|
|
||||||
| `memory` | -- | -- | The memory options. |
|
|
||||||
| `memory.enable_heap_profiling` | Bool | `true` | Whether to enable heap profiling activation during startup.<br/>When enabled, heap profiling will be activated if the `MALLOC_CONF` environment variable<br/>is set to "prof:true,prof_active:false". The official image adds this env variable.<br/>Default is true. |
|
|
||||||
|
|
||||||
|
|
||||||
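
The Mito cache options above are sized independently, so it helps to see them together. Below is a minimal sketch, not a tuned recommendation; the sizes are illustrative, assuming a datanode with roughly 32GB of RAM backed by object storage, and simply mirror the documented default fractions:

```toml
[region_engine.mito]
# Roughly 1/32, 1/16, 1/8 and 1/16 of a 32GB host, matching the documented defaults.
sst_meta_cache_size = "1GB"
vector_cache_size = "2GB"
page_cache_size = "4GB"
selector_result_cache_size = "2GB"
# Write cache for object storage; with index_cache_percent = 20,
# 4GiB of the 20GiB goes to puffin index files and 16GiB to parquet data.
enable_write_cache = true
write_cache_size = "20GiB"
index_cache_percent = 20
```
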
### Flownode

| Key | Type | Default | Descriptions |
| --- | -----| ------- | ----------- |
| `node_id` | Integer | Unset | The flownode identifier; it should be unique in the cluster. |
| `flow` | -- | -- | The flow engine options. |
| `flow.num_workers` | Integer | `0` | The number of flow workers in the flownode.<br/>Not setting this value (or setting it to 0) will use half the number of CPU cores. |
| `flow.batching_mode` | -- | -- | -- |
| `flow.batching_mode.query_timeout` | String | `600s` | The batching engine query timeout. Defaults to 10 minutes. |
| `flow.batching_mode.slow_query_threshold` | String | `60s` | Outputs a warn log for any query that runs longer than this threshold. |
| `flow.batching_mode.experimental_min_refresh_duration` | String | `5s` | The minimum duration between two query executions by a batching mode task. |
| `flow.batching_mode.grpc_conn_timeout` | String | `5s` | The gRPC connection timeout. |
| `flow.batching_mode.experimental_grpc_max_retries` | Integer | `3` | The maximum number of gRPC retries. |
| `flow.batching_mode.experimental_frontend_scan_timeout` | String | `30s` | Timeout for the flow to wait for an available frontend;<br/>if no available frontend is found after `frontend_scan_timeout` elapses, an error is returned,<br/>which prevents the flownode from starting. |
| `flow.batching_mode.experimental_frontend_activity_timeout` | String | `60s` | Frontend activity timeout;<br/>if a frontend is down (not sending heartbeats) for more than `frontend_activity_timeout`,<br/>it will be removed from the list that the flownode uses to connect. |
| `flow.batching_mode.experimental_max_filter_num_per_query` | Integer | `20` | Maximum number of filters allowed in a single query. |
| `flow.batching_mode.experimental_time_window_merge_threshold` | Integer | `3` | Time window merge distance. |
| `flow.batching_mode.read_preference` | String | `Leader` | Read preference of the Frontend client. |
| `flow.batching_mode.frontend_tls` | -- | -- | -- |
| `flow.batching_mode.frontend_tls.enabled` | Bool | `false` | Whether to enable TLS for the client. |
| `flow.batching_mode.frontend_tls.server_ca_cert_path` | String | Unset | Server certificate file path. |
| `flow.batching_mode.frontend_tls.client_cert_path` | String | Unset | Client certificate file path. |
| `flow.batching_mode.frontend_tls.client_key_path` | String | Unset | Client private key file path. |
| `grpc` | -- | -- | The gRPC server options. |
| `grpc.bind_addr` | String | `127.0.0.1:6800` | The address to bind the gRPC server. |
| `grpc.server_addr` | String | `127.0.0.1:6800` | The address advertised to the metasrv,<br/>and used for connections from outside the host. |
| `grpc.runtime_size` | Integer | `2` | The number of server worker threads. |
| `grpc.max_recv_message_size` | String | `512MB` | The maximum receive message size for the gRPC server. |
| `grpc.max_send_message_size` | String | `512MB` | The maximum send message size for the gRPC server. |
| `http` | -- | -- | The HTTP server options. |
| `http.addr` | String | `127.0.0.1:4000` | The address to bind the HTTP server. |
| `http.timeout` | String | `0s` | HTTP request timeout. Set to 0 to disable the timeout. |
| `http.body_limit` | String | `64MB` | HTTP request body limit.<br/>The following units are supported: `B`, `KB`, `KiB`, `MB`, `MiB`, `GB`, `GiB`, `TB`, `TiB`, `PB`, `PiB`.<br/>Set to 0 to disable the limit. |
| `meta_client` | -- | -- | The metasrv client options. |
| `meta_client.metasrv_addrs` | Array | -- | The addresses of the metasrv. |
| `meta_client.timeout` | String | `3s` | Operation timeout. |
| `meta_client.ddl_timeout` | String | `10s` | DDL timeout. |
| `meta_client.connect_timeout` | String | `1s` | Connect server timeout. |
| `meta_client.tcp_nodelay` | Bool | `true` | `TCP_NODELAY` option for accepted connections. |
| `meta_client.metadata_cache_max_capacity` | Integer | `100000` | The maximum capacity of the metadata cache. |
| `meta_client.metadata_cache_ttl` | String | `10m` | TTL of the metadata cache. |
| `meta_client.metadata_cache_tti` | String | `5m` | TTI of the metadata cache. |
| `heartbeat` | -- | -- | The heartbeat options. |
| `heartbeat.interval` | String | `3s` | Interval for sending heartbeat messages to the metasrv. |
| `heartbeat.retry_interval` | String | `3s` | Interval for retrying to send heartbeat messages to the metasrv. |
| `logging` | -- | -- | The logging options. |
| `logging.dir` | String | `./greptimedb_data/logs` | The directory to store the log files. If set to empty, logs will not be written to files. |
| `logging.level` | String | Unset | The log level. Can be `info`/`debug`/`warn`/`error`. |
| `logging.enable_otlp_tracing` | Bool | `false` | Enable OTLP tracing. |
| `logging.otlp_endpoint` | String | `http://localhost:4318/v1/traces` | The OTLP tracing endpoint. |
| `logging.append_stdout` | Bool | `true` | Whether to append logs to stdout. |
| `logging.log_format` | String | `text` | The log format. Can be `text`/`json`. |
| `logging.max_log_files` | Integer | `720` | The maximum number of log files. |
| `logging.otlp_export_protocol` | String | `http` | The OTLP tracing export protocol. Can be `grpc`/`http`. |
| `logging.otlp_headers` | -- | -- | Additional OTLP headers; only valid when using OTLP over HTTP. |
| `logging.tracing_sample_ratio` | -- | Unset | The percentage of traces that will be sampled and exported.<br/>Valid range `[0, 1]`; 1 means all traces are sampled, 0 means none are sampled. The default value is 1.<br/>Ratios > 1 are treated as 1; ratios < 0 are treated as 0. |
| `logging.tracing_sample_ratio.default_ratio` | Float | `1.0` | -- |
| `tracing` | -- | -- | The tracing options. Only effective when compiled with the `tokio-console` feature. |
| `tracing.tokio_console_addr` | String | Unset | The tokio console address. |
| `query` | -- | -- | -- |
| `query.parallelism` | Integer | `1` | Parallelism of the query engine for queries sent by the flownode.<br/>Defaults to 1 so it won't use too much CPU or memory. |
| `query.memory_pool_size` | String | `50%` | Memory pool size for query execution operators (aggregation, sorting, join).<br/>Supports an absolute size (e.g., "1GB", "2GB") or a percentage of system memory (e.g., "20%").<br/>Setting it to 0 disables the limit (unbounded, default behavior).<br/>When this limit is reached, queries will fail with a ResourceExhausted error.<br/>NOTE: This does NOT limit memory used by table scans. |
| `memory` | -- | -- | The memory options. |
| `memory.enable_heap_profiling` | Bool | `true` | Whether to enable heap profiling activation during startup.<br/>When enabled, heap profiling will be activated if the `MALLOC_CONF` environment variable<br/>is set to "prof:true,prof_active:false". The official image adds this env variable.<br/>Default is true. |
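
Putting the flownode options together, here is a minimal sketch that overrides only the commonly touched knobs; the addresses are the documented defaults and everything else keeps its default value:

```toml
## The flownode identifier; must be unique in the cluster.
node_id = 14

[grpc]
bind_addr = "127.0.0.1:6800"
server_addr = "127.0.0.1:6800"

[meta_client]
metasrv_addrs = ["127.0.0.1:3002"]
timeout = "3s"

[heartbeat]
interval = "3s"
```
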
@@ -1,724 +1,139 @@
|
|||||||
## The datanode identifier; it should be unique in the cluster.
|
# Node running mode, see `standalone.example.toml`.
|
||||||
## @toml2docs:none-default
|
mode = "distributed"
|
||||||
|
# The datanode identifier, should be unique.
|
||||||
node_id = 42
|
node_id = 42
|
||||||
|
# gRPC server address, "127.0.0.1:3001" by default.
|
||||||
## The default column prefix for auto-created time index and value columns.
|
rpc_addr = "127.0.0.1:3001"
|
||||||
## @toml2docs:none-default
|
# Hostname of this node.
|
||||||
default_column_prefix = "greptime"
|
rpc_hostname = "127.0.0.1"
|
||||||
|
# The number of gRPC server worker threads, 8 by default.
|
||||||
## Start services after regions have obtained leases.
|
rpc_runtime_size = 8
|
||||||
## It will block the datanode start if it can't receive leases in the heartbeat from metasrv.
|
# Start services after regions have obtained leases.
|
||||||
|
# It will block the datanode start if it can't receive leases in the heartbeat from metasrv.
|
||||||
require_lease_before_startup = false
|
require_lease_before_startup = false
|
||||||
|
|
||||||
## Initialize all regions in the background during the startup.
|
# Initialize all regions in the background during the startup.
|
||||||
## By default, it provides services after all regions have been initialized.
|
# By default, it provides services after all regions have been initialized.
|
||||||
init_regions_in_background = false
|
initialize_region_in_background = false
|
||||||
|
|
||||||
## Parallelism of initializing regions.
|
|
||||||
init_regions_parallelism = 16
|
|
||||||
|
|
||||||
## The maximum number of concurrent queries allowed to be executed. Zero means unlimited.
|
|
||||||
## NOTE: This setting affects scan_memory_limit's privileged tier allocation.
|
|
||||||
## When set, 70% of queries get privileged memory access (full scan_memory_limit).
|
|
||||||
## The remaining 30% get standard tier access (70% of scan_memory_limit).
|
|
||||||
max_concurrent_queries = 0
|
|
||||||
|
|
||||||
## Enable telemetry to collect anonymous usage data. Enabled by default.
|
|
||||||
#+ enable_telemetry = true
|
|
||||||
|
|
||||||
## The HTTP server options.
|
|
||||||
[http]
|
|
||||||
## The address to bind the HTTP server.
|
|
||||||
addr = "127.0.0.1:4000"
|
|
||||||
## HTTP request timeout. Set to 0 to disable timeout.
|
|
||||||
timeout = "0s"
|
|
||||||
## HTTP request body limit.
|
|
||||||
## The following units are supported: `B`, `KB`, `KiB`, `MB`, `MiB`, `GB`, `GiB`, `TB`, `TiB`, `PB`, `PiB`.
|
|
||||||
## Set to 0 to disable limit.
|
|
||||||
body_limit = "64MB"
|
|
||||||
|
|
||||||
## The gRPC server options.
|
|
||||||
[grpc]
|
|
||||||
## The address to bind the gRPC server.
|
|
||||||
bind_addr = "127.0.0.1:3001"
|
|
||||||
## The address advertised to the metasrv, and used for connections from outside the host.
|
|
||||||
## If left empty or unset, the server will automatically use the IP address of the first network interface
|
|
||||||
## on the host, with the same port number as the one specified in `grpc.bind_addr`.
|
|
||||||
server_addr = "127.0.0.1:3001"
|
|
||||||
## The number of server worker threads.
|
|
||||||
runtime_size = 8
|
|
||||||
## The maximum receive message size for gRPC server.
|
|
||||||
max_recv_message_size = "512MB"
|
|
||||||
## The maximum send message size for gRPC server.
|
|
||||||
max_send_message_size = "512MB"
|
|
||||||
## Compression mode for datanode side Arrow IPC service. Available options:
|
|
||||||
## - `none`: disable all compression
|
|
||||||
## - `transport`: only enable gRPC transport compression (zstd)
|
|
||||||
## - `arrow_ipc`: only enable Arrow IPC compression (lz4)
|
|
||||||
## - `all`: enable all compression.
|
|
||||||
## Default to `none`
|
|
||||||
flight_compression = "arrow_ipc"
|
|
||||||
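
For reference, a hedged sketch of the compression trade-off: `arrow_ipc` (lz4) is cheap on CPU, while `all` adds zstd transport compression on top, so which pays off depends on whether the network or the CPU is the bottleneck:

```toml
[grpc]
# Illustrative: prefer bandwidth savings over CPU. Use "none" (the
# documented default) when datanode CPU is already saturated.
flight_compression = "all"
```
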
|
|
||||||
## gRPC server TLS options, see `mysql.tls` section.
|
|
||||||
[grpc.tls]
|
|
||||||
## TLS mode.
|
|
||||||
mode = "disable"
|
|
||||||
|
|
||||||
## Certificate file path.
|
|
||||||
## @toml2docs:none-default
|
|
||||||
cert_path = ""
|
|
||||||
|
|
||||||
## Private key file path.
|
|
||||||
## @toml2docs:none-default
|
|
||||||
key_path = ""
|
|
||||||
|
|
||||||
## Watch for certificate and key file changes and auto reload.
|
|
||||||
## For now, gRPC tls config does not support auto reload.
|
|
||||||
watch = false
|
|
||||||
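
A minimal sketch of enabling gRPC TLS, assuming the same mode values as the `mysql.tls` section it refers to; the `require` mode and the file paths are placeholders, not verified values:

```toml
[grpc.tls]
mode = "require"                   # assumed mode value; see the mysql.tls section for accepted modes
cert_path = "/path/to/server.crt"  # placeholder
key_path = "/path/to/server.key"   # placeholder
watch = false                      # gRPC TLS config does not support auto reload yet
```
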
|
|
||||||
## The runtime options.
|
|
||||||
#+ [runtime]
|
|
||||||
## The number of threads to execute the runtime for global read operations.
|
|
||||||
#+ global_rt_size = 8
|
|
||||||
## The number of threads to execute the runtime for global write operations.
|
|
||||||
#+ compact_rt_size = 4
|
|
||||||
|
|
||||||
## The heartbeat options.
|
|
||||||
[heartbeat]
|
[heartbeat]
|
||||||
## Interval for sending heartbeat messages to the metasrv.
|
# Interval for sending heartbeat messages to the Metasrv, 3 seconds by default.
|
||||||
interval = "3s"
|
interval = "3s"
|
||||||
|
|
||||||
## Interval for retrying to send heartbeat messages to the metasrv.
|
# Metasrv client options.
|
||||||
retry_interval = "3s"
|
|
||||||
|
|
||||||
## The metasrv client options.
|
|
||||||
[meta_client]
|
[meta_client]
|
||||||
## The addresses of the metasrv.
|
# Metasrv address list.
|
||||||
metasrv_addrs = ["127.0.0.1:3002"]
|
metasrv_addrs = ["127.0.0.1:3002"]
|
||||||
|
# Heartbeat timeout, 500 milliseconds by default.
|
||||||
## Operation timeout.
|
heartbeat_timeout = "500ms"
|
||||||
|
# Operation timeout, 3 seconds by default.
|
||||||
timeout = "3s"
|
timeout = "3s"
|
||||||
|
# Connect server timeout, 1 second by default.
|
||||||
## DDL timeout.
|
|
||||||
ddl_timeout = "10s"
|
|
||||||
|
|
||||||
## Connect server timeout.
|
|
||||||
connect_timeout = "1s"
|
connect_timeout = "1s"
|
||||||
|
# `TCP_NODELAY` option for accepted connections, true by default.
|
||||||
## `TCP_NODELAY` option for accepted connections.
|
|
||||||
tcp_nodelay = true
|
tcp_nodelay = true
|
||||||
|
|
||||||
## The configuration about the cache of the metadata.
|
# WAL options.
|
||||||
metadata_cache_max_capacity = 100000
|
# Currently, users are expected to choose the wal through the provider field.
|
||||||
|
# When a wal provider is chosen, the user should comment out all other wal configs
|
||||||
## TTL of the metadata cache.
|
# except those corresponding to the chosen one.
|
||||||
metadata_cache_ttl = "10m"
|
|
||||||
|
|
||||||
# TTI of the metadata cache.
|
|
||||||
metadata_cache_tti = "5m"
|
|
||||||
|
|
||||||
## The WAL options.
|
|
||||||
[wal]
|
[wal]
|
||||||
## The provider of the WAL.
|
# WAL data directory
|
||||||
## - `raft_engine`: the wal is stored in the local file system by raft-engine.
|
|
||||||
## - `kafka`: it's remote wal that data is stored in Kafka.
|
|
||||||
## - `noop`: it's a no-op WAL provider that does not store any WAL data.<br/>**Notes: any unflushed data will be lost when the datanode is shutdown.**
|
|
||||||
provider = "raft_engine"
|
provider = "raft_engine"
|
||||||
|
|
||||||
## The directory to store the WAL files.
|
# Raft-engine wal options, see `standalone.example.toml`.
|
||||||
## **It's only used when the provider is `raft_engine`**.
|
# dir = "/tmp/greptimedb/wal"
|
||||||
## @toml2docs:none-default
|
file_size = "256MB"
|
||||||
dir = "./greptimedb_data/wal"
|
purge_threshold = "4GB"
|
||||||
|
purge_interval = "10m"
|
||||||
## The size of the WAL segment file.
|
|
||||||
## **It's only used when the provider is `raft_engine`**.
|
|
||||||
file_size = "128MB"
|
|
||||||
|
|
||||||
## The threshold of the WAL size to trigger a purge.
|
|
||||||
## **It's only used when the provider is `raft_engine`**.
|
|
||||||
purge_threshold = "1GB"
|
|
||||||
|
|
||||||
## The interval to trigger a purge.
|
|
||||||
## **It's only used when the provider is `raft_engine`**.
|
|
||||||
purge_interval = "1m"
|
|
||||||
|
|
||||||
## The read batch size.
|
|
||||||
## **It's only used when the provider is `raft_engine`**.
|
|
||||||
read_batch_size = 128
|
read_batch_size = 128
|
||||||
|
|
||||||
## Whether to use sync write.
|
|
||||||
## **It's only used when the provider is `raft_engine`**.
|
|
||||||
sync_write = false
|
sync_write = false
|
||||||
|
|
||||||
## Whether to reuse logically truncated log files.
|
# Kafka wal options, see `standalone.example.toml`.
|
||||||
## **It's only used when the provider is `raft_engine`**.
|
# broker_endpoints = ["127.0.0.1:9092"]
|
||||||
enable_log_recycle = true
|
# max_batch_size = "4MB"
|
||||||
|
# linger = "200ms"
|
||||||
|
# produce_record_timeout = "100ms"
|
||||||
|
# backoff_init = "500ms"
|
||||||
|
# backoff_max = "10s"
|
||||||
|
# backoff_base = 2
|
||||||
|
# backoff_deadline = "5mins"
|
||||||
|
|
||||||
## Whether to pre-create log files on start up.
|
# Storage options, see `standalone.example.toml`.
|
||||||
## **It's only used when the provider is `raft_engine`**.
|
|
||||||
prefill_log_files = false
|
|
||||||
|
|
||||||
## Duration for fsyncing log files.
|
|
||||||
## **It's only used when the provider is `raft_engine`**.
|
|
||||||
sync_period = "10s"
|
|
||||||
|
|
||||||
## Parallelism during WAL recovery.
|
|
||||||
recovery_parallelism = 2
|
|
||||||
|
|
||||||
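
Collecting the `raft_engine` knobs above into one place, a sketch of a local-WAL setup; the sizes are illustrative (they happen to match the older example on the right rather than the current defaults):

```toml
[wal]
provider = "raft_engine"
dir = "./greptimedb_data/wal"
file_size = "256MB"       # WAL segment file size
purge_threshold = "4GB"   # trigger a purge once the WAL grows past this
purge_interval = "10m"
sync_write = false        # trade durability for write throughput
```
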
## The Kafka broker endpoints.
|
|
||||||
## **It's only used when the provider is `kafka`**.
|
|
||||||
broker_endpoints = ["127.0.0.1:9092"]
|
|
||||||
|
|
||||||
## The max size of a single producer batch.
|
|
||||||
## Warning: Kafka has a default limit of 1MB per message in a topic.
|
|
||||||
## **It's only used when the provider is `kafka`**.
|
|
||||||
max_batch_bytes = "1MB"
|
|
||||||
|
|
||||||
## The consumer wait timeout.
|
|
||||||
## **It's only used when the provider is `kafka`**.
|
|
||||||
consumer_wait_timeout = "100ms"
|
|
||||||
|
|
||||||
## Whether to enable WAL index creation.
|
|
||||||
## **It's only used when the provider is `kafka`**.
|
|
||||||
create_index = true
|
|
||||||
|
|
||||||
## The interval for dumping WAL indexes.
|
|
||||||
## **It's only used when the provider is `kafka`**.
|
|
||||||
dump_index_interval = "60s"
|
|
||||||
|
|
||||||
## Ignore missing entries when reading the WAL.
|
|
||||||
## **It's only used when the provider is `kafka`**.
|
|
||||||
##
|
|
||||||
## This option ensures that when Kafka messages are deleted, the system
|
|
||||||
## can still successfully replay memtable data without throwing an
|
|
||||||
## out-of-range error.
|
|
||||||
## However, enabling this option might lead to unexpected data loss,
|
|
||||||
## as the system will skip over missing entries instead of treating
|
|
||||||
## them as critical errors.
|
|
||||||
overwrite_entry_start_id = false
|
|
||||||
|
|
||||||
# The Kafka SASL configuration.
|
|
||||||
# **It's only used when the provider is `kafka`**.
|
|
||||||
# Available SASL mechanisms:
|
|
||||||
# - `PLAIN`
|
|
||||||
# - `SCRAM-SHA-256`
|
|
||||||
# - `SCRAM-SHA-512`
|
|
||||||
# [wal.sasl]
|
|
||||||
# type = "SCRAM-SHA-512"
|
|
||||||
# username = "user_kafka"
|
|
||||||
# password = "secret"
|
|
||||||
|
|
||||||
# The Kafka TLS configuration.
|
|
||||||
# **It's only used when the provider is `kafka`**.
|
|
||||||
# [wal.tls]
|
|
||||||
# server_ca_cert_path = "/path/to/server_cert"
|
|
||||||
# client_cert_path = "/path/to/client_cert"
|
|
||||||
# client_key_path = "/path/to/key"
|
|
||||||
|
|
||||||
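
And a matching sketch for the Kafka provider, combining the broker, SASL, and TLS fragments above; the endpoints, credentials, and certificate path are placeholders:

```toml
[wal]
provider = "kafka"
broker_endpoints = ["127.0.0.1:9092"]
max_batch_bytes = "1MB"   # Kafka's default per-message limit in a topic is 1MB

[wal.sasl]
type = "SCRAM-SHA-512"
username = "user_kafka"   # placeholder
password = "secret"       # placeholder

[wal.tls]
server_ca_cert_path = "/path/to/server_cert"   # placeholder
```
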
# Example of using S3 as the storage.
|
|
||||||
# [storage]
|
|
||||||
# type = "S3"
|
|
||||||
# bucket = "greptimedb"
|
|
||||||
# root = "data"
|
|
||||||
# access_key_id = "test"
|
|
||||||
# secret_access_key = "123456"
|
|
||||||
# endpoint = "https://s3.amazonaws.com"
|
|
||||||
# region = "us-west-2"
|
|
||||||
# enable_virtual_host_style = false
|
|
||||||
|
|
||||||
# Example of using Oss as the storage.
|
|
||||||
# [storage]
|
|
||||||
# type = "Oss"
|
|
||||||
# bucket = "greptimedb"
|
|
||||||
# root = "data"
|
|
||||||
# access_key_id = "test"
|
|
||||||
# access_key_secret = "123456"
|
|
||||||
# endpoint = "https://oss-cn-hangzhou.aliyuncs.com"
|
|
||||||
|
|
||||||
# Example of using Azblob as the storage.
|
|
||||||
# [storage]
|
|
||||||
# type = "Azblob"
|
|
||||||
# container = "greptimedb"
|
|
||||||
# root = "data"
|
|
||||||
# account_name = "test"
|
|
||||||
# account_key = "123456"
|
|
||||||
# endpoint = "https://greptimedb.blob.core.windows.net"
|
|
||||||
# sas_token = ""
|
|
||||||
|
|
||||||
# Example of using Gcs as the storage.
|
|
||||||
# [storage]
|
|
||||||
# type = "Gcs"
|
|
||||||
# bucket = "greptimedb"
|
|
||||||
# root = "data"
|
|
||||||
# scope = "test"
|
|
||||||
# credential_path = "123456"
|
|
||||||
# credential = "base64-credential"
|
|
||||||
# endpoint = "https://storage.googleapis.com"
|
|
||||||
|
|
||||||
## The query engine options.
|
|
||||||
[query]
|
|
||||||
## Parallelism of the query engine.
|
|
||||||
## Default to 0, which means the number of CPU cores.
|
|
||||||
parallelism = 0
|
|
||||||
|
|
||||||
## Memory pool size for query execution operators (aggregation, sorting, join).
|
|
||||||
## Supports absolute size (e.g., "2GB", "4GB") or percentage of system memory (e.g., "20%").
|
|
||||||
## Setting it to 0 disables the limit (unbounded, default behavior).
|
|
||||||
## When this limit is reached, queries will fail with ResourceExhausted error.
|
|
||||||
## NOTE: This does NOT limit memory used by table scans.
|
|
||||||
memory_pool_size = "50%"
|
|
||||||
|
|
||||||
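
To make the percentage form concrete, a worked example with illustrative arithmetic: on a host with 16GB of RAM, `memory_pool_size = "50%"` resolves to an 8GB operator pool, and queries whose aggregation or sort state exceeds the pool fail with a ResourceExhausted error instead of exhausting the node's memory:

```toml
[query]
parallelism = 0            # 0 = use the number of CPU cores
# On a 16GB host, these two settings are equivalent:
memory_pool_size = "50%"
# memory_pool_size = "8GB"
```
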
## The data storage options.
|
|
||||||
[storage]
|
[storage]
|
||||||
## The working home directory.
|
# The working home directory.
|
||||||
data_home = "./greptimedb_data"
|
data_home = "/tmp/greptimedb/"
|
||||||
|
# Storage type.
|
||||||
## The storage type used to store the data.
|
|
||||||
## - `File`: the data is stored in the local file system.
|
|
||||||
## - `S3`: the data is stored in the S3 object storage.
|
|
||||||
## - `Gcs`: the data is stored in the Google Cloud Storage.
|
|
||||||
## - `Azblob`: the data is stored in the Azure Blob Storage.
|
|
||||||
## - `Oss`: the data is stored in the Aliyun OSS.
|
|
||||||
type = "File"
|
type = "File"
|
||||||
|
# TTL for all tables. Disabled by default.
|
||||||
|
# global_ttl = "7d"
|
||||||
|
|
||||||
## Read cache configuration for object storage such as 'S3'. It's configured by default when using object storage, and configuring it is recommended for better performance.
|
# Cache configuration for object storage such as 'S3' etc.
|
||||||
## A local file directory, defaults to `{data_home}`. An empty string means disabling.
|
# The local file cache directory
|
||||||
## @toml2docs:none-default
|
# cache_path = "/path/local_cache"
|
||||||
#+ cache_path = ""
|
# The local file cache capacity in bytes.
|
||||||
|
# cache_capacity = "256MB"
|
||||||
## Whether to enable read cache. If not set, the read cache will be enabled by default when using object storage.
|
|
||||||
#+ enable_read_cache = true
|
|
||||||
|
|
||||||
## The local file cache capacity in bytes. If your disk space is sufficient, it is recommended to set it larger.
|
|
||||||
## @toml2docs:none-default
|
|
||||||
cache_capacity = "5GiB"
|
|
||||||
|
|
||||||
## The S3 bucket name.
|
|
||||||
## **It's only used when the storage type is `S3`, `Oss` and `Gcs`**.
|
|
||||||
## @toml2docs:none-default
|
|
||||||
bucket = "greptimedb"
|
|
||||||
|
|
||||||
## The S3 data will be stored in the specified prefix, for example, `s3://${bucket}/${root}`.
|
|
||||||
## **It's only used when the storage type is `S3`, `Oss` and `Azblob`**.
|
|
||||||
## @toml2docs:none-default
|
|
||||||
root = "greptimedb"
|
|
||||||
|
|
||||||
## The access key id of the aws account.
|
|
||||||
## It's **highly recommended** to use AWS IAM roles instead of hardcoding the access key id and secret key.
|
|
||||||
## **It's only used when the storage type is `S3` and `Oss`**.
|
|
||||||
## @toml2docs:none-default
|
|
||||||
access_key_id = "test"
|
|
||||||
|
|
||||||
## The secret access key of the aws account.
|
|
||||||
## It's **highly recommended** to use AWS IAM roles instead of hardcoding the access key id and secret key.
|
|
||||||
## **It's only used when the storage type is `S3`**.
|
|
||||||
## @toml2docs:none-default
|
|
||||||
secret_access_key = "test"
|
|
||||||
|
|
||||||
## The secret access key of the aliyun account.
|
|
||||||
## **It's only used when the storage type is `Oss`**.
|
|
||||||
## @toml2docs:none-default
|
|
||||||
access_key_secret = "test"
|
|
||||||
|
|
||||||
## The account name of the azure account.
|
|
||||||
## **It's only used when the storage type is `Azblob`**.
|
|
||||||
## @toml2docs:none-default
|
|
||||||
account_name = "test"
|
|
||||||
|
|
||||||
## The account key of the azure account.
|
|
||||||
## **It's only used when the storage type is `Azblob`**.
|
|
||||||
## @toml2docs:none-default
|
|
||||||
account_key = "test"
|
|
||||||
|
|
||||||
## The scope of the google cloud storage.
|
|
||||||
## **It's only used when the storage type is `Gcs`**.
|
|
||||||
## @toml2docs:none-default
|
|
||||||
scope = "test"
|
|
||||||
|
|
||||||
## The credential path of the google cloud storage.
|
|
||||||
## **It's only used when the storage type is `Gcs`**.
|
|
||||||
## @toml2docs:none-default
|
|
||||||
credential_path = "test"
|
|
||||||
|
|
||||||
## The credential of the google cloud storage.
|
|
||||||
## **It's only used when the storage type is `Gcs`**.
|
|
||||||
## @toml2docs:none-default
|
|
||||||
credential = "base64-credential"
|
|
||||||
|
|
||||||
## The container of the azure account.
|
|
||||||
## **It's only used when the storage type is `Azblob`**.
|
|
||||||
## @toml2docs:none-default
|
|
||||||
container = "greptimedb"
|
|
||||||
|
|
||||||
## The sas token of the azure account.
|
|
||||||
## **It's only used when the storage type is `Azblob`**.
|
|
||||||
## @toml2docs:none-default
|
|
||||||
sas_token = ""
|
|
||||||
|
|
||||||
## The endpoint of the S3 service.
|
|
||||||
## **It's only used when the storage type is `S3`, `Oss`, `Gcs` and `Azblob`**.
|
|
||||||
## @toml2docs:none-default
|
|
||||||
endpoint = "https://s3.amazonaws.com"
|
|
||||||
|
|
||||||
## The region of the S3 service.
|
|
||||||
## **It's only used when the storage type is `S3`, `Oss`, `Gcs` and `Azblob`**.
|
|
||||||
## @toml2docs:none-default
|
|
||||||
region = "us-west-2"
|
|
||||||
|
|
||||||
## The HTTP client options for the storage.
|
|
||||||
## **It's only used when the storage type is `S3`, `Oss`, `Gcs` and `Azblob`**.
|
|
||||||
[storage.http_client]
|
|
||||||
|
|
||||||
## The maximum idle connection per host allowed in the pool.
|
|
||||||
pool_max_idle_per_host = 1024
|
|
||||||
|
|
||||||
## The timeout for only the connect phase of a http client.
|
|
||||||
connect_timeout = "30s"
|
|
||||||
|
|
||||||
## The total request timeout, applied from when the request starts connecting until the response body has finished.
|
|
||||||
## Also considered a total deadline.
|
|
||||||
timeout = "30s"
|
|
||||||
|
|
||||||
## The timeout for idle sockets being kept-alive.
|
|
||||||
pool_idle_timeout = "90s"
|
|
||||||
|
|
||||||
## Whether to skip SSL verification.
|
|
||||||
## **Security Notice**: Setting `skip_ssl_validation = true` disables certificate verification, making connections vulnerable to man-in-the-middle attacks. Only use this in development or trusted private networks.
|
|
||||||
skip_ssl_validation = false
|
|
||||||
|
|
||||||
# Custom storage options
|
# Custom storage options
|
||||||
# [[storage.providers]]
|
#[[storage.providers]]
|
||||||
# name = "S3"
|
#type = "S3"
|
||||||
# type = "S3"
|
#[[storage.providers]]
|
||||||
# bucket = "greptimedb"
|
#type = "Gcs"
|
||||||
# root = "data"
|
|
||||||
# access_key_id = "test"
|
|
||||||
# secret_access_key = "123456"
|
|
||||||
# endpoint = "https://s3.amazonaws.com"
|
|
||||||
# region = "us-west-2"
|
|
||||||
# [[storage.providers]]
|
|
||||||
# name = "Gcs"
|
|
||||||
# type = "Gcs"
|
|
||||||
# bucket = "greptimedb"
|
|
||||||
# root = "data"
|
|
||||||
# scope = "test"
|
|
||||||
# credential_path = "123456"
|
|
||||||
# credential = "base64-credential"
|
|
||||||
# endpoint = "https://storage.googleapis.com"
|
|
||||||
|
|
||||||
## The region engine options. You can configure multiple region engines.
|
# Mito engine options
|
||||||
[[region_engine]]
|
[[region_engine]]
|
||||||
|
|
||||||
## The Mito engine options.
|
|
||||||
[region_engine.mito]
|
[region_engine.mito]
|
||||||
|
# Number of region workers
|
||||||
## Number of region workers.
|
num_workers = 8
|
||||||
#+ num_workers = 8
|
# Request channel size of each worker
|
||||||
|
|
||||||
## Request channel size of each worker.
|
|
||||||
worker_channel_size = 128
|
worker_channel_size = 128
|
||||||
|
# Max batch size for a worker to handle requests
|
||||||
## Max batch size for a worker to handle requests.
|
|
||||||
worker_request_batch_size = 64
|
worker_request_batch_size = 64
|
||||||
|
# Number of meta action updated to trigger a new checkpoint for the manifest
|
||||||
## Number of meta actions updated to trigger a new checkpoint for the manifest.
|
|
||||||
manifest_checkpoint_distance = 10
|
manifest_checkpoint_distance = 10
|
||||||
|
# Whether to compress manifest and checkpoint file by gzip (default false).
|
||||||
|
|
||||||
## Number of removed files to keep in manifest's `removed_files` field before also
|
|
||||||
## removing them from `removed_files`. Mostly for debugging purposes.
|
|
||||||
## If set to 0, it will only use `keep_removed_file_ttl` to decide when to remove files
|
|
||||||
## from `removed_files` field.
|
|
||||||
experimental_manifest_keep_removed_file_count = 256
|
|
||||||
|
|
||||||
## How long to keep removed files in the `removed_files` field of manifest
|
|
||||||
## after they are removed from manifest.
|
|
||||||
## Files will only be removed from the `removed_files` field
|
|
||||||
## if both `keep_removed_file_count` and `keep_removed_file_ttl` are reached.
|
|
||||||
experimental_manifest_keep_removed_file_ttl = "1h"
|
|
||||||
|
|
||||||
## Whether to compress manifest and checkpoint file by gzip (default false).
|
|
||||||
compress_manifest = false
|
compress_manifest = false
|
||||||
|
# Max number of running background jobs
|
||||||
## Max number of running background flush jobs (default: 1/2 of cpu cores).
|
max_background_jobs = 4
|
||||||
## @toml2docs:none-default="Auto"
|
# Interval to auto flush a region if it has not flushed yet.
|
||||||
#+ max_background_flushes = 4
|
|
||||||
|
|
||||||
## Max number of running background compaction jobs (default: 1/4 of cpu cores).
|
|
||||||
## @toml2docs:none-default="Auto"
|
|
||||||
#+ max_background_compactions = 2
|
|
||||||
|
|
||||||
## Max number of running background purge jobs (default: number of cpu cores).
|
|
||||||
## @toml2docs:none-default="Auto"
|
|
||||||
#+ max_background_purges = 8
|
|
||||||
|
|
||||||
## Interval to auto flush a region if it has not flushed yet.
|
|
||||||
auto_flush_interval = "1h"
|
auto_flush_interval = "1h"
|
||||||
|
# Global write buffer size for all regions.
|
||||||
## Global write buffer size for all regions. If not set, it's default to 1/8 of OS memory with a max limitation of 1GB.
|
global_write_buffer_size = "1GB"
|
||||||
## @toml2docs:none-default="Auto"
|
# Global write buffer size threshold to reject write requests (default 2G).
|
||||||
#+ global_write_buffer_size = "1GB"
|
global_write_buffer_reject_size = "2GB"
|
||||||
|
# Cache size for SST metadata (default 128MB). Setting it to 0 to disable the cache.
|
||||||
## Global write buffer size threshold to reject write requests. If not set, it's default to 2 times of `global_write_buffer_size`
|
sst_meta_cache_size = "128MB"
|
||||||
## @toml2docs:none-default="Auto"
|
# Cache size for vectors and arrow arrays (default 512MB). Setting it to 0 to disable the cache.
|
||||||
#+ global_write_buffer_reject_size = "2GB"
|
vector_cache_size = "512MB"
|
||||||
|
# Cache size for pages of SST row groups (default 512MB). Setting it to 0 to disable the cache.
|
||||||
## Cache size for SST metadata. Set it to 0 to disable the cache.
|
page_cache_size = "512MB"
|
||||||
## If not set, it's default to 1/32 of OS memory with a max limitation of 128MB.
|
# Buffer size for SST writing.
|
||||||
## @toml2docs:none-default="Auto"
|
|
||||||
#+ sst_meta_cache_size = "128MB"
|
|
||||||
|
|
||||||
## Cache size for vectors and arrow arrays. Set it to 0 to disable the cache.
|
|
||||||
## If not set, it's default to 1/16 of OS memory with a max limitation of 512MB.
|
|
||||||
## @toml2docs:none-default="Auto"
|
|
||||||
#+ vector_cache_size = "512MB"
|
|
||||||
|
|
||||||
## Cache size for pages of SST row groups. Set it to 0 to disable the cache.
|
|
||||||
## If not set, it's default to 1/8 of OS memory.
|
|
||||||
## @toml2docs:none-default="Auto"
|
|
||||||
#+ page_cache_size = "512MB"
|
|
||||||
|
|
||||||
## Cache size for time series selectors (e.g. `last_value()`). Set it to 0 to disable the cache.
|
|
||||||
## If not set, it's default to 1/16 of OS memory with a max limitation of 512MB.
|
|
||||||
## @toml2docs:none-default="Auto"
|
|
||||||
#+ selector_result_cache_size = "512MB"
|
|
||||||
|
|
||||||
## Whether to enable the write cache. It's enabled by default when using object storage, and enabling it is recommended for better performance.
|
|
||||||
enable_write_cache = false
|
|
||||||
|
|
||||||
## File system path for write cache, defaults to `{data_home}`.
|
|
||||||
write_cache_path = ""
|
|
||||||
|
|
||||||
## Capacity for write cache. If your disk space is sufficient, it is recommended to set it larger.
|
|
||||||
write_cache_size = "5GiB"
|
|
||||||
|
|
||||||
## TTL for write cache.
|
|
||||||
## @toml2docs:none-default
|
|
||||||
write_cache_ttl = "8h"
|
|
||||||
|
|
||||||
## Preload index (puffin) files into cache on region open (default: true).
|
|
||||||
## When enabled, index files are loaded into the write cache during region initialization,
|
|
||||||
## which can improve query performance at the cost of longer startup times.
|
|
||||||
preload_index_cache = true
|
|
||||||
|
|
||||||
## Percentage of write cache capacity allocated for index (puffin) files (default: 20).
|
|
||||||
## The remaining capacity is used for data (parquet) files.
|
|
||||||
## Must be between 0 and 100 (exclusive). For example, with a 5GiB write cache and 20% allocation,
|
|
||||||
## 1GiB is reserved for index files and 4GiB for data files.
|
|
||||||
index_cache_percent = 20
|
|
||||||
|
|
||||||
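
Pulling the write-cache options together, a sketch for an object-storage datanode with a roomy local disk; the sizes are illustrative, not recommendations:

```toml
[region_engine.mito]
enable_write_cache = true
write_cache_path = ""      # empty = default to {data_home}
write_cache_size = "50GiB"
write_cache_ttl = "8h"
index_cache_percent = 20   # 10GiB for puffin index files, 40GiB for parquet data
```
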
## Buffer size for SST writing.
|
|
||||||
sst_write_buffer_size = "8MB"
|
sst_write_buffer_size = "8MB"
|
||||||
|
# Parallelism to scan a region (default: 1/4 of cpu cores).
|
||||||
## Capacity of the channel to send data from parallel scan tasks to the main task.
|
# - 0: using the default value (1/4 of cpu cores).
|
||||||
|
# - 1: scan in current thread.
|
||||||
|
# - n: scan in parallelism n.
|
||||||
|
scan_parallelism = 0
|
||||||
|
# Capacity of the channel to send data from parallel scan tasks to the main task (default 32).
|
||||||
parallel_scan_channel_size = 32
|
parallel_scan_channel_size = 32
|
||||||
|
|
||||||
## Maximum number of SST files to scan concurrently.
|
# Log options, see `standalone.example.toml`
|
||||||
max_concurrent_scan_files = 384
|
# [logging]
|
||||||
|
# dir = "/tmp/greptimedb/logs"
|
||||||
|
# level = "info"
|
||||||
|
|
||||||
## Whether to allow stale WAL entries to be read during replay.
|
# Datanode exports the metrics generated by itself
|
||||||
allow_stale_entries = false
|
# encoded to Prometheus remote-write format
|
||||||
|
# and send to Prometheus remote-write compatible receiver (e.g. send to `greptimedb` itself)
|
||||||
## Memory limit for table scans across all queries.
|
# This is only used for `greptimedb` to export its own metrics internally. It's different from prometheus scrape.
|
||||||
## Supports absolute size (e.g., "2GB") or percentage of system memory (e.g., "20%").
|
# [export_metrics]
|
||||||
## Setting it to 0 disables the limit.
|
# whether to enable export metrics, default is false
|
||||||
## NOTE: Works with max_concurrent_queries for tiered memory allocation.
|
# enable = false
|
||||||
## - If max_concurrent_queries is set: 70% of queries get full access, 30% get 70% access.
|
# The url of metrics export endpoint, default is `frontend` default HTTP endpoint.
|
||||||
## - If max_concurrent_queries is 0 (unlimited): first 20 queries get full access, rest get 70% access.
|
# endpoint = "127.0.0.1:4000"
|
||||||
scan_memory_limit = "50%"
|
# The database name of exported metrics stores, user needs to specify a valid database
|
||||||
|
# db = ""
|
||||||
## Minimum time interval between two compactions.
|
# The interval of export metrics
|
||||||
## To align with the old behavior, the default value is 0 (no restrictions).
|
# write_interval = "30s"
|
||||||
min_compaction_interval = "0m"
|
# HTTP headers to carry in Prometheus remote-write requests
|
||||||
|
# headers = {}
|
||||||
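
The `scan_memory_limit` tiering described above is easier to see with numbers. A hedged worked example, assuming a 16GB host so that "50%" resolves to an 8GB scan budget:

```toml
[region_engine.mito]
scan_memory_limit = "50%"   # 8GB on a 16GB host
# With max_concurrent_queries = 10: 7 queries (70%) may use the full 8GB
# budget; the other 3 are capped at 70% of it (~5.6GB).
# With max_concurrent_queries = 0: the first 20 queries get the full 8GB;
# later ones are capped at ~5.6GB.
```
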
## Whether to enable the experimental flat format as the default format.
|
|
||||||
default_experimental_flat_format = false
|
|
||||||
|
|
||||||
## The options for index in Mito engine.
|
|
||||||
[region_engine.mito.index]
|
|
||||||
|
|
||||||
## Auxiliary directory path for the index in filesystem, used to store intermediate files for
|
|
||||||
## creating the index and staging files for searching the index, defaults to `{data_home}/index_intermediate`.
|
|
||||||
## The default name for this directory is `index_intermediate` for backward compatibility.
|
|
||||||
##
|
|
||||||
## This path contains two subdirectories:
|
|
||||||
## - `__intm`: for storing intermediate files used during creating index.
|
|
||||||
## - `staging`: for storing staging files used during searching index.
|
|
||||||
aux_path = ""
|
|
||||||
|
|
||||||
## The max capacity of the staging directory.
|
|
||||||
staging_size = "2GB"
|
|
||||||
|
|
||||||
## The TTL of the staging directory.
|
|
||||||
## Defaults to 7 days.
|
|
||||||
## Set it to "0s" to disable the TTL.
|
|
||||||
staging_ttl = "7d"
|
|
||||||
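
A sketch of relocating the index auxiliary directory to a faster local disk; the path is a placeholder:

```toml
[region_engine.mito.index]
aux_path = "/mnt/nvme/greptimedb/index_intermediate"   # placeholder path
staging_size = "10GB"
staging_ttl = "7d"   # set to "0s" to disable the TTL
```
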
|
|
||||||
## Cache size for inverted index metadata.
|
|
||||||
metadata_cache_size = "64MiB"
|
|
||||||
|
|
||||||
## Cache size for inverted index content.
|
|
||||||
content_cache_size = "128MiB"
|
|
||||||
|
|
||||||
## Page size for inverted index content cache.
|
|
||||||
content_cache_page_size = "64KiB"
|
|
||||||
|
|
||||||
## Cache size for index result.
|
|
||||||
result_cache_size = "128MiB"
|
|
||||||
|
|
||||||
## The options for inverted index in Mito engine.
|
|
||||||
[region_engine.mito.inverted_index]
|
|
||||||
|
|
||||||
## Whether to create the index on flush.
|
|
||||||
## - `auto`: automatically (default)
|
|
||||||
## - `disable`: never
|
|
||||||
create_on_flush = "auto"
|
|
||||||
|
|
||||||
## Whether to create the index on compaction.
|
|
||||||
## - `auto`: automatically (default)
|
|
||||||
## - `disable`: never
|
|
||||||
create_on_compaction = "auto"
|
|
||||||
|
|
||||||
## Whether to apply the index on query
|
|
||||||
## - `auto`: automatically (default)
|
|
||||||
## - `disable`: never
|
|
||||||
apply_on_query = "auto"
|
|
||||||
|
|
||||||
## Memory threshold for performing an external sort during index creation.
|
|
||||||
## - `auto`: automatically determine the threshold based on the system memory size (default)
|
|
||||||
## - `unlimited`: no memory limit
|
|
||||||
## - `[size]` e.g. `64MB`: fixed memory threshold
|
|
||||||
mem_threshold_on_create = "auto"
|
|
||||||
|
|
||||||
## Deprecated, use `region_engine.mito.index.aux_path` instead.
|
|
||||||
intermediate_path = ""
|
|
||||||
|
|
||||||
## The options for full-text index in Mito engine.
|
|
||||||
[region_engine.mito.fulltext_index]
|
|
||||||
|
|
||||||
## Whether to create the index on flush.
|
|
||||||
## - `auto`: automatically (default)
|
|
||||||
## - `disable`: never
|
|
||||||
create_on_flush = "auto"
|
|
||||||
|
|
||||||
## Whether to create the index on compaction.
|
|
||||||
## - `auto`: automatically (default)
|
|
||||||
## - `disable`: never
|
|
||||||
create_on_compaction = "auto"
|
|
||||||
|
|
||||||
## Whether to apply the index on query
|
|
||||||
## - `auto`: automatically (default)
|
|
||||||
## - `disable`: never
|
|
||||||
apply_on_query = "auto"
|
|
||||||
|
|
||||||
## Memory threshold for index creation.
|
|
||||||
## - `auto`: automatically determine the threshold based on the system memory size (default)
|
|
||||||
## - `unlimited`: no memory limit
|
|
||||||
## - `[size]` e.g. `64MB`: fixed memory threshold
|
|
||||||
mem_threshold_on_create = "auto"
|
|
||||||
|
|
||||||
## The options for bloom filter index in Mito engine.
|
|
||||||
[region_engine.mito.bloom_filter_index]
|
|
||||||
|
|
||||||
## Whether to create the index on flush.
|
|
||||||
## - `auto`: automatically (default)
|
|
||||||
## - `disable`: never
|
|
||||||
create_on_flush = "auto"
|
|
||||||
|
|
||||||
## Whether to create the index on compaction.
|
|
||||||
## - `auto`: automatically (default)
|
|
||||||
## - `disable`: never
|
|
||||||
create_on_compaction = "auto"
|
|
||||||
|
|
||||||
## Whether to apply the index on query
|
|
||||||
## - `auto`: automatically (default)
|
|
||||||
## - `disable`: never
|
|
||||||
apply_on_query = "auto"
|
|
||||||
|
|
||||||
## Memory threshold for the index creation.
|
|
||||||
## - `auto`: automatically determine the threshold based on the system memory size (default)
|
|
||||||
## - `unlimited`: no memory limit
|
|
||||||
## - `[size]` e.g. `64MB`: fixed memory threshold
|
|
||||||
mem_threshold_on_create = "auto"
|
|
||||||
|
|
||||||
[region_engine.mito.memtable]
|
|
||||||
## Memtable type.
|
|
||||||
## - `time_series`: time-series memtable
|
|
||||||
## - `partition_tree`: partition tree memtable (experimental)
|
|
||||||
type = "time_series"
|
|
||||||
|
|
||||||
## The max number of keys in one shard.
|
|
||||||
## Only available for `partition_tree` memtable.
|
|
||||||
index_max_keys_per_shard = 8192
|
|
||||||
|
|
||||||
## The max rows of data inside the actively writing buffer in one shard.
|
|
||||||
## Only available for `partition_tree` memtable.
|
|
||||||
data_freeze_threshold = 32768
|
|
||||||
|
|
||||||
## Max dictionary bytes.
|
|
||||||
## Only available for `partition_tree` memtable.
|
|
||||||
fork_dictionary_bytes = "1GiB"
|
|
||||||
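
For experimentation, a sketch that opts into the `partition_tree` memtable; the thresholds below are simply the documented defaults restated:

```toml
[region_engine.mito.memtable]
type = "partition_tree"           # experimental
index_max_keys_per_shard = 8192   # max keys per shard
data_freeze_threshold = 32768     # max rows in the actively writing buffer
fork_dictionary_bytes = "1GiB"    # max dictionary bytes
```
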
|
|
||||||
[[region_engine]]
|
|
||||||
## Enable the file engine.
|
|
||||||
[region_engine.file]
|
|
||||||
|
|
||||||
[[region_engine]]
|
|
||||||
## Metric engine options.
|
|
||||||
[region_engine.metric]
|
|
||||||
## Whether to use sparse primary key encoding.
|
|
||||||
sparse_primary_key_encoding = true
|
|
||||||
|
|
||||||
## The logging options.
|
|
||||||
[logging]
|
|
||||||
## The directory to store the log files. If set to empty, logs will not be written to files.
|
|
||||||
dir = "./greptimedb_data/logs"
|
|
||||||
|
|
||||||
## The log level. Can be `info`/`debug`/`warn`/`error`.
|
|
||||||
## @toml2docs:none-default
|
|
||||||
level = "info"
|
|
||||||
|
|
||||||
## Enable OTLP tracing.
|
|
||||||
enable_otlp_tracing = false
|
|
||||||
|
|
||||||
## The OTLP tracing endpoint.
|
|
||||||
otlp_endpoint = "http://localhost:4318/v1/traces"
|
|
||||||
|
|
||||||
## Whether to append logs to stdout.
|
|
||||||
append_stdout = true
|
|
||||||
|
|
||||||
## The log format. Can be `text`/`json`.
|
|
||||||
log_format = "text"
|
|
||||||
|
|
||||||
## The maximum number of log files.
|
|
||||||
max_log_files = 720
|
|
||||||
|
|
||||||
## The OTLP tracing export protocol. Can be `grpc`/`http`.
|
|
||||||
otlp_export_protocol = "http"
|
|
||||||
|
|
||||||
## Additional OTLP headers; only valid when using OTLP over HTTP.
|
|
||||||
[logging.otlp_headers]
|
|
||||||
## @toml2docs:none-default
|
|
||||||
#Authorization = "Bearer my-token"
|
|
||||||
## @toml2docs:none-default
|
|
||||||
#Database = "My database"
|
|
||||||
|
|
||||||
## The percentage of traces that will be sampled and exported.
|
|
||||||
## Valid range `[0, 1]`, 1 means all traces are sampled, 0 means all traces are not sampled, the default value is 1.
|
|
||||||
## Ratios > 1 are treated as 1; ratios < 0 are treated as 0.
|
|
||||||
[logging.tracing_sample_ratio]
|
|
||||||
default_ratio = 1.0
|
|
||||||
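
For example, to export only a tenth of traces instead of all of them (a sketch; 0.1 is just an illustrative ratio):

```toml
[logging]
enable_otlp_tracing = true

[logging.tracing_sample_ratio]
default_ratio = 0.1   # sample 10% of traces; values are clamped to [0, 1]
```
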
|
|
||||||
## The tracing options. Only effective when compiled with the `tokio-console` feature.
|
|
||||||
#+ [tracing]
|
|
||||||
## The tokio console address.
|
|
||||||
## @toml2docs:none-default
|
|
||||||
#+ tokio_console_addr = "127.0.0.1"
|
|
||||||
|
|
||||||
## The memory options.
|
|
||||||
[memory]
|
|
||||||
## Whether to enable heap profiling activation during startup.
|
|
||||||
## When enabled, heap profiling will be activated if the `MALLOC_CONF` environment variable
|
|
||||||
## is set to "prof:true,prof_active:false". The official image adds this env variable.
|
|
||||||
## Default is true.
|
|
||||||
enable_heap_profiling = true
|
|
||||||
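
The two halves of the heap-profiling handshake, side by side (a sketch; the `MALLOC_CONF` value is the one quoted above and lives in the environment, not in this file):

```toml
# Environment, e.g. set by the official image:
#   MALLOC_CONF="prof:true,prof_active:false"
[memory]
enable_heap_profiling = true   # activates profiling at startup when MALLOC_CONF is armed
```
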
|
|||||||
@@ -1,171 +0,0 @@
|
|||||||
## The flownode identifier; it should be unique in the cluster.
|
|
||||||
## @toml2docs:none-default
|
|
||||||
node_id = 14
|
|
||||||
|
|
||||||
## flow engine options.
|
|
||||||
[flow]
|
|
||||||
## The number of flow workers in the flownode.
|
|
||||||
## Not setting this value (or setting it to 0) will use half the number of CPU cores.
|
|
||||||
#+num_workers=0
|
|
||||||
[flow.batching_mode]
|
|
||||||
## The batching engine query timeout. Defaults to 10 minutes.
|
|
||||||
#+query_timeout="600s"
|
|
||||||
## Outputs a warn log for any query that runs longer than this threshold.
|
|
||||||
#+slow_query_threshold="60s"
|
|
||||||
## The minimum duration between two query executions by a batching mode task.
|
|
||||||
#+experimental_min_refresh_duration="5s"
|
|
||||||
## The gRPC connection timeout
|
|
||||||
#+grpc_conn_timeout="5s"
|
|
||||||
## The maximum number of gRPC retries.
|
|
||||||
#+experimental_grpc_max_retries=3
|
|
||||||
## Timeout for the flow to wait for an available frontend;
|
|
||||||
## if no available frontend is found after frontend_scan_timeout elapses, an error is returned,
|
|
||||||
## which prevents the flownode from starting.
|
|
||||||
#+experimental_frontend_scan_timeout="30s"
|
|
||||||
## Frontend activity timeout;
|
|
||||||
## if a frontend is down (not sending heartbeats) for more than frontend_activity_timeout,
|
|
||||||
## it will be removed from the list that the flownode uses to connect.
|
|
||||||
#+experimental_frontend_activity_timeout="60s"
|
|
||||||
## Maximum number of filters allowed in a single query
|
|
||||||
#+experimental_max_filter_num_per_query=20
|
|
||||||
## Time window merge distance
|
|
||||||
#+experimental_time_window_merge_threshold=3
|
|
||||||
## Read preference of the Frontend client.
|
|
||||||
#+read_preference="Leader"
|
|
||||||
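
Uncommenting the defaults above yields a working batching-mode block. A sketch with one illustrative override (a slower refresh cadence); the other values are the documented defaults:

```toml
[flow.batching_mode]
query_timeout = "600s"
slow_query_threshold = "60s"
experimental_min_refresh_duration = "30s"   # illustrative: refresh at most every 30s
read_preference = "Leader"
```
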
[flow.batching_mode.frontend_tls]
|
|
||||||
## Whether to enable TLS for the client.
|
|
||||||
#+enabled=false
|
|
||||||
## Server Certificate file path.
|
|
||||||
## @toml2docs:none-default
|
|
||||||
#+server_ca_cert_path=""
|
|
||||||
## Client Certificate file path.
|
|
||||||
## @toml2docs:none-default
|
|
||||||
#+client_cert_path=""
|
|
||||||
## Client Private key file path.
|
|
||||||
## @toml2docs:none-default
|
|
||||||
#+client_key_path=""
|
|
||||||

## The gRPC server options.
[grpc]
## The address to bind the gRPC server.
bind_addr = "127.0.0.1:6800"
## The address advertised to the metasrv,
## and used for connections from outside the host.
server_addr = "127.0.0.1:6800"
## The number of server worker threads.
runtime_size = 2
## The maximum receive message size for the gRPC server.
max_recv_message_size = "512MB"
## The maximum send message size for the gRPC server.
max_send_message_size = "512MB"

## The HTTP server options.
[http]
## The address to bind the HTTP server.
addr = "127.0.0.1:4000"
## HTTP request timeout. Set to 0 to disable timeout.
timeout = "0s"
## HTTP request body limit.
## The following units are supported: `B`, `KB`, `KiB`, `MB`, `MiB`, `GB`, `GiB`, `TB`, `TiB`, `PB`, `PiB`.
## Set to 0 to disable limit.
body_limit = "64MB"

## The metasrv client options.
[meta_client]
## The addresses of the metasrv.
metasrv_addrs = ["127.0.0.1:3002"]

## Operation timeout.
timeout = "3s"

## DDL timeout.
ddl_timeout = "10s"

## Connect server timeout.
connect_timeout = "1s"

## `TCP_NODELAY` option for accepted connections.
tcp_nodelay = true

## The maximum capacity of the metadata cache.
metadata_cache_max_capacity = 100000

## TTL of the metadata cache.
metadata_cache_ttl = "10m"

## TTI of the metadata cache.
metadata_cache_tti = "5m"

## The heartbeat options.
[heartbeat]
## Interval for sending heartbeat messages to the metasrv.
interval = "3s"

## Interval for retrying to send heartbeat messages to the metasrv.
retry_interval = "3s"

## The logging options.
[logging]
## The directory to store the log files. If set to empty, logs will not be written to files.
dir = "./greptimedb_data/logs"

## The log level. Can be `info`/`debug`/`warn`/`error`.
## @toml2docs:none-default
level = "info"

## Enable OTLP tracing.
enable_otlp_tracing = false

## The OTLP tracing endpoint.
otlp_endpoint = "http://localhost:4318/v1/traces"

## Whether to append logs to stdout.
append_stdout = true

## The log format. Can be `text`/`json`.
log_format = "text"

## The maximum amount of log files.
max_log_files = 720

## The OTLP tracing export protocol. Can be `grpc`/`http`.
otlp_export_protocol = "http"

## Additional OTLP headers, only valid when using OTLP over HTTP.
[logging.otlp_headers]
## @toml2docs:none-default
#Authorization = "Bearer my-token"
## @toml2docs:none-default
#Database = "My database"

## The percentage of tracing that will be sampled and exported.
## Valid range `[0, 1]`: 1 means all traces are sampled, 0 means no traces are sampled; the default value is 1.
## Ratios > 1 are treated as 1. Fractions < 0 are treated as 0.
[logging.tracing_sample_ratio]
default_ratio = 1.0

## The tracing options. Only effective when compiled with the `tokio-console` feature.
#+ [tracing]
## The tokio console address.
## @toml2docs:none-default
#+ tokio_console_addr = "127.0.0.1"

[query]
## Parallelism of the query engine for queries sent by the flownode.
## Defaults to 1, so it won't use too much CPU or memory.
parallelism = 1

## Memory pool size for query execution operators (aggregation, sorting, join).
## Supports absolute size (e.g., "1GB", "2GB") or percentage of system memory (e.g., "20%").
## Setting it to 0 disables the limit (unbounded, default behavior).
## When this limit is reached, queries will fail with a ResourceExhausted error.
## NOTE: This does NOT limit memory used by table scans.
memory_pool_size = "50%"

## The memory options.
[memory]
## Whether to enable heap profiling activation during startup.
## When enabled, heap profiling will be activated if the `MALLOC_CONF` environment variable
## is set to "prof:true,prof_active:false". The official image sets this environment variable.
## Default is true.
enable_heap_profiling = true

@@ -1,346 +1,97 @@
## The default timezone of the server.
## @toml2docs:none-default
default_timezone = "UTC"

## The default column prefix for auto-created time index and value columns.
## @toml2docs:none-default
default_column_prefix = "greptime"

## The maximum in-flight write bytes.
## @toml2docs:none-default
#+ max_in_flight_write_bytes = "500MB"

## The runtime options.
#+ [runtime]
## The number of threads to execute the runtime for global read operations.
#+ global_rt_size = 8
## The number of threads to execute the runtime for global write operations.
#+ compact_rt_size = 4

## The heartbeat options.
[heartbeat]
## Interval for sending heartbeat messages to the metasrv.
interval = "18s"

## Interval for retrying to send heartbeat messages to the metasrv.
retry_interval = "3s"

## The HTTP server options.
[http]
## The address to bind the HTTP server.
addr = "127.0.0.1:4000"
## HTTP request timeout. Set to 0 to disable timeout.
timeout = "0s"
## HTTP request body limit.
## The following units are supported: `B`, `KB`, `KiB`, `MB`, `MiB`, `GB`, `GiB`, `TB`, `TiB`, `PB`, `PiB`.
## Set to 0 to disable limit.
body_limit = "64MB"
## Maximum total memory for all concurrent HTTP request bodies.
## Set to 0 to disable the limit. Default: "0" (unlimited).
## @toml2docs:none-default
#+ max_total_body_memory = "1GB"
## HTTP CORS support; it's turned on by default.
## This allows browsers to access HTTP APIs without CORS restrictions.
enable_cors = true
## Customize allowed origins for HTTP CORS.
## @toml2docs:none-default
cors_allowed_origins = ["https://example.com"]
## Whether to enable validation for Prometheus remote write requests.
## Available options:
## - strict: deny invalid UTF-8 strings (default).
## - lossy: allow invalid UTF-8 strings, replace invalid characters with REPLACEMENT_CHARACTER (U+FFFD).
## - unchecked: do not validate strings.
prom_validation_mode = "strict"

## The gRPC server options.
[grpc]
## The address to bind the gRPC server.
bind_addr = "127.0.0.1:4001"
## The address advertised to the metasrv, and used for connections from outside the host.
## If left empty or unset, the server will automatically use the IP address of the first network interface
## on the host, with the same port number as the one specified in `grpc.bind_addr`.
server_addr = "127.0.0.1:4001"
## The number of server worker threads.
runtime_size = 8
## Maximum total memory for all concurrent gRPC request messages.
## Set to 0 to disable the limit. Default: "0" (unlimited).
## @toml2docs:none-default
#+ max_total_message_memory = "1GB"
## Compression mode for the frontend-side Arrow IPC service. Available options:
## - `none`: disable all compression
## - `transport`: only enable gRPC transport compression (zstd)
## - `arrow_ipc`: only enable Arrow IPC compression (lz4)
## - `all`: enable all compression.
## Defaults to `none`.
flight_compression = "arrow_ipc"
## The maximum connection age for a gRPC connection.
## The value can be a human-readable time string. For example: `10m` for ten minutes or `1h` for one hour.
## Refer to https://grpc.io/docs/guides/keepalive/ for more details.
## @toml2docs:none-default
#+ max_connection_age = "10m"

## gRPC server TLS options, see the `mysql.tls` section.
[grpc.tls]
## TLS mode.
mode = "disable"

## Certificate file path.
## @toml2docs:none-default
cert_path = ""

## Private key file path.
## @toml2docs:none-default
key_path = ""

## Watch for certificate and key file changes and auto reload.
## For now, the gRPC TLS config does not support auto reload.
watch = false

## The internal gRPC server options. Internal gRPC port for nodes inside the cluster to access the frontend.
[internal_grpc]
## The address to bind the gRPC server.
bind_addr = "127.0.0.1:4010"
## The address advertised to the metasrv, and used for connections from outside the host.
## If left empty or unset, the server will automatically use the IP address of the first network interface
## on the host, with the same port number as the one specified in `grpc.bind_addr`.
server_addr = "127.0.0.1:4010"
## The number of server worker threads.
runtime_size = 8
## Compression mode for the frontend-side Arrow IPC service. Available options:
## - `none`: disable all compression
## - `transport`: only enable gRPC transport compression (zstd)
## - `arrow_ipc`: only enable Arrow IPC compression (lz4)
## - `all`: enable all compression.
## Defaults to `none`.
flight_compression = "arrow_ipc"

## Internal gRPC server TLS options, see the `mysql.tls` section.
[internal_grpc.tls]
## TLS mode.
mode = "disable"

## Certificate file path.
## @toml2docs:none-default
cert_path = ""

## Private key file path.
## @toml2docs:none-default
key_path = ""

## Watch for certificate and key file changes and auto reload.
## For now, the gRPC TLS config does not support auto reload.
watch = false

## MySQL server options.
[mysql]
## Whether to enable.
enable = true
## The addr to bind the MySQL server.
addr = "127.0.0.1:4002"
## The number of server worker threads.
runtime_size = 2
## Server-side keep-alive time.
## Set to 0 (default) to disable.
keep_alive = "0s"
## Maximum entries in the MySQL prepared statement cache; default is 10,000.
prepared_stmt_cache_size = 10000

## MySQL server TLS options.
[mysql.tls]
## TLS mode, refer to https://www.postgresql.org/docs/current/libpq-ssl.html
## - `disable` (default value)
## - `prefer`
## - `require`
## - `verify-ca`
## - `verify-full`
mode = "disable"

## Certificate file path.
## @toml2docs:none-default
cert_path = ""

## Private key file path.
## @toml2docs:none-default
key_path = ""

## Watch for certificate and key file changes and auto reload.
watch = false

## PostgreSQL server options.
[postgres]
## Whether to enable.
enable = true
## The addr to bind the PostgreSQL server.
addr = "127.0.0.1:4003"
## The number of server worker threads.
runtime_size = 2
## Server-side keep-alive time.
## Set to 0 (default) to disable.
keep_alive = "0s"

## PostgreSQL server TLS options, see the `mysql.tls` section.
[postgres.tls]
## TLS mode.
mode = "disable"

## Certificate file path.
## @toml2docs:none-default
cert_path = ""

## Private key file path.
## @toml2docs:none-default
key_path = ""

## Watch for certificate and key file changes and auto reload.
watch = false

## OpenTSDB protocol options.
[opentsdb]
## Whether to enable OpenTSDB put in HTTP API.
enable = true

## InfluxDB protocol options.
[influxdb]
## Whether to enable InfluxDB protocol in HTTP API.
enable = true

## Jaeger protocol options.
[jaeger]
## Whether to enable Jaeger protocol in HTTP API.
enable = true

## Prometheus remote storage options.
[prom_store]
## Whether to enable Prometheus remote write and read in HTTP API.
enable = true
## Whether to store the data from Prometheus remote write in the metric engine.
with_metric_engine = true

## The metasrv client options.
[meta_client]
## The addresses of the metasrv.
metasrv_addrs = ["127.0.0.1:3002"]

## Operation timeout.
timeout = "3s"

## DDL timeout.
ddl_timeout = "10s"

## Connect server timeout.
connect_timeout = "1s"

## `TCP_NODELAY` option for accepted connections.
tcp_nodelay = true

## The maximum capacity of the metadata cache.
metadata_cache_max_capacity = 100000

## TTL of the metadata cache.
metadata_cache_ttl = "10m"

## TTI of the metadata cache.
metadata_cache_tti = "5m"

## The query engine options.
[query]
## Parallelism of the query engine.
## Defaults to 0, which means the number of CPU cores.
parallelism = 0
## Whether to allow query fallback when push-down optimization fails.
## Defaults to false, meaning that when push-down optimization fails, an error message is returned.
allow_query_fallback = false

## Memory pool size for query execution operators (aggregation, sorting, join).
## Supports absolute size (e.g., "4GB", "8GB") or percentage of system memory (e.g., "30%").
## Setting it to 0 disables the limit (unbounded, default behavior).
## When this limit is reached, queries will fail with a ResourceExhausted error.
## NOTE: This does NOT limit memory used by table scans (only applies to datanodes).
memory_pool_size = "50%"
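## A quick sizing sketch: on a host with 32 GB of memory, "50%" yields a 16 GB pool for
## query operators; once concurrent aggregations, sorts, and joins reserve 16 GB in total,
## further reservations fail with ResourceExhausted. (Numbers are illustrative only.)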

## Datanode options.
[datanode]
## Datanode client options.
[datanode.client]
connect_timeout = "10s"
tcp_nodelay = true

## The logging options.
[logging]
## The directory to store the log files. If set to empty, logs will not be written to files.
dir = "./greptimedb_data/logs"

## The log level. Can be `info`/`debug`/`warn`/`error`.
## @toml2docs:none-default
level = "info"

## Enable OTLP tracing.
enable_otlp_tracing = false

## The OTLP tracing endpoint.
otlp_endpoint = "http://localhost:4318/v1/traces"

## Whether to append logs to stdout.
append_stdout = true

## The log format. Can be `text`/`json`.
log_format = "text"

## The maximum amount of log files.
max_log_files = 720

## The OTLP tracing export protocol. Can be `grpc`/`http`.
otlp_export_protocol = "http"

## Additional OTLP headers, only valid when using OTLP over HTTP.
[logging.otlp_headers]
## @toml2docs:none-default
#Authorization = "Bearer my-token"
## @toml2docs:none-default
#Database = "My database"

## The percentage of tracing that will be sampled and exported.
## Valid range `[0, 1]`: 1 means all traces are sampled, 0 means no traces are sampled; the default value is 1.
## Ratios > 1 are treated as 1. Fractions < 0 are treated as 0.
[logging.tracing_sample_ratio]
default_ratio = 1.0

## The slow query log options.
[slow_query]
## Whether to enable slow query log.
enable = true

## The record type of slow queries. It can be `system_table` or `log`.
## If `system_table` is selected, the slow queries will be recorded in a system table `greptime_private.slow_queries`.
## If `log` is selected, the slow queries will be logged in a log file `greptimedb-slow-queries.*`.
record_type = "system_table"

## The threshold of slow queries. It can be a human-readable time string, for example: `10s`, `100ms`, `1s`.
threshold = "30s"

## The sampling ratio of the slow query log. The value should be in the range of (0, 1]. For example, `0.1` means 10% of the slow queries will be logged and `1.0` means all slow queries will be logged.
sample_ratio = 1.0

## The TTL of the `slow_queries` system table. Default is `90d` when `record_type` is `system_table`.
ttl = "90d"

## The tracing options. Only effective when compiled with the `tokio-console` feature.
#+ [tracing]
## The tokio console address.
## @toml2docs:none-default
#+ tokio_console_addr = "127.0.0.1"

## The memory options.
[memory]
## Whether to enable heap profiling activation during startup.
## When enabled, heap profiling will be activated if the `MALLOC_CONF` environment variable
## is set to "prof:true,prof_active:false". The official image sets this environment variable.
## Default is true.
enable_heap_profiling = true

## Configuration options for the event recorder.
[event_recorder]
## TTL for the events table that will be used to store the events. Default is `90d`.
ttl = "90d"

@@ -1,346 +1,96 @@
## The working home directory.
data_home = "./greptimedb_data"

## Store server address(es). The format depends on the selected backend.
##
## For etcd: a list of "host:port" endpoints.
## e.g. ["192.168.1.1:2379", "192.168.1.2:2379"]
##
## For PostgreSQL: a connection string in libpq format or URI.
## e.g.
## - "host=localhost port=5432 user=postgres password=<PASSWORD> dbname=postgres"
## - "postgresql://user:password@localhost:5432/mydb?connect_timeout=10"
## For details, see: https://docs.rs/tokio-postgres/latest/tokio_postgres/config/struct.Config.html
##
## For MySQL: a MySQL connection URL.
## e.g. "mysql://user:password@localhost:3306/greptime_meta?ssl-mode=VERIFY_CA&ssl-ca=/path/to/ca.pem"
store_addrs = ["127.0.0.1:2379"]

## If it's not empty, the metasrv will store all data with this key prefix.
store_key_prefix = ""

## The datastore for the meta server.
## Available values:
## - `etcd_store` (default value)
## - `memory_store`
## - `postgres_store`
## - `mysql_store`
backend = "etcd_store"

## Table name in RDS to store metadata. Effective when using an RDS kvbackend.
## **Only used when backend is `postgres_store`.**
meta_table_name = "greptime_metakv"

## Optional PostgreSQL schema for metadata table and election table name qualification.
## When the PostgreSQL public schema is not writable (e.g., PostgreSQL 15+ with a restricted public schema),
## set this to a writable schema. GreptimeDB will use `meta_schema_name`.`meta_table_name`.
## GreptimeDB will NOT create the schema automatically; please ensure it exists or the user has permission.
## **Only used when backend is `postgres_store`.**
meta_schema_name = "greptime_schema"

## Advisory lock id in PostgreSQL for election. Effective when using PostgreSQL as the kvbackend.
## Only used when backend is `postgres_store`.
meta_election_lock_id = 1
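## Putting the backend options above together, a PostgreSQL-backed metasrv could look like
## the following sketch (host, credentials, and schema are placeholders):
#+ backend = "postgres_store"
#+ store_addrs = ["postgresql://user:password@127.0.0.1:5432/greptime_meta"]
#+ meta_table_name = "greptime_metakv"
#+ meta_schema_name = "greptime_schema"
#+ meta_election_lock_id = 1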

## Datanode selector type.
## - `round_robin` (default value)
## - `lease_based`
## - `load_based`
## For details, please see "https://docs.greptime.com/developer-guide/metasrv/selector".
selector = "round_robin"

## Store data in memory.
use_memory_store = false

## Whether to enable region failover.
## This feature is only available on GreptimeDB running in cluster mode and
## - Using Remote WAL
## - Using shared storage (e.g., s3).
enable_region_failover = false

## The delay before starting region failure detection.
## This delay helps prevent Metasrv from triggering unnecessary region failovers before all Datanodes are fully started.
## Especially useful when the cluster is not deployed with GreptimeDB Operator and maintenance mode is not enabled.
region_failure_detector_initialization_delay = '10m'

## Whether to allow region failover on local WAL.
## **This option is not recommended to be set to true, because it may lead to data loss during failover.**
allow_region_failover_on_local_wal = false

## Max allowed idle time before removing node info from metasrv memory.
node_max_idle_time = "24hours"

## Whether to enable greptimedb telemetry. Enabled by default.
#+ enable_telemetry = true

## The runtime options.
#+ [runtime]
## The number of threads to execute the runtime for global read operations.
#+ global_rt_size = 8
## The number of threads to execute the runtime for global write operations.
#+ compact_rt_size = 4

## TLS configuration for the kv store backend (applicable to the etcd, PostgreSQL, and MySQL backends).
## When using etcd, PostgreSQL, or MySQL as the metadata store, you can configure TLS here.
##
## Note: if TLS is configured in both this section and the `store_addrs` connection string, the
## settings here will override the TLS settings in `store_addrs`.
[backend_tls]
## TLS mode, refer to https://www.postgresql.org/docs/current/libpq-ssl.html
## - "disable" - No TLS
## - "prefer" (default) - Try TLS, fall back to plain
## - "require" - Require TLS
## - "verify_ca" - Require TLS and verify the CA
## - "verify_full" - Require TLS and verify the hostname
mode = "prefer"

## Path to the client certificate file (for client authentication),
## like "/path/to/client.crt".
cert_path = ""

## Path to the client private key file (for client authentication),
## like "/path/to/client.key".
key_path = ""

## Path to the CA certificate file (for server certificate verification).
## Required when using custom CAs or self-signed certificates.
## Leave empty to use system root certificates only.
## Like "/path/to/ca.crt".
ca_cert_path = ""
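## For example, to require full certificate and hostname verification against a custom CA
## (the path is a placeholder):
#+ mode = "verify_full"
#+ ca_cert_path = "/path/to/ca.crt"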

## The gRPC server options.
[grpc]
## The address to bind the gRPC server.
bind_addr = "127.0.0.1:3002"
## The communication server address for the frontend and datanode to connect to metasrv.
## If left empty or unset, the server will automatically use the IP address of the first network interface
## on the host, with the same port number as the one specified in `bind_addr`.
server_addr = "127.0.0.1:3002"
## The number of server worker threads.
runtime_size = 8
## The maximum receive message size for the gRPC server.
max_recv_message_size = "512MB"
## The maximum send message size for the gRPC server.
max_send_message_size = "512MB"

## The HTTP server options.
[http]
## The address to bind the HTTP server.
addr = "127.0.0.1:4000"
## HTTP request timeout. Set to 0 to disable timeout.
timeout = "0s"
## HTTP request body limit.
## The following units are supported: `B`, `KB`, `KiB`, `MB`, `MiB`, `GB`, `GiB`, `TB`, `TiB`, `PB`, `PiB`.
## Set to 0 to disable limit.
body_limit = "64MB"

## Procedure storage options.
[procedure]
## Procedure max retry times.
max_retry_times = 12
## Initial retry delay of procedures; increases exponentially.
retry_delay = "500ms"

## Auto split large values.
## GreptimeDB procedure uses etcd as the default metadata storage backend.
## The etcd maximum size of any request is 1.5 MiB:
## 1500KiB = 1536KiB (1.5MiB) - 36KiB (reserved size of key)
## Comment out `max_metadata_value_size` to disable splitting of large values (no limit).
max_metadata_value_size = "1500KiB"

## Max running procedures.
## The maximum number of procedures that can be running at the same time.
## If the number of running procedures exceeds this limit, the procedure will be rejected.
max_running_procedures = 128

## Failure detector options.
## GreptimeDB uses the Phi Accrual Failure Detector algorithm to detect datanode failures.
[failure_detector]
## Maximum acceptable φ before the peer is treated as failed.
## Lower values react faster but yield more false positives.
threshold = 8.0
## The minimum standard deviation of the heartbeat intervals,
## so tiny variations don't make φ explode. Prevents hypersensitivity when heartbeat intervals barely vary.
min_std_deviation = "100ms"
## The acceptable pause duration between heartbeats:
## an extra grace period added to the learned mean interval before φ rises, absorbing temporary network hiccups or GC pauses.
acceptable_heartbeat_pause = "10000ms"
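## How these knobs interact (a sketch of the standard Phi Accrual formula, not quoted from
## this file): the detector learns the mean and standard deviation of heartbeat
## inter-arrival times and computes φ = -log10(P(a heartbeat arrives later than the time
## already elapsed)). With `threshold = 8.0`, a peer is suspected roughly when the chance
## that its heartbeat is merely late drops to about 10^-8; `acceptable_heartbeat_pause`
## widens the learned interval so short stalls keep φ low.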

## Datanode options.
[datanode]
## Datanode client options.
[datanode.client]
## Operation timeout.
timeout = "10s"

## Connect server timeout.
connect_timeout = "10s"

## `TCP_NODELAY` option for accepted connections.
tcp_nodelay = true

[wal]
## Available WAL providers:
## - `raft_engine` (default): there is no raft-engine WAL config here, since metasrv currently only participates in remote WAL.
## - `kafka`: metasrv **has to be** configured with the Kafka WAL config when the datanode uses the Kafka WAL provider.
provider = "raft_engine"

## The broker endpoints of the Kafka cluster.
##
## **It's only used when the provider is `kafka`**.
broker_endpoints = ["127.0.0.1:9092"]

## Automatically create topics for WAL.
## Set to `true` to automatically create topics for WAL.
## Otherwise, use topics named `topic_name_prefix_[0..num_topics)`.
## **It's only used when the provider is `kafka`**.
auto_create_topics = true

## Interval of automatic WAL pruning.
## Set to `0s` to disable automatic WAL pruning, which periodically deletes unused remote WAL entries.
## **It's only used when the provider is `kafka`**.
auto_prune_interval = "30m"

## Estimated size threshold to trigger a flush when using Kafka remote WAL.
## Since multiple regions may share a Kafka topic, the estimated size is calculated as:
## (latest_entry_id - flushed_entry_id) * avg_record_size
## MetaSrv triggers a flush for a region when this estimated size exceeds `flush_trigger_size`.
## - `latest_entry_id`: The latest entry ID in the topic.
## - `flushed_entry_id`: The last flushed entry ID for the region.
## Set to "0" to let the system decide the flush trigger size.
## **It's only used when the provider is `kafka`**.
flush_trigger_size = "512MB"
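## A worked example of the estimate above: if a region's `flushed_entry_id` lags
## `latest_entry_id` by 1,000,000 entries and the average record size is 1KiB, the
## estimated unflushed size is 1,000,000 * 1KiB ≈ 976MiB, which exceeds the 512MB trigger,
## so MetaSrv would schedule a flush for that region. (Numbers are illustrative only.)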
## Estimated size threshold to trigger a checkpoint when using Kafka remote WAL.
## The estimated size is calculated as:
## (latest_entry_id - last_checkpoint_entry_id) * avg_record_size
## MetaSrv triggers a checkpoint for a region when this estimated size exceeds `checkpoint_trigger_size`.
## Set to "0" to let the system decide the checkpoint trigger size.
## **It's only used when the provider is `kafka`**.
checkpoint_trigger_size = "128MB"

## Concurrent task limit for automatic WAL pruning.
## **It's only used when the provider is `kafka`**.
auto_prune_parallelism = 10

## Number of topics used for remote WAL.
## **It's only used when the provider is `kafka`**.
num_topics = 64

## Topic selector type.
## Available selector types:
## - `round_robin` (default)
## **It's only used when the provider is `kafka`**.
selector_type = "round_robin"

## A Kafka topic is constructed by concatenating `topic_name_prefix` and `topic_id`.
## Only accepts strings that match the following regular expression pattern:
## [a-zA-Z_:-][a-zA-Z0-9_:\-\.@#]*
## e.g., greptimedb_wal_topic_0, greptimedb_wal_topic_1.
## **It's only used when the provider is `kafka`**.
topic_name_prefix = "greptimedb_wal_topic"

## Expected number of replicas of each partition.
## **It's only used when the provider is `kafka`**.
replication_factor = 1

## The timeout for creating a Kafka topic.
## **It's only used when the provider is `kafka`**.
create_topic_timeout = "30s"

## The Kafka SASL configuration.
## **It's only used when the provider is `kafka`**.
## Available SASL mechanisms:
## - `PLAIN`
## - `SCRAM-SHA-256`
## - `SCRAM-SHA-512`
# [wal.sasl]
# type = "SCRAM-SHA-512"
# username = "user_kafka"
# password = "secret"

## The Kafka TLS configuration.
## **It's only used when the provider is `kafka`**.
# [wal.tls]
# server_ca_cert_path = "/path/to/server_cert"
# client_cert_path = "/path/to/client_cert"
# client_key_path = "/path/to/key"

## Configuration options for the event recorder.
[event_recorder]
## TTL for the events table that will be used to store the events. Default is `90d`.
ttl = "90d"

## Configuration options for stats persistence.
[stats_persistence]
## TTL for the stats table that will be used to store the stats.
## Set to `0s` to disable stats persistence.
## Default is `0s`.
## If you want to enable stats persistence, set the TTL to a value greater than 0.
## It is recommended to set a small value, e.g., `3h`.
ttl = "0s"
## The interval to persist the stats. Default is `10m`.
## The minimum value is `10m`; if the value is less than `10m`, it will be overridden to `10m`.
interval = "10m"

## The logging options.
[logging]
## The directory to store the log files. If set to empty, logs will not be written to files.
dir = "./greptimedb_data/logs"

## The log level. Can be `info`/`debug`/`warn`/`error`.
## @toml2docs:none-default
level = "info"

## Enable OTLP tracing.
enable_otlp_tracing = false

## The OTLP tracing endpoint.
otlp_endpoint = "http://localhost:4318/v1/traces"

## Whether to append logs to stdout.
append_stdout = true

## The log format. Can be `text`/`json`.
log_format = "text"

## The maximum amount of log files.
max_log_files = 720

## The OTLP tracing export protocol. Can be `grpc`/`http`.
otlp_export_protocol = "http"

## Additional OTLP headers, only valid when using OTLP over HTTP.
[logging.otlp_headers]
## @toml2docs:none-default
#Authorization = "Bearer my-token"
## @toml2docs:none-default
#Database = "My database"

## The percentage of tracing that will be sampled and exported.
## Valid range `[0, 1]`: 1 means all traces are sampled, 0 means no traces are sampled; the default value is 1.
## Ratios > 1 are treated as 1. Fractions < 0 are treated as 0.
[logging.tracing_sample_ratio]
default_ratio = 1.0

## The tracing options. Only effective when compiled with the `tokio-console` feature.
#+ [tracing]
## The tokio console address.
## @toml2docs:none-default
#+ tokio_console_addr = "127.0.0.1"

## The memory options.
[memory]
## Whether to enable heap profiling activation during startup.
## When enabled, heap profiling will be activated if the `MALLOC_CONF` environment variable
## is set to "prof:true,prof_active:false". The official image sets this environment variable.
## Default is true.
enable_heap_profiling = true
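## A usage sketch (assuming the jemalloc allocator, as the `MALLOC_CONF` note above
## implies): launch the process with MALLOC_CONF="prof:true,prof_active:false" in its
## environment; with `enable_heap_profiling = true`, the server then activates heap
## profiling during startup.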

File diff suppressed because it is too large. Load Diff

cyborg/.gitignore (vendored)
@@ -1,2 +0,0 @@
node_modules
.env
Some files were not shown because too many files have changed in this diff.