Mirror of https://github.com/GreptimeTeam/greptimedb.git (synced 2025-12-23 06:30:05 +00:00)

Compare commits: 1 commit

| Author | SHA1 | Date |
|---|---|---|
|  | 710a68d2d6 |  |
@@ -4,14 +4,12 @@ linker = "aarch64-linux-gnu-gcc"
[alias]
sqlness = "run --bin sqlness-runner --"

[unstable.git]
shallow_index = true
shallow_deps = true
[unstable.gitoxide]
fetch = true
checkout = true
list_files = true
internal_use_git2 = false

[env]
CARGO_WORKSPACE_DIR = { value = "", relative = true }
[build]
rustflags = [
# lints
# TODO: use lint configuration in cargo https://github.com/rust-lang/cargo/issues/5034
"-Wclippy::print_stdout",
"-Wclippy::print_stderr",
"-Wclippy::implicit_clone",
]
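A quick note on the `.cargo/config.toml` hunk above: with the `[alias]` entry, `cargo sqlness` should expand to `cargo run --bin sqlness-runner --`, and the `[build].rustflags` entries raise the listed clippy lints to warnings for every crate in the workspace. A minimal sketch of how these entries are typically exercised (the arguments below are illustrative, not taken from this repository):

```bash
# The alias shown above makes this roughly equivalent to
# `cargo run --bin sqlness-runner -- --help`.
cargo sqlness --help

# Roughly what the [build].rustflags entries inject on every build,
# expressed here as an explicit environment variable instead:
RUSTFLAGS="-Wclippy::print_stdout -Wclippy::print_stderr -Wclippy::implicit_clone" cargo clippy
```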
@@ -1,10 +0,0 @@
root = true

[*]
end_of_line = lf
indent_style = space
insert_final_newline = true
trim_trailing_whitespace = true

[{Makefile,**.mk}]
indent_style = tab
.env.example: 21 changed lines
@@ -14,23 +14,8 @@ GT_AZBLOB_CONTAINER=AZBLOB container
GT_AZBLOB_ACCOUNT_NAME=AZBLOB account name
GT_AZBLOB_ACCOUNT_KEY=AZBLOB account key
GT_AZBLOB_ENDPOINT=AZBLOB endpoint
# Settings for gcs test
GT_GCS_BUCKET = GCS bucket
# Settings for gcs test
GT_GCS_BUCKET = GCS bucket
GT_GCS_SCOPE = GCS scope
GT_GCS_CREDENTIAL_PATH = GCS credential path
GT_GCS_CREDENTIAL = GCS credential
GT_GCS_CREDENTIAL_PATH = GCS credential path
GT_GCS_ENDPOINT = GCS end point
# Settings for kafka wal test
GT_KAFKA_ENDPOINTS = localhost:9092

# Setting for fuzz tests
GT_MYSQL_ADDR = localhost:4002

# Setting for unstable fuzz tests
GT_FUZZ_BINARY_PATH=/path/to/
GT_FUZZ_INSTANCE_ROOT_DIR=/tmp/unstable_greptime
GT_FUZZ_INPUT_MAX_ROWS=2048
GT_FUZZ_INPUT_MAX_TABLES=32
GT_FUZZ_INPUT_MAX_COLUMNS=32
GT_FUZZ_INPUT_MAX_ALTER_ACTIONS=256
GT_FUZZ_INPUT_MAX_INSERT_ACTIONS=8
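The `.env.example` hunk above lists the `GT_*` variables the test suites read from the environment. A hedged sketch of a local setup for the fuzz-test settings; the concrete values and the `cargo fuzz list` call are assumptions for illustration, not prescribed by the template:

```bash
# Hypothetical local values for the fuzz-test settings shown above.
export GT_MYSQL_ADDR=localhost:4002
export GT_FUZZ_BINARY_PATH=/path/to/fuzz-binaries   # placeholder path
export GT_FUZZ_INPUT_MAX_ROWS=2048
export GT_FUZZ_INPUT_MAX_TABLES=32

# Assumes cargo-fuzz is installed; lists the fuzz targets under tests-fuzz.
cargo fuzz list --fuzz-dir tests-fuzz
```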
.github/CODEOWNERS (vendored): 27 changed lines
@@ -1,27 +0,0 @@
# GreptimeDB CODEOWNERS

# These owners will be the default owners for everything in the repo.

* @GreptimeTeam/db-approver

## [Module] Database Engine
/src/index @zhongzc
/src/mito2 @evenyag @v0y4g3r @waynexia
/src/query @evenyag

## [Module] Distributed
/src/common/meta @MichaelScofield
/src/common/procedure @MichaelScofield
/src/meta-client @MichaelScofield
/src/meta-srv @MichaelScofield

## [Module] Write Ahead Log
/src/log-store @v0y4g3r
/src/store-api @v0y4g3r

## [Module] Metrics Engine
/src/metric-engine @waynexia
/src/promql @waynexia

## [Module] Flow
/src/flow @zhongzc @waynexia
@@ -1,7 +1,7 @@
---
name: Bug report
description: Is something not working? Help us fix it!
labels: [ "C-bug" ]
labels: [ "bug" ]
body:
- type: markdown
attributes:
@@ -21,7 +21,6 @@ body:
- Locking issue
- Performance issue
- Unexpected error
- User Experience
- Other
validations:
required: true
@@ -34,14 +33,9 @@ body:
multiple: true
options:
- Standalone mode
- Distributed Cluster
- Storage Engine
- Query Engine
- Table Engine
- Write Protocols
- Metasrv
- Frontend
- Datanode
- Meta
- Other
validations:
required: true
@@ -83,17 +77,6 @@ body:
validations:
required: true

- type: input
id: greptimedb
attributes:
label: What version of GreptimeDB did you use?
description: |
Please provide the version of GreptimeDB. For example:
0.5.1 etc. You can get it by executing command line `greptime --version`.
placeholder: "0.5.1"
validations:
required: true

- type: textarea
id: logs
attributes:
.github/ISSUE_TEMPLATE/config.yml (vendored): 2 changed lines
@@ -4,5 +4,5 @@ contact_links:
url: https://greptime.com/slack
about: Get free help from the Greptime community
- name: Greptime Community Discussion
url: https://github.com/greptimeTeam/discussions
url: https://github.com/greptimeTeam/greptimedb/discussions
about: Get free help from the Greptime community

.github/ISSUE_TEMPLATE/enhancement.yml (vendored): 2 changed lines
@@ -1,7 +1,7 @@
---
name: Enhancement
description: Suggest an enhancement to existing functionality
labels: [ "C-enhancement" ]
labels: [ "enhancement" ]
body:
- type: dropdown
id: type

@@ -1,7 +1,7 @@
---
name: New Feature
name: Feature request
description: Suggest a new feature for GreptimeDB
labels: [ "C-feature" ]
labels: [ "feature request" ]
body:
- type: markdown
id: info
@@ -1,18 +0,0 @@
name: Build and push CI Docker image
description: Build and push CI Docker image to local registry
inputs:
binary_path:
default: "./bin"
description: "Binary path"
runs:
using: composite
steps:
- name: Build and push to local registry
uses: docker/build-push-action@v5
with:
context: .
file: ./docker/ci/ubuntu/Dockerfile.fuzztests
push: true
tags: localhost:5001/greptime/greptimedb:latest
build-args: |
BINARY_PATH=${{ inputs.binary_path }}

@@ -22,15 +22,15 @@ inputs:
build-dev-builder-ubuntu:
description: Build dev-builder-ubuntu image
required: false
default: "true"
default: 'true'
build-dev-builder-centos:
description: Build dev-builder-centos image
required: false
default: "true"
default: 'true'
build-dev-builder-android:
description: Build dev-builder-android image
required: false
default: "true"
default: 'true'
runs:
using: composite
steps:
@@ -41,34 +41,27 @@ runs:
username: ${{ inputs.dockerhub-image-registry-username }}
password: ${{ inputs.dockerhub-image-registry-token }}

- name: Set up qemu for multi-platform builds
uses: docker/setup-qemu-action@v3
with:
platforms: linux/amd64,linux/arm64
# The latest version will lead to segmentation fault.
image: tonistiigi/binfmt:qemu-v7.0.0-28

- name: Build and push dev-builder-ubuntu image # Build image for amd64 and arm64 platform.
- name: Build and push dev-builder-ubuntu image
shell: bash
if: ${{ inputs.build-dev-builder-ubuntu == 'true' }}
run: |
make dev-builder \
BASE_IMAGE=ubuntu \
BUILDX_MULTI_PLATFORM_BUILD=all \
BUILDX_MULTI_PLATFORM_BUILD=true \
IMAGE_REGISTRY=${{ inputs.dockerhub-image-registry }} \
IMAGE_NAMESPACE=${{ inputs.dockerhub-image-namespace }} \
DEV_BUILDER_IMAGE_TAG=${{ inputs.version }}
IMAGE_TAG=${{ inputs.version }}

- name: Build and push dev-builder-centos image # Only build image for amd64 platform.
- name: Build and push dev-builder-centos image
shell: bash
if: ${{ inputs.build-dev-builder-centos == 'true' }}
run: |
make dev-builder \
BASE_IMAGE=centos \
BUILDX_MULTI_PLATFORM_BUILD=amd64 \
BUILDX_MULTI_PLATFORM_BUILD=true \
IMAGE_REGISTRY=${{ inputs.dockerhub-image-registry }} \
IMAGE_NAMESPACE=${{ inputs.dockerhub-image-namespace }} \
DEV_BUILDER_IMAGE_TAG=${{ inputs.version }}
IMAGE_TAG=${{ inputs.version }}

- name: Build and push dev-builder-android image # Only build image for amd64 platform.
shell: bash
@@ -76,7 +69,8 @@ runs:
run: |
make dev-builder \
BASE_IMAGE=android \
BUILDX_MULTI_PLATFORM_BUILD=amd64 \
IMAGE_REGISTRY=${{ inputs.dockerhub-image-registry }} \
IMAGE_NAMESPACE=${{ inputs.dockerhub-image-namespace }} \
DEV_BUILDER_IMAGE_TAG=${{ inputs.version }}
IMAGE_TAG=${{ inputs.version }} && \

docker push ${{ inputs.dockerhub-image-registry }}/${{ inputs.dockerhub-image-namespace }}/dev-builder-android:${{ inputs.version }}
.github/actions/build-greptime-binary/action.yml (vendored): 20 changed lines
@@ -24,14 +24,6 @@ inputs:
description: Build android artifacts
required: false
default: 'false'
image-namespace:
description: Image Namespace
required: false
default: 'greptime'
image-registry:
description: Image Registry
required: false
default: 'docker.io'
runs:
using: composite
steps:
@@ -43,9 +35,7 @@ runs:
make build-by-dev-builder \
CARGO_PROFILE=${{ inputs.cargo-profile }} \
FEATURES=${{ inputs.features }} \
BASE_IMAGE=${{ inputs.base-image }} \
IMAGE_NAMESPACE=${{ inputs.image-namespace }} \
IMAGE_REGISTRY=${{ inputs.image-registry }}
BASE_IMAGE=${{ inputs.base-image }}

- name: Upload artifacts
uses: ./.github/actions/upload-artifacts
@@ -54,7 +44,7 @@ runs:
PROFILE_TARGET: ${{ inputs.cargo-profile == 'dev' && 'debug' || inputs.cargo-profile }}
with:
artifacts-dir: ${{ inputs.artifacts-dir }}
target-files: ./target/$PROFILE_TARGET/greptime
target-file: ./target/$PROFILE_TARGET/greptime
version: ${{ inputs.version }}
working-dir: ${{ inputs.working-dir }}

@@ -63,15 +53,13 @@ runs:
shell: bash
if: ${{ inputs.build-android-artifacts == 'true' }}
run: |
cd ${{ inputs.working-dir }} && make strip-android-bin \
IMAGE_NAMESPACE=${{ inputs.image-namespace }} \
IMAGE_REGISTRY=${{ inputs.image-registry }}
cd ${{ inputs.working-dir }} && make strip-android-bin

- name: Upload android artifacts
uses: ./.github/actions/upload-artifacts
if: ${{ inputs.build-android-artifacts == 'true' }}
with:
artifacts-dir: ${{ inputs.artifacts-dir }}
target-files: ./target/aarch64-linux-android/release/greptime
target-file: ./target/aarch64-linux-android/release/greptime
version: ${{ inputs.version }}
working-dir: ${{ inputs.working-dir }}
.github/actions/build-greptime-images/action.yml (vendored): 14 changed lines
@@ -34,8 +34,8 @@ inputs:
required: true
push-latest-tag:
description: Whether to push the latest tag
required: true
default: 'false'
required: false
default: 'true'
runs:
using: composite
steps:
@@ -47,17 +47,13 @@ runs:
password: ${{ inputs.image-registry-password }}

- name: Set up qemu for multi-platform builds
uses: docker/setup-qemu-action@v3
with:
platforms: linux/amd64,linux/arm64
# The latest version will lead to segmentation fault.
image: tonistiigi/binfmt:qemu-v7.0.0-28
uses: docker/setup-qemu-action@v2

- name: Set up buildx
uses: docker/setup-buildx-action@v2

- name: Download amd64 artifacts
uses: actions/download-artifact@v4
uses: actions/download-artifact@v3
with:
name: ${{ inputs.amd64-artifact-name }}

@@ -70,7 +66,7 @@ runs:
mv ${{ inputs.amd64-artifact-name }} amd64

- name: Download arm64 artifacts
uses: actions/download-artifact@v4
uses: actions/download-artifact@v3
if: ${{ inputs.arm64-artifact-name }}
with:
name: ${{ inputs.arm64-artifact-name }}
.github/actions/build-images/action.yml (vendored): 8 changed lines
@@ -22,8 +22,8 @@ inputs:
required: true
push-latest-tag:
description: Whether to push the latest tag
required: true
default: 'false'
required: false
default: 'true'
dev-mode:
description: Enable dev mode, only build standard greptime
required: false
@@ -41,8 +41,8 @@ runs:
image-name: ${{ inputs.image-name }}
image-tag: ${{ inputs.version }}
docker-file: docker/ci/ubuntu/Dockerfile
amd64-artifact-name: greptime-linux-amd64-${{ inputs.version }}
arm64-artifact-name: greptime-linux-arm64-${{ inputs.version }}
amd64-artifact-name: greptime-linux-amd64-pyo3-${{ inputs.version }}
arm64-artifact-name: greptime-linux-arm64-pyo3-${{ inputs.version }}
platforms: linux/amd64,linux/arm64
push-latest-tag: ${{ inputs.push-latest-tag }}
.github/actions/build-linux-artifacts/action.yml (vendored): 37 changed lines
@@ -16,13 +16,7 @@ inputs:
dev-mode:
description: Enable dev mode, only build standard greptime
required: false
default: "false"
image-namespace:
description: Image Namespace
required: true
image-registry:
description: Image Registry
required: true
default: 'false'
working-dir:
description: Working directory to build the artifacts
required: false
@@ -36,19 +30,28 @@ runs:
# NOTE: If the BUILD_JOBS > 4, it's always OOM in EC2 instance.
run: |
cd ${{ inputs.working-dir }} && \
make run-it-in-container BUILD_JOBS=4 \
IMAGE_NAMESPACE=${{ inputs.image-namespace }} \
IMAGE_REGISTRY=${{ inputs.image-registry }}
make run-it-in-container BUILD_JOBS=4

- name: Upload sqlness logs
if: ${{ failure() && inputs.disable-run-tests == 'false' }} # Only upload logs when the integration tests failed.
uses: actions/upload-artifact@v4
uses: actions/upload-artifact@v3
with:
name: sqlness-logs
path: /tmp/greptime-*.log
retention-days: 3

- name: Build greptime # Builds standard greptime binary
- name: Build standard greptime
uses: ./.github/actions/build-greptime-binary
with:
base-image: ubuntu
features: pyo3_backend,servers/dashboard
cargo-profile: ${{ inputs.cargo-profile }}
artifacts-dir: greptime-linux-${{ inputs.arch }}-pyo3-${{ inputs.version }}
version: ${{ inputs.version }}
working-dir: ${{ inputs.working-dir }}

- name: Build greptime without pyo3
if: ${{ inputs.dev-mode == 'false' }}
uses: ./.github/actions/build-greptime-binary
with:
base-image: ubuntu
@@ -57,8 +60,6 @@ runs:
artifacts-dir: greptime-linux-${{ inputs.arch }}-${{ inputs.version }}
version: ${{ inputs.version }}
working-dir: ${{ inputs.working-dir }}
image-registry: ${{ inputs.image-registry }}
image-namespace: ${{ inputs.image-namespace }}

- name: Clean up the target directory # Clean up the target directory for the centos7 base image, or it will still use the objects of last build.
shell: bash
@@ -67,7 +68,7 @@ runs:

- name: Build greptime on centos base image
uses: ./.github/actions/build-greptime-binary
if: ${{ inputs.arch == 'amd64' && inputs.dev-mode == 'false' }} # Builds greptime for centos if the host machine is amd64.
if: ${{ inputs.arch == 'amd64' && inputs.dev-mode == 'false' }} # Only build centos7 base image for amd64.
with:
base-image: centos
features: servers/dashboard
@@ -75,17 +76,13 @@ runs:
artifacts-dir: greptime-linux-${{ inputs.arch }}-centos-${{ inputs.version }}
version: ${{ inputs.version }}
working-dir: ${{ inputs.working-dir }}
image-registry: ${{ inputs.image-registry }}
image-namespace: ${{ inputs.image-namespace }}

- name: Build greptime on android base image
uses: ./.github/actions/build-greptime-binary
if: ${{ inputs.arch == 'amd64' && inputs.dev-mode == 'false' }} # Builds arm64 greptime binary for android if the host machine amd64.
if: ${{ inputs.arch == 'amd64' && inputs.dev-mode == 'false' }} # Only build android base image on amd64.
with:
base-image: android
artifacts-dir: greptime-android-arm64-${{ inputs.version }}
version: ${{ inputs.version }}
working-dir: ${{ inputs.working-dir }}
build-android-artifacts: true
image-registry: ${{ inputs.image-registry }}
image-namespace: ${{ inputs.image-namespace }}
.github/actions/build-macos-artifacts/action.yml (vendored): 21 changed lines
@@ -4,6 +4,9 @@ inputs:
arch:
description: Architecture to build
required: true
rust-toolchain:
description: Rust toolchain to use
required: true
cargo-profile:
description: Cargo profile to build
required: true
@@ -40,9 +43,10 @@ runs:
brew install protobuf

- name: Install rust toolchain
uses: actions-rust-lang/setup-rust-toolchain@v1
uses: dtolnay/rust-toolchain@master
with:
target: ${{ inputs.arch }}
toolchain: ${{ inputs.rust-toolchain }}
targets: ${{ inputs.arch }}

- name: Start etcd # For integration tests.
if: ${{ inputs.disable-run-tests == 'false' }}
@@ -55,22 +59,15 @@ runs:
if: ${{ inputs.disable-run-tests == 'false' }}
uses: taiki-e/install-action@nextest

# Get proper backtraces in mac Sonoma. Currently there's an issue with the new
# linker that prevents backtraces from getting printed correctly.
#
# <https://github.com/rust-lang/rust/issues/113783>
- name: Run integration tests
if: ${{ inputs.disable-run-tests == 'false' }}
shell: bash
env:
CARGO_BUILD_RUSTFLAGS: "-Clink-arg=-Wl,-ld_classic"
SQLNESS_OPTS: "--preserve-state"
run: |
make test sqlness-test

- name: Upload sqlness logs
if: ${{ failure() }} # Only upload logs when the integration tests failed.
uses: actions/upload-artifact@v4
uses: actions/upload-artifact@v3
with:
name: sqlness-logs
path: /tmp/greptime-*.log
@@ -78,8 +75,6 @@ runs:

- name: Build greptime binary
shell: bash
env:
CARGO_BUILD_RUSTFLAGS: "-Clink-arg=-Wl,-ld_classic"
run: |
make build \
CARGO_PROFILE=${{ inputs.cargo-profile }} \
@@ -90,5 +85,5 @@ runs:
uses: ./.github/actions/upload-artifacts
with:
artifacts-dir: ${{ inputs.artifacts-dir }}
target-files: target/${{ inputs.arch }}/${{ inputs.cargo-profile }}/greptime
target-file: target/${{ inputs.arch }}/${{ inputs.cargo-profile }}/greptime
version: ${{ inputs.version }}
@@ -4,6 +4,9 @@ inputs:
arch:
description: Architecture to build
required: true
rust-toolchain:
description: Rust toolchain to use
required: true
cargo-profile:
description: Cargo profile to build
required: true
@@ -22,17 +25,27 @@ inputs:
runs:
using: composite
steps:
- uses: arduino/setup-protoc@v3
- uses: arduino/setup-protoc@v1

- name: Install rust toolchain
uses: actions-rust-lang/setup-rust-toolchain@v1
uses: dtolnay/rust-toolchain@master
with:
target: ${{ inputs.arch }}
toolchain: ${{ inputs.rust-toolchain }}
targets: ${{ inputs.arch }}
components: llvm-tools-preview

- name: Rust Cache
uses: Swatinem/rust-cache@v2

- name: Install Python
uses: actions/setup-python@v4
with:
python-version: '3.10'

- name: Install PyArrow Package
shell: pwsh
run: pip install pyarrow

- name: Install WSL distribution
uses: Vampire/setup-wsl@v2
with:
@@ -46,25 +59,22 @@ runs:
if: ${{ inputs.disable-run-tests == 'false' }}
shell: pwsh
run: make test sqlness-test
env:
RUST_BACKTRACE: 1
SQLNESS_OPTS: "--preserve-state"

- name: Upload sqlness logs
if: ${{ failure() }} # Only upload logs when the integration tests failed.
uses: actions/upload-artifact@v4
uses: actions/upload-artifact@v3
with:
name: sqlness-logs
path: C:\Users\RUNNER~1\AppData\Local\Temp\sqlness*
path: ${{ runner.temp }}/greptime-*.log
retention-days: 3

- name: Build greptime binary
shell: pwsh
run: cargo build --profile ${{ inputs.cargo-profile }} --features ${{ inputs.features }} --target ${{ inputs.arch }} --bin greptime
run: cargo build --profile ${{ inputs.cargo-profile }} --features ${{ inputs.features }} --target ${{ inputs.arch }}

- name: Upload artifacts
uses: ./.github/actions/upload-artifacts
with:
artifacts-dir: ${{ inputs.artifacts-dir }}
target-files: target/${{ inputs.arch }}/${{ inputs.cargo-profile }}/greptime,target/${{ inputs.arch }}/${{ inputs.cargo-profile }}/greptime.pdb
target-file: target/${{ inputs.arch }}/${{ inputs.cargo-profile }}/greptime
version: ${{ inputs.version }}
.github/actions/fuzz-test/action.yaml (vendored): 19 changed lines
@@ -1,19 +0,0 @@
name: Fuzz Test
description: 'Fuzz test given setup and service'
inputs:
target:
description: "The fuzz target to test"
required: true
max-total-time:
description: "Max total time(secs)"
required: true
unstable:
default: 'false'
description: "Enable unstable feature"
runs:
using: composite
steps:
- name: Run Fuzz Test
shell: bash
run: cargo fuzz run ${{ inputs.target }} --fuzz-dir tests-fuzz -D -s none ${{ inputs.unstable == 'true' && '--features=unstable' || '' }} -- -max_total_time=${{ inputs.max-total-time }}
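The fuzz-test action above reduces to a single `cargo fuzz run` invocation. A local equivalent with hypothetical values substituted for the action inputs (the target name and time budget are illustrative):

```bash
# Mirrors the action's single step; `fuzz_insert` and the 120-second budget
# stand in for the `target` and `max-total-time` inputs.
cargo fuzz run fuzz_insert \
  --fuzz-dir tests-fuzz \
  -D -s none \
  -- -max_total_time=120
```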
@@ -9,13 +9,13 @@ runs:
steps:
# Download artifacts from previous jobs, the artifacts will be downloaded to:
# ${WORKING_DIR}
# |- greptime-darwin-amd64-v0.5.0/greptime-darwin-amd64-v0.5.0.tar.gz
# |- greptime-darwin-amd64-v0.5.0.sha256sum/greptime-darwin-amd64-v0.5.0.sha256sum
# |- greptime-darwin-amd64-pyo3-v0.5.0/greptime-darwin-amd64-pyo3-v0.5.0.tar.gz
# |- greptime-darwin-amd64-pyo3-v0.5.0.sha256sum/greptime-darwin-amd64-pyo3-v0.5.0.sha256sum
# |- greptime-darwin-amd64-v0.5.0/greptime-darwin-amd64-v0.5.0.tar.gz
# |- greptime-darwin-amd64-v0.5.0.sha256sum/greptime-darwin-amd64-v0.5.0.sha256sum
# ...
- name: Download artifacts
uses: actions/download-artifact@v4
uses: actions/download-artifact@v3

- name: Create git tag for release
if: ${{ github.event_name != 'push' }} # Meaning this is a scheduled or manual workflow.
.github/actions/release-cn-artifacts/action.yaml (vendored): 24 changed lines
@@ -51,8 +51,8 @@ inputs:
required: true
upload-to-s3:
description: Upload to S3
required: true
default: 'false'
required: false
default: 'true'
artifacts-dir:
description: Directory to store artifacts
required: false
@@ -64,34 +64,26 @@ inputs:
upload-max-retry-times:
description: Max retry times for uploading artifacts to S3
required: false
default: "30"
default: "20"
upload-retry-timeout:
description: Timeout for uploading artifacts to S3
required: false
default: "120" # minutes
default: "30" # minutes
runs:
using: composite
steps:
- name: Download artifacts
uses: actions/download-artifact@v4
uses: actions/download-artifact@v3
with:
path: ${{ inputs.artifacts-dir }}

- name: Install s5cmd
shell: bash
run: |
wget https://github.com/peak/s5cmd/releases/download/v2.3.0/s5cmd_2.3.0_Linux-64bit.tar.gz
tar -xzf s5cmd_2.3.0_Linux-64bit.tar.gz
sudo mv s5cmd /usr/local/bin/
sudo chmod +x /usr/local/bin/s5cmd

- name: Release artifacts to cn region
uses: nick-invision/retry@v2
if: ${{ inputs.upload-to-s3 == 'true' }}
env:
AWS_ACCESS_KEY_ID: ${{ inputs.aws-cn-access-key-id }}
AWS_SECRET_ACCESS_KEY: ${{ inputs.aws-cn-secret-access-key }}
AWS_REGION: ${{ inputs.aws-cn-region }}
AWS_DEFAULT_REGION: ${{ inputs.aws-cn-region }}
UPDATE_VERSION_INFO: ${{ inputs.update-version-info }}
with:
max_attempts: ${{ inputs.upload-max-retry-times }}
@@ -131,10 +123,10 @@ runs:
DST_REGISTRY_PASSWORD: ${{ inputs.dst-image-registry-password }}
run: |
./.github/scripts/copy-image.sh \
${{ inputs.src-image-registry }}/${{ inputs.src-image-namespace }}/${{ inputs.src-image-name }}-centos:${{ inputs.version }} \
${{ inputs.src-image-registry }}/${{ inputs.src-image-namespace }}/${{ inputs.src-image-name }}-centos:latest \
${{ inputs.dst-image-registry }}/${{ inputs.dst-image-namespace }}

- name: Push latest greptimedb-centos image from DockerHub to ACR
- name: Push greptimedb-centos image from DockerHub to ACR
shell: bash
if: ${{ inputs.dev-mode == 'false' && inputs.push-latest-tag == 'true' }}
env:
.github/actions/setup-chaos/action.yml (vendored): 17 changed lines
@@ -1,17 +0,0 @@
name: Setup Kind
description: Deploy Kind
runs:
using: composite
steps:
- uses: actions/checkout@v4
- name: Create kind cluster
shell: bash
run: |
helm repo add chaos-mesh https://charts.chaos-mesh.org
kubectl create ns chaos-mesh
helm install chaos-mesh chaos-mesh/chaos-mesh -n=chaos-mesh --version 2.6.3
- name: Print Chaos-mesh
if: always()
shell: bash
run: |
kubectl get po -n chaos-mesh
.github/actions/setup-cyborg/action.yml (vendored): 16 changed lines
@@ -1,16 +0,0 @@
name: Setup cyborg environment
description: Setup cyborg environment
runs:
using: composite
steps:
- uses: actions/setup-node@v4
with:
node-version: 22
- uses: pnpm/action-setup@v3
with:
package_json_file: 'cyborg/package.json'
run_install: true
- name: Describe the Environment
working-directory: cyborg
shell: bash
run: pnpm tsx -v
.github/actions/setup-etcd-cluster/action.yml (vendored): 32 changed lines
@@ -1,32 +0,0 @@
name: Setup Etcd cluster
description: Deploy Etcd cluster on Kubernetes
inputs:
etcd-replicas:
default: 1
description: "Etcd replicas"
namespace:
default: "etcd-cluster"

runs:
using: composite
steps:
- name: Install Etcd cluster
shell: bash
run: |
helm upgrade \
--install etcd oci://registry-1.docker.io/bitnamicharts/etcd \
--set replicaCount=${{ inputs.etcd-replicas }} \
--set resources.requests.cpu=50m \
--set resources.requests.memory=128Mi \
--set resources.limits.cpu=1500m \
--set resources.limits.memory=2Gi \
--set auth.rbac.create=false \
--set auth.rbac.token.enabled=false \
--set persistence.size=2Gi \
--create-namespace \
--set global.security.allowInsecureImages=true \
--set image.registry=docker.io \
--set image.repository=greptime/etcd \
--set image.tag=3.6.1-debian-12-r3 \
--version 12.0.8 \
-n ${{ inputs.namespace }}
@@ -1,95 +0,0 @@
name: Setup GreptimeDB cluster
description: Deploy GreptimeDB cluster on Kubernetes
inputs:
frontend-replicas:
default: 2
description: "Number of Frontend replicas"
datanode-replicas:
default: 2
description: "Number of Datanode replicas"
meta-replicas:
default: 2
description: "Number of Metasrv replicas"
image-registry:
default: "docker.io"
description: "Image registry"
image-repository:
default: "greptime/greptimedb"
description: "Image repository"
image-tag:
default: "latest"
description: 'Image tag'
etcd-endpoints:
default: "etcd.etcd-cluster.svc.cluster.local:2379"
description: "Etcd endpoints"
values-filename:
default: "with-minio.yaml"
enable-region-failover:
default: false

runs:
using: composite
steps:
- name: Install GreptimeDB operator
uses: nick-fields/retry@v3
with:
timeout_minutes: 3
max_attempts: 3
shell: bash
command: |
helm repo add greptime https://greptimeteam.github.io/helm-charts/
helm repo update
helm upgrade \
--install \
--create-namespace \
greptimedb-operator greptime/greptimedb-operator \
-n greptimedb-admin \
--wait \
--wait-for-jobs
- name: Install GreptimeDB cluster
shell: bash
run: |
helm upgrade \
--install my-greptimedb \
--set meta.backendStorage.etcd.endpoints=${{ inputs.etcd-endpoints }} \
--set meta.enableRegionFailover=${{ inputs.enable-region-failover }} \
--set image.registry=${{ inputs.image-registry }} \
--set image.repository=${{ inputs.image-repository }} \
--set image.tag=${{ inputs.image-tag }} \
--set base.podTemplate.main.resources.requests.cpu=50m \
--set base.podTemplate.main.resources.requests.memory=256Mi \
--set base.podTemplate.main.resources.limits.cpu=2000m \
--set base.podTemplate.main.resources.limits.memory=3Gi \
--set frontend.replicas=${{ inputs.frontend-replicas }} \
--set datanode.replicas=${{ inputs.datanode-replicas }} \
--set meta.replicas=${{ inputs.meta-replicas }} \
greptime/greptimedb-cluster \
--create-namespace \
-n my-greptimedb \
--values ./.github/actions/setup-greptimedb-cluster/${{ inputs.values-filename }} \
--wait \
--wait-for-jobs
- name: Wait for GreptimeDB
shell: bash
run: |
while true; do
PHASE=$(kubectl -n my-greptimedb get gtc my-greptimedb -o jsonpath='{.status.clusterPhase}')
if [ "$PHASE" == "Running" ]; then
echo "Cluster is ready"
break
else
echo "Cluster is not ready yet: Current phase: $PHASE"
kubectl get pods -n my-greptimedb
sleep 5 # wait for 5 seconds before check again.
fi
done
- name: Print GreptimeDB info
if: always()
shell: bash
run: |
kubectl get all --show-labels -n my-greptimedb
- name: Describe Nodes
if: always()
shell: bash
run: |
kubectl describe nodes
@@ -1,13 +0,0 @@
meta:
configData: |-
[runtime]
global_rt_size = 4
datanode:
configData: |-
[runtime]
global_rt_size = 4
compact_rt_size = 2
frontend:
configData: |-
[runtime]
global_rt_size = 4
@@ -1,33 +0,0 @@
meta:
configData: |-
[runtime]
global_rt_size = 4

[datanode]
[datanode.client]
timeout = "120s"
datanode:
configData: |-
[runtime]
global_rt_size = 4
compact_rt_size = 2

[storage]
cache_path = "/data/greptimedb/s3cache"
cache_capacity = "256MB"
frontend:
configData: |-
[runtime]
global_rt_size = 4

[meta_client]
ddl_timeout = "120s"
objectStorage:
s3:
bucket: default
region: us-west-2
root: test-root
endpoint: http://minio.minio.svc.cluster.local
credentials:
accessKeyId: rootuser
secretAccessKey: rootpass123
@@ -1,29 +0,0 @@
meta:
configData: |-
[runtime]
global_rt_size = 4

[datanode]
[datanode.client]
timeout = "120s"
datanode:
configData: |-
[runtime]
global_rt_size = 4
compact_rt_size = 2
frontend:
configData: |-
[runtime]
global_rt_size = 4

[meta_client]
ddl_timeout = "120s"
objectStorage:
s3:
bucket: default
region: us-west-2
root: test-root
endpoint: http://minio.minio.svc.cluster.local
credentials:
accessKeyId: rootuser
secretAccessKey: rootpass123
@@ -1,51 +0,0 @@
logging:
level: "info"
format: "json"
filters:
- log_store=debug
meta:
configData: |-
[runtime]
global_rt_size = 4

[wal]
provider = "kafka"
broker_endpoints = ["kafka.kafka-cluster.svc.cluster.local:9092"]
num_topics = 3
auto_prune_interval = "30s"
trigger_flush_threshold = 100

[datanode]
[datanode.client]
timeout = "120s"
datanode:
configData: |-
[runtime]
global_rt_size = 4
compact_rt_size = 2

[wal]
provider = "kafka"
broker_endpoints = ["kafka.kafka-cluster.svc.cluster.local:9092"]
overwrite_entry_start_id = true
frontend:
configData: |-
[runtime]
global_rt_size = 4

[meta_client]
ddl_timeout = "120s"
objectStorage:
s3:
bucket: default
region: us-west-2
root: test-root
endpoint: http://minio.minio.svc.cluster.local
credentials:
accessKeyId: rootuser
secretAccessKey: rootpass123
remoteWal:
enabled: true
kafka:
brokerEndpoints:
- "kafka.kafka-cluster.svc.cluster.local:9092"
.github/actions/setup-kafka-cluster/action.yml (vendored): 30 changed lines
@@ -1,30 +0,0 @@
name: Setup Kafka cluster
description: Deploy Kafka cluster on Kubernetes
inputs:
controller-replicas:
default: 3
description: "Kafka controller replicas"
namespace:
default: "kafka-cluster"

runs:
using: composite
steps:
- name: Install Kafka cluster
shell: bash
run: |
helm upgrade \
--install kafka oci://registry-1.docker.io/bitnamicharts/kafka \
--set controller.replicaCount=${{ inputs.controller-replicas }} \
--set controller.resources.requests.cpu=50m \
--set controller.resources.requests.memory=128Mi \
--set controller.resources.limits.cpu=2000m \
--set controller.resources.limits.memory=2Gi \
--set listeners.controller.protocol=PLAINTEXT \
--set listeners.client.protocol=PLAINTEXT \
--create-namespace \
--set image.registry=docker.io \
--set image.repository=greptime/kafka \
--set image.tag=3.9.0-debian-12-r1 \
--version 31.0.0 \
-n ${{ inputs.namespace }}
.github/actions/setup-kind/action.yml (vendored): 10 changed lines
@@ -1,10 +0,0 @@
name: Setup Kind
description: Deploy Kind
runs:
using: composite
steps:
- uses: actions/checkout@v4
- name: Create kind cluster
shell: bash
run: |
./.github/scripts/kind-with-registry.sh
.github/actions/setup-minio/action.yml (vendored): 24 changed lines
@@ -1,24 +0,0 @@
name: Setup Minio cluster
description: Deploy Minio cluster on Kubernetes
inputs:
replicas:
default: 1
description: "replicas"

runs:
using: composite
steps:
- name: Install Etcd cluster
shell: bash
run: |
helm repo add minio https://charts.min.io/
helm upgrade --install minio \
--set resources.requests.memory=128Mi \
--set replicas=${{ inputs.replicas }} \
--set mode=standalone \
--set rootUser=rootuser,rootPassword=rootpass123 \
--set buckets[0].name=default \
--set service.port=80,service.targetPort=9000 \
minio/minio \
--create-namespace \
-n minio
@@ -1,32 +0,0 @@
name: Setup PostgreSQL
description: Deploy PostgreSQL on Kubernetes
inputs:
postgres-replicas:
default: 1
description: "Number of PostgreSQL replicas"
namespace:
default: "postgres-namespace"
description: "The PostgreSQL namespace"
storage-size:
default: "1Gi"
description: "Storage size for PostgreSQL"

runs:
using: composite
steps:
- name: Install PostgreSQL
shell: bash
run: |
helm upgrade \
--install postgresql oci://registry-1.docker.io/bitnamicharts/postgresql \
--set replicaCount=${{ inputs.postgres-replicas }} \
--set global.security.allowInsecureImages=true \
--set image.registry=docker.io \
--set image.repository=greptime/postgresql \
--set image.tag=17.5.0-debian-12-r3 \
--version 16.7.4 \
--set persistence.size=${{ inputs.storage-size }} \
--set postgresql.username=greptimedb \
--set postgresql.password=admin \
--create-namespace \
-n ${{ inputs.namespace }}
.github/actions/sqlness-test/action.yml (vendored): 11 changed lines
@@ -57,14 +57,3 @@ runs:
AWS_SECRET_ACCESS_KEY: ${{ inputs.aws-secret-access-key }}
run: |
aws s3 rm s3://${{ inputs.aws-ci-test-bucket }}/${{ inputs.data-root }} --recursive
- name: Export kind logs
if: failure()
shell: bash
run: kind export logs -n greptimedb-operator-e2e /tmp/kind
- name: Upload logs
if: failure()
uses: actions/upload-artifact@v4
with:
name: kind-logs
path: /tmp/kind
retention-days: 3

.github/actions/start-runner/action.yml (vendored): 4 changed lines
@@ -38,7 +38,7 @@ runs:
steps:
- name: Configure AWS credentials
if: startsWith(inputs.runner, 'ec2')
uses: aws-actions/configure-aws-credentials@v4
uses: aws-actions/configure-aws-credentials@v2
with:
aws-access-key-id: ${{ inputs.aws-access-key-id }}
aws-secret-access-key: ${{ inputs.aws-secret-access-key }}
@@ -56,7 +56,7 @@ runs:

- name: Start EC2 runner
if: startsWith(inputs.runner, 'ec2')
uses: machulav/ec2-github-runner@v2.3.8
uses: machulav/ec2-github-runner@v2
id: start-linux-arm64-ec2-runner
with:
mode: start

.github/actions/stop-runner/action.yml (vendored): 4 changed lines
@@ -25,7 +25,7 @@ runs:
steps:
- name: Configure AWS credentials
if: ${{ inputs.label && inputs.ec2-instance-id }}
uses: aws-actions/configure-aws-credentials@v4
uses: aws-actions/configure-aws-credentials@v2
with:
aws-access-key-id: ${{ inputs.aws-access-key-id }}
aws-secret-access-key: ${{ inputs.aws-secret-access-key }}
@@ -33,7 +33,7 @@ runs:

- name: Stop EC2 runner
if: ${{ inputs.label && inputs.ec2-instance-id }}
uses: machulav/ec2-github-runner@v2.3.8
uses: machulav/ec2-github-runner@v2
with:
mode: stop
label: ${{ inputs.label }}
.github/actions/upload-artifacts/action.yml (vendored): 27 changed lines
@@ -4,9 +4,9 @@ inputs:
artifacts-dir:
description: Directory to store artifacts
required: true
target-files:
description: The multiple target files to upload, separated by comma
required: false
target-file:
description: The path of the target artifact
required: true
version:
description: Version of the artifact
required: true
@@ -18,21 +18,16 @@ runs:
using: composite
steps:
- name: Create artifacts directory
if: ${{ inputs.target-files != '' }}
working-directory: ${{ inputs.working-dir }}
shell: bash
run: |
set -e
mkdir -p ${{ inputs.artifacts-dir }}
IFS=',' read -ra FILES <<< "${{ inputs.target-files }}"
for file in "${FILES[@]}"; do
cp "$file" ${{ inputs.artifacts-dir }}/
done
mkdir -p ${{ inputs.artifacts-dir }} && \
cp ${{ inputs.target-file }} ${{ inputs.artifacts-dir }}

# The compressed artifacts will use the following layout:
# greptime-linux-amd64-v0.3.0sha256sum
# greptime-linux-amd64-v0.3.0.tar.gz
# greptime-linux-amd64-v0.3.0
# greptime-linux-amd64-pyo3-v0.3.0sha256sum
# greptime-linux-amd64-pyo3-v0.3.0.tar.gz
# greptime-linux-amd64-pyo3-v0.3.0
# └── greptime
- name: Compress artifacts and calculate checksum
working-directory: ${{ inputs.working-dir }}
@@ -54,15 +49,15 @@ runs:
run: Get-FileHash ${{ inputs.artifacts-dir }}.tar.gz -Algorithm SHA256 | select -ExpandProperty Hash > ${{ inputs.artifacts-dir }}.sha256sum

# Note: The artifacts will be double zip compressed(related issue: https://github.com/actions/upload-artifact/issues/39).
# However, when we use 'actions/download-artifact' to download the artifacts, it will be automatically unzipped.
# However, when we use 'actions/download-artifact@v3' to download the artifacts, it will be automatically unzipped.
- name: Upload artifacts
uses: actions/upload-artifact@v4
uses: actions/upload-artifact@v3
with:
name: ${{ inputs.artifacts-dir }}
path: ${{ inputs.working-dir }}/${{ inputs.artifacts-dir }}.tar.gz

- name: Upload checksum
uses: actions/upload-artifact@v4
uses: actions/upload-artifact@v3
with:
name: ${{ inputs.artifacts-dir }}.sha256sum
path: ${{ inputs.working-dir }}/${{ inputs.artifacts-dir }}.sha256sum
.github/cargo-blacklist.txt (vendored): 3 changed lines
@@ -1,3 +0,0 @@
native-tls
openssl
aws-lc-sys
.github/doc-label-config.yml (vendored, normal file): 4 changed lines
@@ -0,0 +1,4 @@
Doc not needed:
- '- \[x\] This PR does not require documentation updates.'
Doc update required:
- '- \[ \] This PR does not require documentation updates.'
.github/labeler.yaml (vendored): 15 changed lines
@@ -1,15 +0,0 @@
ci:
- changed-files:
- any-glob-to-any-file: .github/**

docker:
- changed-files:
- any-glob-to-any-file: docker/**

documentation:
- changed-files:
- any-glob-to-any-file: docs/**

dashboard:
- changed-files:
- any-glob-to-any-file: grafana/**
.github/pr-title-breaking-change-label-config.json (vendored, normal file): 13 changed lines
@@ -0,0 +1,13 @@
{
"LABEL": {
"name": "breaking change",
"color": "D93F0B"
},
"CHECKS": {
"regexp": "^(?:(?!!:).)*$",
"ignoreLabels": [
"ignore-title"
],
"alwaysPassCI": true
}
}
.github/pr-title-checker-config.json (vendored, normal file): 12 changed lines
@@ -0,0 +1,12 @@
{
"LABEL": {
"name": "Invalid PR Title",
"color": "B60205"
},
"CHECKS": {
"regexp": "^(feat|fix|test|refactor|chore|style|docs|perf|build|ci|revert)(\\(.*\\))?\\!?:.*",
"ignoreLabels": [
"ignore-title"
]
}
}
.github/pull_request_template.md (vendored): 22 changed lines
@@ -1,11 +1,8 @@
I hereby agree to the terms of the [GreptimeDB CLA](https://github.com/GreptimeTeam/.github/blob/main/CLA.md).

## Refer to a related PR or issue link (optional)
I hereby agree to the terms of the [GreptimeDB CLA](https://gist.github.com/xtang/6378857777706e568c1949c7578592cc)

## What's changed and what's your intention?

<!--
__!!! DO NOT LEAVE THIS BLOCK EMPTY !!!__
_PLEASE DO NOT LEAVE THIS EMPTY !!!_

Please explain IN DETAIL what the changes are in this PR and why they are needed:

@@ -13,14 +10,11 @@ Please explain IN DETAIL what the changes are in this PR and why they are needed
- How does this PR work? Need a brief introduction for the changed logic (optional)
- Describe clearly one logical change and avoid lazy messages (optional)
- Describe any limitations of the current code (optional)
- Describe if this PR will break **API or data compatibility** (optional)
-->

## PR Checklist
Please convert it to a draft if some of the following conditions are not met.
## Checklist

- [ ] I have written the necessary rustdoc comments.
- [ ] I have added the necessary unit tests and integration tests.
- [ ] This PR requires documentation updates.
- [ ] API changes are backward compatible.
- [ ] Schema or data changes are backward compatible.
- [ ] I have written the necessary rustdoc comments.
- [ ] I have added the necessary unit tests and integration tests.
- [ ] This PR does not require documentation updates.

## Refer to a related PR or issue link (optional)
.github/scripts/check-install-script.sh (vendored): 14 changed lines
@@ -1,14 +0,0 @@
#!/bin/sh

set -e

# Get the latest version of github.com/GreptimeTeam/greptimedb
VERSION=$(curl -s https://api.github.com/repos/GreptimeTeam/greptimedb/releases/latest | jq -r '.tag_name')

echo "Downloading the latest version: $VERSION"

# Download the install script
curl -fsSL https://raw.githubusercontent.com/greptimeteam/greptimedb/main/scripts/install.sh | sh -s $VERSION

# Execute the `greptime` command
./greptime --version
.github/scripts/check-version.sh (vendored): 42 changed lines
@@ -1,42 +0,0 @@
#!/bin/bash

# Get current version
CURRENT_VERSION=$1
if [ -z "$CURRENT_VERSION" ]; then
echo "Error: Failed to get current version"
exit 1
fi

# Get the latest version from GitHub Releases
API_RESPONSE=$(curl -s "https://api.github.com/repos/GreptimeTeam/greptimedb/releases/latest")

if [ -z "$API_RESPONSE" ] || [ "$(echo "$API_RESPONSE" | jq -r '.message')" = "Not Found" ]; then
echo "Error: Failed to fetch latest version from GitHub"
exit 1
fi

# Get the latest version
LATEST_VERSION=$(echo "$API_RESPONSE" | jq -r '.tag_name')

if [ -z "$LATEST_VERSION" ] || [ "$LATEST_VERSION" = "null" ]; then
echo "Error: No valid version found in GitHub releases"
exit 1
fi

# Cleaned up version number format (removed possible 'v' prefix and -nightly suffix)
CLEAN_CURRENT=$(echo "$CURRENT_VERSION" | sed 's/^v//' | sed 's/-nightly-.*//')
CLEAN_LATEST=$(echo "$LATEST_VERSION" | sed 's/^v//' | sed 's/-nightly-.*//')

echo "Current version: $CLEAN_CURRENT"
echo "Latest release version: $CLEAN_LATEST"

# Use sort -V to compare versions
HIGHER_VERSION=$(printf "%s\n%s" "$CLEAN_CURRENT" "$CLEAN_LATEST" | sort -V | tail -n1)

if [ "$HIGHER_VERSION" = "$CLEAN_CURRENT" ]; then
echo "Current version ($CLEAN_CURRENT) is NEWER than or EQUAL to latest ($CLEAN_LATEST)"
echo "is-current-version-latest=true" >> $GITHUB_OUTPUT
else
echo "Current version ($CLEAN_CURRENT) is OLDER than latest ($CLEAN_LATEST)"
echo "is-current-version-latest=false" >> $GITHUB_OUTPUT
fi
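The version comparison in `check-version.sh` above hinges on `sort -V`, which orders version strings numerically component by component. A standalone illustration with made-up values:

```bash
# Example values only; 0.10.0 sorts above 0.9.2 under `sort -V`,
# whereas a plain lexical sort would rank it lower.
CLEAN_CURRENT=0.9.2
CLEAN_LATEST=0.10.0
HIGHER_VERSION=$(printf "%s\n%s" "$CLEAN_CURRENT" "$CLEAN_LATEST" | sort -V | tail -n1)
echo "$HIGHER_VERSION"   # prints 0.10.0
```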
.github/scripts/create-version.sh (vendored): 29 changed lines
@@ -8,25 +8,24 @@ set -e
# - If it's a nightly build, the version is 'nightly-YYYYMMDD-$(git rev-parse --short HEAD)', like 'nightly-20230712-e5b243c'.
# create_version ${GIHUB_EVENT_NAME} ${NEXT_RELEASE_VERSION} ${NIGHTLY_RELEASE_PREFIX}
function create_version() {
# Read from environment variables.
# Read from envrionment variables.
if [ -z "$GITHUB_EVENT_NAME" ]; then
echo "GITHUB_EVENT_NAME is empty" >&2
echo "GITHUB_EVENT_NAME is empty"
exit 1
fi

if [ -z "$NEXT_RELEASE_VERSION" ]; then
echo "NEXT_RELEASE_VERSION is empty, use version from Cargo.toml" >&2
# NOTE: Need a `v` prefix for the version string.
export NEXT_RELEASE_VERSION=v$(grep '^version = ' Cargo.toml | cut -d '"' -f 2 | head -n 1)
echo "NEXT_RELEASE_VERSION is empty"
exit 1
fi

if [ -z "$NIGHTLY_RELEASE_PREFIX" ]; then
echo "NIGHTLY_RELEASE_PREFIX is empty" >&2
echo "NIGHTLY_RELEASE_PREFIX is empty"
exit 1
fi

# Reuse $NEXT_RELEASE_VERSION to identify whether it's a nightly build.
# It will be like 'nightly-20230808-7d0d8dc6'.
# It will be like 'nigtly-20230808-7d0d8dc6'.
if [ "$NEXT_RELEASE_VERSION" = nightly ]; then
echo "$NIGHTLY_RELEASE_PREFIX-$(date "+%Y%m%d")-$(git rev-parse --short HEAD)"
exit 0
@@ -36,7 +35,7 @@ function create_version() {
# It will be like 'dev-2023080819-f0e7216c'.
if [ "$NEXT_RELEASE_VERSION" = dev ]; then
if [ -z "$COMMIT_SHA" ]; then
echo "COMMIT_SHA is empty in dev build" >&2
echo "COMMIT_SHA is empty in dev build"
exit 1
fi
echo "dev-$(date "+%Y%m%d-%s")-$(echo "$COMMIT_SHA" | cut -c1-8)"
@@ -46,7 +45,7 @@ function create_version() {
# Note: Only output 'version=xxx' to stdout when everything is ok, so that it can be used in GitHub Actions Outputs.
if [ "$GITHUB_EVENT_NAME" = push ]; then
if [ -z "$GITHUB_REF_NAME" ]; then
echo "GITHUB_REF_NAME is empty in push event" >&2
echo "GITHUB_REF_NAME is empty in push event"
exit 1
fi
echo "$GITHUB_REF_NAME"
@@ -55,15 +54,15 @@ function create_version() {
elif [ "$GITHUB_EVENT_NAME" = schedule ]; then
echo "$NEXT_RELEASE_VERSION-$NIGHTLY_RELEASE_PREFIX-$(date "+%Y%m%d")"
else
echo "Unsupported GITHUB_EVENT_NAME: $GITHUB_EVENT_NAME" >&2
echo "Unsupported GITHUB_EVENT_NAME: $GITHUB_EVENT_NAME"
exit 1
fi
}

# You can run as following examples:
# GITHUB_EVENT_NAME=push NEXT_RELEASE_VERSION=v0.4.0 NIGHTLY_RELEASE_PREFIX=nightly GITHUB_REF_NAME=v0.3.0 ./create-version.sh
# GITHUB_EVENT_NAME=workflow_dispatch NEXT_RELEASE_VERSION=v0.4.0 NIGHTLY_RELEASE_PREFIX=nightly ./create-version.sh
# GITHUB_EVENT_NAME=schedule NEXT_RELEASE_VERSION=v0.4.0 NIGHTLY_RELEASE_PREFIX=nightly ./create-version.sh
# GITHUB_EVENT_NAME=schedule NEXT_RELEASE_VERSION=nightly NIGHTLY_RELEASE_PREFIX=nightly ./create-version.sh
# GITHUB_EVENT_NAME=workflow_dispatch COMMIT_SHA=f0e7216c4bb6acce9b29a21ec2d683be2e3f984a NEXT_RELEASE_VERSION=dev NIGHTLY_RELEASE_PREFIX=nightly ./create-version.sh
# GITHUB_EVENT_NAME=push NEXT_RELEASE_VERSION=v0.4.0 NIGHTLY_RELEASE_PREFIX=nigtly GITHUB_REF_NAME=v0.3.0 ./create-version.sh
# GITHUB_EVENT_NAME=workflow_dispatch NEXT_RELEASE_VERSION=v0.4.0 NIGHTLY_RELEASE_PREFIX=nigtly ./create-version.sh
# GITHUB_EVENT_NAME=schedule NEXT_RELEASE_VERSION=v0.4.0 NIGHTLY_RELEASE_PREFIX=nigtly ./create-version.sh
# GITHUB_EVENT_NAME=schedule NEXT_RELEASE_VERSION=nightly NIGHTLY_RELEASE_PREFIX=nigtly ./create-version.sh
# GITHUB_EVENT_NAME=workflow_dispatch COMMIT_SHA=f0e7216c4bb6acce9b29a21ec2d683be2e3f984a NEXT_RELEASE_VERSION=dev NIGHTLY_RELEASE_PREFIX=nigtly ./create-version.sh
create_version
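`create-version.sh` above prints the computed version string to stdout so a workflow step can capture it. A hedged example of invoking it the way its own usage comments describe, plus one assumed line showing how the output could be stored as a GitHub Actions output:

```bash
# Invocation mirrors the script's usage examples; the resulting string is
# date- and commit-dependent, so the echoed value here is only indicative.
GITHUB_EVENT_NAME=schedule NEXT_RELEASE_VERSION=nightly NIGHTLY_RELEASE_PREFIX=nightly \
  ./.github/scripts/create-version.sh
# -> e.g. nightly-20230808-7d0d8dc6

# Assumed wiring inside a workflow step (not taken from this diff):
echo "version=$(GITHUB_EVENT_NAME=schedule NEXT_RELEASE_VERSION=nightly NIGHTLY_RELEASE_PREFIX=nightly ./.github/scripts/create-version.sh)" >> "$GITHUB_OUTPUT"
```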
.github/scripts/deploy-greptimedb.sh (vendored): 46 changed lines
@@ -3,16 +3,14 @@
set -e
set -o pipefail

KUBERNETES_VERSION="${KUBERNETES_VERSION:-v1.32.0}"
KUBERNETES_VERSION="${KUBERNETES_VERSION:-v1.24.0}"
ENABLE_STANDALONE_MODE="${ENABLE_STANDALONE_MODE:-true}"
DEFAULT_INSTALL_NAMESPACE=${DEFAULT_INSTALL_NAMESPACE:-default}
GREPTIMEDB_IMAGE_TAG=${GREPTIMEDB_IMAGE_TAG:-latest}
GREPTIME_CHART="https://greptimeteam.github.io/helm-charts/"
ETCD_CHART="oci://registry-1.docker.io/bitnamicharts/etcd"
ETCD_CHART_VERSION="${ETCD_CHART_VERSION:-12.0.8}"
ETCD_IMAGE_TAG="${ETCD_IMAGE_TAG:-3.6.1-debian-12-r3}"
GREPTIME_CHART="https://greptimeteam.github.io/helm-charts/"

# Create a cluster with 1 control-plane node and 5 workers.
# Ceate a cluster with 1 control-plane node and 5 workers.
function create_kind_cluster() {
cat <<EOF | kind create cluster --name "${CLUSTER}" --image kindest/node:"$KUBERNETES_VERSION" --config=-
kind: Cluster
@@ -37,16 +35,10 @@ function add_greptime_chart() {
function deploy_etcd_cluster() {
local namespace="$1"

helm upgrade --install etcd "$ETCD_CHART" \
--version "$ETCD_CHART_VERSION" \
--create-namespace \
helm install etcd "$ETCD_CHART" \
--set replicaCount=3 \
--set auth.rbac.create=false \
--set auth.rbac.token.enabled=false \
--set global.security.allowInsecureImages=true \
--set image.registry=docker.io \
--set image.repository=greptime/etcd \
--set image.tag="$ETCD_IMAGE_TAG" \
-n "$namespace"

# Wait for etcd cluster to be ready.
@@ -56,8 +48,7 @@ function deploy_etcd_cluster() {
# Deploy greptimedb-operator.
function deploy_greptimedb_operator() {
# Use the latest chart and image.
helm upgrade --install greptimedb-operator greptime/greptimedb-operator \
--create-namespace \
helm install greptimedb-operator greptime/greptimedb-operator \
--set image.tag=latest \
-n "$DEFAULT_INSTALL_NAMESPACE"

@@ -75,11 +66,9 @@ function deploy_greptimedb_cluster() {

deploy_etcd_cluster "$install_namespace"

helm upgrade --install "$cluster_name" greptime/greptimedb-cluster \
--create-namespace \
helm install "$cluster_name" greptime/greptimedb-cluster \
--set image.tag="$GREPTIMEDB_IMAGE_TAG" \
--set meta.backendStorage.etcd.endpoints="etcd.$install_namespace:2379" \
--set meta.backendStorage.etcd.storeKeyPrefix="$cluster_name" \
--set meta.etcdEndpoints="etcd.$install_namespace:2379" \
-n "$install_namespace"

# Wait for greptimedb cluster to be ready.
@@ -112,17 +101,15 @@ function deploy_greptimedb_cluster_with_s3_storage() {

deploy_etcd_cluster "$install_namespace"

helm upgrade --install "$cluster_name" greptime/greptimedb-cluster -n "$install_namespace" \
--create-namespace \
helm install "$cluster_name" greptime/greptimedb-cluster -n "$install_namespace" \
--set image.tag="$GREPTIMEDB_IMAGE_TAG" \
--set meta.backendStorage.etcd.endpoints="etcd.$install_namespace:2379" \
--set meta.backendStorage.etcd.storeKeyPrefix="$cluster_name" \
--set objectStorage.s3.bucket="$AWS_CI_TEST_BUCKET" \
--set objectStorage.s3.region="$AWS_REGION" \
--set objectStorage.s3.root="$DATA_ROOT" \
--set objectStorage.credentials.secretName=s3-credentials \
--set objectStorage.credentials.accessKeyId="$AWS_ACCESS_KEY_ID" \
--set objectStorage.credentials.secretAccessKey="$AWS_SECRET_ACCESS_KEY"
--set meta.etcdEndpoints="etcd.$install_namespace:2379" \
--set storage.s3.bucket="$AWS_CI_TEST_BUCKET" \
--set storage.s3.region="$AWS_REGION" \
--set storage.s3.root="$DATA_ROOT" \
--set storage.credentials.secretName=s3-credentials \
--set storage.credentials.accessKeyId="$AWS_ACCESS_KEY_ID" \
--set storage.credentials.secretAccessKey="$AWS_SECRET_ACCESS_KEY"

# Wait for greptimedb cluster to be ready.
while true; do
@@ -147,8 +134,7 @@ function deploy_greptimedb_cluster_with_s3_storage() {
# Deploy standalone greptimedb.
# It will expose cluster service ports as '34000', '34001', '34002', '34003' to local access.
function deploy_standalone_greptimedb() {
helm upgrade --install greptimedb-standalone greptime/greptimedb-standalone \
--create-namespace \
helm install greptimedb-standalone greptime/greptimedb-standalone \
--set image.tag="$GREPTIMEDB_IMAGE_TAG" \
-n "$DEFAULT_INSTALL_NAMESPACE"
66
.github/scripts/kind-with-registry.sh
vendored
66
.github/scripts/kind-with-registry.sh
vendored
@@ -1,66 +0,0 @@
|
||||
#!/usr/bin/env bash
|
||||
|
||||
set -e
|
||||
set -o pipefail
|
||||
|
||||
# 1. Create registry container unless it already exists
|
||||
reg_name='kind-registry'
|
||||
reg_port='5001'
|
||||
if [ "$(docker inspect -f '{{.State.Running}}' "${reg_name}" 2>/dev/null || true)" != 'true' ]; then
|
||||
docker run \
|
||||
-d --restart=always -p "127.0.0.1:${reg_port}:5000" --network bridge --name "${reg_name}" \
|
||||
registry:2
|
||||
fi
|
||||
|
||||
# 2. Create kind cluster with containerd registry config dir enabled
|
||||
# TODO: kind will eventually enable this by default and this patch will
|
||||
# be unnecessary.
|
||||
#
|
||||
# See:
|
||||
# https://github.com/kubernetes-sigs/kind/issues/2875
|
||||
# https://github.com/containerd/containerd/blob/main/docs/cri/config.md#registry-configuration
|
||||
# See: https://github.com/containerd/containerd/blob/main/docs/hosts.md
|
||||
cat <<EOF | kind create cluster --wait 2m --config=-
|
||||
kind: Cluster
|
||||
apiVersion: kind.x-k8s.io/v1alpha4
|
||||
containerdConfigPatches:
|
||||
- |-
|
||||
[plugins."io.containerd.grpc.v1.cri".registry]
|
||||
config_path = "/etc/containerd/certs.d"
|
||||
EOF
|
||||
|
||||
# 3. Add the registry config to the nodes
|
||||
#
|
||||
# This is necessary because localhost resolves to loopback addresses that are
|
||||
# network-namespace local.
|
||||
# In other words: localhost in the container is not localhost on the host.
|
||||
#
|
||||
# We want a consistent name that works from both ends, so we tell containerd to
|
||||
# alias localhost:${reg_port} to the registry container when pulling images
|
||||
REGISTRY_DIR="/etc/containerd/certs.d/localhost:${reg_port}"
|
||||
for node in $(kind get nodes); do
|
||||
docker exec "${node}" mkdir -p "${REGISTRY_DIR}"
|
||||
cat <<EOF | docker exec -i "${node}" cp /dev/stdin "${REGISTRY_DIR}/hosts.toml"
|
||||
[host."http://${reg_name}:5000"]
|
||||
EOF
|
||||
done
|
||||
|
||||
# 4. Connect the registry to the cluster network if not already connected
|
||||
# This allows kind to bootstrap the network but ensures they're on the same network
|
||||
if [ "$(docker inspect -f='{{json .NetworkSettings.Networks.kind}}' "${reg_name}")" = 'null' ]; then
|
||||
docker network connect "kind" "${reg_name}"
|
||||
fi
|
||||
|
||||
# 5. Document the local registry
|
||||
# https://github.com/kubernetes/enhancements/tree/master/keps/sig-cluster-lifecycle/generic/1755-communicating-a-local-registry
|
||||
cat <<EOF | kubectl apply -f -
|
||||
apiVersion: v1
|
||||
kind: ConfigMap
|
||||
metadata:
|
||||
name: local-registry-hosting
|
||||
namespace: kube-public
|
||||
data:
|
||||
localRegistryHosting.v1: |
|
||||
host: "localhost:${reg_port}"
|
||||
help: "https://kind.sigs.k8s.io/docs/user/local-registry/"
|
||||
EOF
|
||||
34
.github/scripts/pull-test-deps-images.sh
vendored
34
.github/scripts/pull-test-deps-images.sh
vendored
@@ -1,34 +0,0 @@
|
||||
#!/bin/bash
|
||||
|
||||
# This script is used to pull the test dependency images that are stored in public ECR one by one to avoid rate limiting.
|
||||
|
||||
set -e
|
||||
|
||||
MAX_RETRIES=3
|
||||
|
||||
IMAGES=(
|
||||
"greptime/zookeeper:3.7"
|
||||
"greptime/kafka:3.9.0-debian-12-r1"
|
||||
"greptime/etcd:3.6.1-debian-12-r3"
|
||||
"greptime/minio:2024"
|
||||
"greptime/mysql:5.7"
|
||||
)
|
||||
|
||||
for image in "${IMAGES[@]}"; do
|
||||
for ((attempt=1; attempt<=MAX_RETRIES; attempt++)); do
|
||||
if docker pull "$image"; then
|
||||
# Successfully pulled the image.
|
||||
break
|
||||
else
|
||||
# Use some simple exponential backoff to avoid rate limiting.
|
||||
if [ $attempt -lt $MAX_RETRIES ]; then
|
||||
sleep_seconds=$((attempt * 5))
|
||||
echo "Attempt $attempt failed for $image, waiting $sleep_seconds seconds"
|
||||
sleep $sleep_seconds # 5s, 10s delays
|
||||
else
|
||||
echo "Failed to pull $image after $MAX_RETRIES attempts"
|
||||
exit 1
|
||||
fi
|
||||
fi
|
||||
done
|
||||
done
|
||||
37
.github/scripts/update-dev-builder-version.sh
vendored
37
.github/scripts/update-dev-builder-version.sh
vendored
@@ -1,37 +0,0 @@
|
||||
#!/bin/bash
|
||||
|
||||
DEV_BUILDER_IMAGE_TAG=$1
|
||||
|
||||
update_dev_builder_version() {
|
||||
if [ -z "$DEV_BUILDER_IMAGE_TAG" ]; then
|
||||
echo "Error: Should specify the dev-builder image tag"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
# Configure Git configs.
|
||||
git config --global user.email greptimedb-ci@greptime.com
|
||||
git config --global user.name greptimedb-ci
|
||||
|
||||
# Checkout a new branch.
|
||||
BRANCH_NAME="ci/update-dev-builder-$(date +%Y%m%d%H%M%S)"
|
||||
git checkout -b $BRANCH_NAME
|
||||
|
||||
# Update the dev-builder image tag in the Makefile.
|
||||
sed -i "s/DEV_BUILDER_IMAGE_TAG ?=.*/DEV_BUILDER_IMAGE_TAG ?= ${DEV_BUILDER_IMAGE_TAG}/g" Makefile
|
||||
|
||||
# Commit the changes.
|
||||
git add Makefile
|
||||
git commit -s -m "ci: update dev-builder image tag"
|
||||
git push origin $BRANCH_NAME
|
||||
|
||||
# Create a Pull Request.
|
||||
gh pr create \
|
||||
--title "ci: update dev-builder image tag" \
|
||||
--body "This PR updates the dev-builder image tag" \
|
||||
--base main \
|
||||
--head $BRANCH_NAME \
|
||||
--reviewer zyy17 \
|
||||
--reviewer daviderli614
|
||||
}
|
||||
|
||||
update_dev_builder_version
|
||||
46
.github/scripts/update-helm-charts-version.sh
vendored
46
.github/scripts/update-helm-charts-version.sh
vendored
@@ -1,46 +0,0 @@
|
||||
#!/bin/bash
|
||||
|
||||
set -e
|
||||
|
||||
VERSION=${VERSION}
|
||||
GITHUB_TOKEN=${GITHUB_TOKEN}
|
||||
|
||||
update_helm_charts_version() {
|
||||
# Configure Git configs.
|
||||
git config --global user.email update-helm-charts-version@greptime.com
|
||||
git config --global user.name update-helm-charts-version
|
||||
|
||||
# Clone helm-charts repository.
|
||||
git clone "https://x-access-token:${GITHUB_TOKEN}@github.com/GreptimeTeam/helm-charts.git"
|
||||
cd helm-charts
|
||||
|
||||
# Set default remote for gh CLI
|
||||
gh repo set-default GreptimeTeam/helm-charts
|
||||
|
||||
# Checkout a new branch.
|
||||
BRANCH_NAME="chore/greptimedb-${VERSION}"
|
||||
git checkout -b $BRANCH_NAME
|
||||
|
||||
# Update version.
|
||||
make update-version CHART=greptimedb-cluster VERSION=${VERSION}
|
||||
make update-version CHART=greptimedb-standalone VERSION=${VERSION}
|
||||
|
||||
# Update docs.
|
||||
make docs
|
||||
|
||||
# Commit the changes.
|
||||
git add .
|
||||
git commit -s -m "chore: Update GreptimeDB version to ${VERSION}"
|
||||
git push origin $BRANCH_NAME
|
||||
|
||||
# Create a Pull Request.
|
||||
gh pr create \
|
||||
--title "chore: Update GreptimeDB version to ${VERSION}" \
|
||||
--body "This PR updates the GreptimeDB version." \
|
||||
--base main \
|
||||
--head $BRANCH_NAME \
|
||||
--reviewer zyy17 \
|
||||
--reviewer daviderli614
|
||||
}
|
||||
|
||||
update_helm_charts_version
|
||||
@@ -1,42 +0,0 @@
|
||||
#!/bin/bash
|
||||
|
||||
set -e
|
||||
|
||||
VERSION=${VERSION}
|
||||
GITHUB_TOKEN=${GITHUB_TOKEN}
|
||||
|
||||
update_homebrew_greptime_version() {
|
||||
# Configure Git configs.
|
||||
git config --global user.email update-greptime-version@greptime.com
|
||||
git config --global user.name update-greptime-version
|
||||
|
||||
# Clone helm-charts repository.
|
||||
git clone "https://x-access-token:${GITHUB_TOKEN}@github.com/GreptimeTeam/homebrew-greptime.git"
|
||||
cd homebrew-greptime
|
||||
|
||||
# Set default remote for gh CLI
|
||||
gh repo set-default GreptimeTeam/homebrew-greptime
|
||||
|
||||
# Checkout a new branch.
|
||||
BRANCH_NAME="chore/greptimedb-${VERSION}"
|
||||
git checkout -b $BRANCH_NAME
|
||||
|
||||
# Update version.
|
||||
make update-greptime-version VERSION=${VERSION}
|
||||
|
||||
# Commit the changes.
|
||||
git add .
|
||||
git commit -s -m "chore: Update GreptimeDB version to ${VERSION}"
|
||||
git push origin $BRANCH_NAME
|
||||
|
||||
# Create a Pull Request.
|
||||
gh pr create \
|
||||
--title "chore: Update GreptimeDB version to ${VERSION}" \
|
||||
--body "This PR updates the GreptimeDB version." \
|
||||
--base main \
|
||||
--head $BRANCH_NAME \
|
||||
--reviewer zyy17 \
|
||||
--reviewer daviderli614
|
||||
}
|
||||
|
||||
update_homebrew_greptime_version
|
||||
16
.github/scripts/upload-artifacts-to-s3.sh
vendored
16
.github/scripts/upload-artifacts-to-s3.sh
vendored
@@ -27,13 +27,13 @@ function upload_artifacts() {
|
||||
# ├── latest-version.txt
|
||||
# ├── latest-nightly-version.txt
|
||||
# ├── v0.1.0
|
||||
# │ ├── greptime-darwin-amd64-v0.1.0.sha256sum
|
||||
# │ └── greptime-darwin-amd64-v0.1.0.tar.gz
|
||||
# │ ├── greptime-darwin-amd64-pyo3-v0.1.0.sha256sum
|
||||
# │ └── greptime-darwin-amd64-pyo3-v0.1.0.tar.gz
|
||||
# └── v0.2.0
|
||||
# ├── greptime-darwin-amd64-v0.2.0.sha256sum
|
||||
# └── greptime-darwin-amd64-v0.2.0.tar.gz
|
||||
# ├── greptime-darwin-amd64-pyo3-v0.2.0.sha256sum
|
||||
# └── greptime-darwin-amd64-pyo3-v0.2.0.tar.gz
|
||||
find "$ARTIFACTS_DIR" -type f \( -name "*.tar.gz" -o -name "*.sha256sum" \) | while IFS= read -r file; do
|
||||
s5cmd cp \
|
||||
aws s3 cp \
|
||||
"$file" "s3://$AWS_S3_BUCKET/$RELEASE_DIRS/$VERSION/$(basename "$file")"
|
||||
done
|
||||
}
|
||||
@@ -41,11 +41,11 @@ function upload_artifacts() {
|
||||
# Updates the latest version information in AWS S3 if UPDATE_VERSION_INFO is true.
|
||||
function update_version_info() {
|
||||
if [ "$UPDATE_VERSION_INFO" == "true" ]; then
|
||||
# If it's the official release(like v1.0.0, v1.0.1, v1.0.2, etc.), update latest-version.txt.
|
||||
# If it's the officail release(like v1.0.0, v1.0.1, v1.0.2, etc.), update latest-version.txt.
|
||||
if [[ "$VERSION" =~ ^v[0-9]+\.[0-9]+\.[0-9]+$ ]]; then
|
||||
echo "Updating latest-version.txt"
|
||||
echo "$VERSION" > latest-version.txt
|
||||
s5cmd cp \
|
||||
aws s3 cp \
|
||||
latest-version.txt "s3://$AWS_S3_BUCKET/$RELEASE_DIRS/latest-version.txt"
|
||||
fi
|
||||
|
||||
@@ -53,7 +53,7 @@ function update_version_info() {
|
||||
if [[ "$VERSION" == *"nightly"* ]]; then
|
||||
echo "Updating latest-nightly-version.txt"
|
||||
echo "$VERSION" > latest-nightly-version.txt
|
||||
s5cmd cp \
|
||||
aws s3 cp \
|
||||
latest-nightly-version.txt "s3://$AWS_S3_BUCKET/$RELEASE_DIRS/latest-nightly-version.txt"
|
||||
fi
|
||||
fi
|
||||
|
||||
18
.github/workflows/apidoc.yml
vendored
18
.github/workflows/apidoc.yml
vendored
@@ -1,7 +1,7 @@
|
||||
on:
|
||||
push:
|
||||
branches:
|
||||
- main
|
||||
- develop
|
||||
paths-ignore:
|
||||
- 'docs/**'
|
||||
- 'config/**'
|
||||
@@ -12,17 +12,20 @@ on:
|
||||
|
||||
name: Build API docs
|
||||
|
||||
env:
|
||||
RUST_TOOLCHAIN: nightly-2023-12-19
|
||||
|
||||
jobs:
|
||||
apidoc:
|
||||
runs-on: ubuntu-latest
|
||||
runs-on: ubuntu-20.04
|
||||
steps:
|
||||
- uses: actions/checkout@v4
|
||||
with:
|
||||
persist-credentials: false
|
||||
- uses: arduino/setup-protoc@v3
|
||||
- uses: actions/checkout@v3
|
||||
- uses: arduino/setup-protoc@v1
|
||||
with:
|
||||
repo-token: ${{ secrets.GITHUB_TOKEN }}
|
||||
- uses: actions-rust-lang/setup-rust-toolchain@v1
|
||||
- uses: dtolnay/rust-toolchain@master
|
||||
with:
|
||||
toolchain: ${{ env.RUST_TOOLCHAIN }}
|
||||
- run: cargo doc --workspace --no-deps --document-private-items
|
||||
- run: |
|
||||
cat <<EOF > target/doc/index.html
|
||||
@@ -37,4 +40,3 @@ jobs:
|
||||
uses: JamesIves/github-pages-deploy-action@v4
|
||||
with:
|
||||
folder: target/doc
|
||||
single-commit: true
|
||||
|
||||
35
.github/workflows/dependency-check.yml
vendored
35
.github/workflows/dependency-check.yml
vendored
@@ -1,35 +0,0 @@
|
||||
name: Check Dependencies
|
||||
|
||||
on:
|
||||
pull_request:
|
||||
branches:
|
||||
- main
|
||||
|
||||
jobs:
|
||||
check-dependencies:
|
||||
runs-on: ubuntu-latest
|
||||
|
||||
steps:
|
||||
- name: Checkout code
|
||||
uses: actions/checkout@v4
|
||||
with:
|
||||
persist-credentials: false
|
||||
|
||||
- name: Set up Rust
|
||||
uses: actions-rust-lang/setup-rust-toolchain@v1
|
||||
|
||||
- name: Run cargo tree
|
||||
run: cargo tree --prefix none > dependencies.txt
|
||||
|
||||
- name: Extract dependency names
|
||||
run: awk '{print $1}' dependencies.txt > dependency_names.txt
|
||||
|
||||
- name: Check for blacklisted crates
|
||||
run: |
|
||||
while read -r dep; do
|
||||
if grep -qFx "$dep" dependency_names.txt; then
|
||||
echo "Blacklisted crate '$dep' found in dependencies."
|
||||
exit 1
|
||||
fi
|
||||
done < .github/cargo-blacklist.txt
|
||||
echo "No blacklisted crates found."
|
||||
96
.github/workflows/dev-build.yml
vendored
96
.github/workflows/dev-build.yml
vendored
@@ -16,11 +16,11 @@ on:
|
||||
description: The runner uses to build linux-amd64 artifacts
|
||||
default: ec2-c6i.4xlarge-amd64
|
||||
options:
|
||||
- ubuntu-22.04
|
||||
- ubuntu-22.04-8-cores
|
||||
- ubuntu-22.04-16-cores
|
||||
- ubuntu-22.04-32-cores
|
||||
- ubuntu-22.04-64-cores
|
||||
- ubuntu-20.04
|
||||
- ubuntu-20.04-8-cores
|
||||
- ubuntu-20.04-16-cores
|
||||
- ubuntu-20.04-32-cores
|
||||
- ubuntu-20.04-64-cores
|
||||
- ec2-c6i.xlarge-amd64 # 4C8G
|
||||
- ec2-c6i.2xlarge-amd64 # 8C16G
|
||||
- ec2-c6i.4xlarge-amd64 # 16C32G
|
||||
@@ -55,11 +55,6 @@ on:
|
||||
description: Build and push images to DockerHub and ACR
|
||||
required: false
|
||||
default: true
|
||||
upload_artifacts_to_s3:
|
||||
type: boolean
|
||||
description: Whether upload artifacts to s3
|
||||
required: false
|
||||
default: false
|
||||
cargo_profile:
|
||||
type: choice
|
||||
description: The cargo profile to use in building GreptimeDB.
|
||||
@@ -81,6 +76,9 @@ env:
|
||||
|
||||
NIGHTLY_RELEASE_PREFIX: nightly
|
||||
|
||||
# Use the different image name to avoid conflict with the release images.
|
||||
IMAGE_NAME: greptimedb-dev
|
||||
|
||||
# The source code will check out in the following path: '${WORKING_DIR}/dev/greptime'.
|
||||
CHECKOUT_GREPTIMEDB_PATH: dev/greptimedb
|
||||
|
||||
@@ -88,7 +86,7 @@ jobs:
|
||||
allocate-runners:
|
||||
name: Allocate runners
|
||||
if: ${{ github.repository == 'GreptimeTeam/greptimedb' }}
|
||||
runs-on: ubuntu-latest
|
||||
runs-on: ubuntu-20.04
|
||||
outputs:
|
||||
linux-amd64-runner: ${{ steps.start-linux-amd64-runner.outputs.label }}
|
||||
linux-arm64-runner: ${{ steps.start-linux-arm64-runner.outputs.label }}
|
||||
@@ -103,10 +101,9 @@ jobs:
|
||||
version: ${{ steps.create-version.outputs.version }}
|
||||
steps:
|
||||
- name: Checkout
|
||||
uses: actions/checkout@v4
|
||||
uses: actions/checkout@v3
|
||||
with:
|
||||
fetch-depth: 0
|
||||
persist-credentials: false
|
||||
|
||||
- name: Create version
|
||||
id: create-version
|
||||
@@ -158,18 +155,16 @@ jobs:
|
||||
runs-on: ${{ needs.allocate-runners.outputs.linux-amd64-runner }}
|
||||
steps:
|
||||
- name: Checkout
|
||||
uses: actions/checkout@v4
|
||||
uses: actions/checkout@v3
|
||||
with:
|
||||
fetch-depth: 0
|
||||
persist-credentials: false
|
||||
|
||||
- name: Checkout greptimedb
|
||||
uses: actions/checkout@v4
|
||||
uses: actions/checkout@v3
|
||||
with:
|
||||
repository: ${{ inputs.repository }}
|
||||
ref: ${{ inputs.commit }}
|
||||
path: ${{ env.CHECKOUT_GREPTIMEDB_PATH }}
|
||||
persist-credentials: true
|
||||
|
||||
- uses: ./.github/actions/build-linux-artifacts
|
||||
with:
|
||||
@@ -179,8 +174,6 @@ jobs:
|
||||
disable-run-tests: ${{ env.DISABLE_RUN_TESTS }}
|
||||
dev-mode: true # Only build the standard greptime binary.
|
||||
working-dir: ${{ env.CHECKOUT_GREPTIMEDB_PATH }}
|
||||
image-registry: ${{ vars.ECR_IMAGE_REGISTRY }}
|
||||
image-namespace: ${{ vars.ECR_IMAGE_NAMESPACE }}
|
||||
|
||||
build-linux-arm64-artifacts:
|
||||
name: Build linux-arm64 artifacts
|
||||
@@ -191,18 +184,16 @@ jobs:
|
||||
runs-on: ${{ needs.allocate-runners.outputs.linux-arm64-runner }}
|
||||
steps:
|
||||
- name: Checkout
|
||||
uses: actions/checkout@v4
|
||||
uses: actions/checkout@v3
|
||||
with:
|
||||
fetch-depth: 0
|
||||
persist-credentials: false
|
||||
|
||||
- name: Checkout greptimedb
|
||||
uses: actions/checkout@v4
|
||||
uses: actions/checkout@v3
|
||||
with:
|
||||
repository: ${{ inputs.repository }}
|
||||
ref: ${{ inputs.commit }}
|
||||
path: ${{ env.CHECKOUT_GREPTIMEDB_PATH }}
|
||||
persist-credentials: true
|
||||
|
||||
- uses: ./.github/actions/build-linux-artifacts
|
||||
with:
|
||||
@@ -212,8 +203,6 @@ jobs:
|
||||
disable-run-tests: ${{ env.DISABLE_RUN_TESTS }}
|
||||
dev-mode: true # Only build the standard greptime binary.
|
||||
working-dir: ${{ env.CHECKOUT_GREPTIMEDB_PATH }}
|
||||
image-registry: ${{ vars.ECR_IMAGE_REGISTRY }}
|
||||
image-namespace: ${{ vars.ECR_IMAGE_NAMESPACE }}
|
||||
|
||||
release-images-to-dockerhub:
|
||||
name: Build and push images to DockerHub
|
||||
@@ -223,34 +212,26 @@ jobs:
|
||||
build-linux-amd64-artifacts,
|
||||
build-linux-arm64-artifacts,
|
||||
]
|
||||
runs-on: ubuntu-latest
|
||||
runs-on: ubuntu-20.04
|
||||
outputs:
|
||||
build-result: ${{ steps.set-build-result.outputs.build-result }}
|
||||
steps:
|
||||
- uses: actions/checkout@v4
|
||||
- uses: actions/checkout@v3
|
||||
with:
|
||||
fetch-depth: 0
|
||||
persist-credentials: false
|
||||
|
||||
- name: Build and push images to dockerhub
|
||||
uses: ./.github/actions/build-images
|
||||
with:
|
||||
image-registry: docker.io
|
||||
image-namespace: ${{ vars.IMAGE_NAMESPACE }}
|
||||
image-name: ${{ vars.DEV_BUILD_IMAGE_NAME }}
|
||||
image-name: ${{ env.IMAGE_NAME }}
|
||||
image-registry-username: ${{ secrets.DOCKERHUB_USERNAME }}
|
||||
image-registry-password: ${{ secrets.DOCKERHUB_TOKEN }}
|
||||
version: ${{ needs.allocate-runners.outputs.version }}
|
||||
push-latest-tag: false # Don't push the latest tag to registry.
|
||||
dev-mode: true # Only build the standard images.
|
||||
|
||||
- name: Echo Docker image tag to step summary
|
||||
run: |
|
||||
echo "## Docker Image Tag" >> $GITHUB_STEP_SUMMARY
|
||||
echo "Image Tag: \`${{ needs.allocate-runners.outputs.version }}\`" >> $GITHUB_STEP_SUMMARY
|
||||
echo "Full Image Name: \`docker.io/${{ vars.IMAGE_NAMESPACE }}/${{ vars.DEV_BUILD_IMAGE_NAME }}:${{ needs.allocate-runners.outputs.version }}\`" >> $GITHUB_STEP_SUMMARY
|
||||
echo "Pull Command: \`docker pull docker.io/${{ vars.IMAGE_NAMESPACE }}/${{ vars.DEV_BUILD_IMAGE_NAME }}:${{ needs.allocate-runners.outputs.version }}\`" >> $GITHUB_STEP_SUMMARY
|
||||
|
||||
- name: Set build result
|
||||
id: set-build-result
|
||||
run: |
|
||||
@@ -263,20 +244,19 @@ jobs:
|
||||
allocate-runners,
|
||||
release-images-to-dockerhub,
|
||||
]
|
||||
runs-on: ubuntu-latest
|
||||
runs-on: ubuntu-20.04
|
||||
continue-on-error: true
|
||||
steps:
|
||||
- uses: actions/checkout@v4
|
||||
- uses: actions/checkout@v3
|
||||
with:
|
||||
fetch-depth: 0
|
||||
persist-credentials: false
|
||||
|
||||
- name: Release artifacts to CN region
|
||||
uses: ./.github/actions/release-cn-artifacts
|
||||
with:
|
||||
src-image-registry: docker.io
|
||||
src-image-namespace: ${{ vars.IMAGE_NAMESPACE }}
|
||||
src-image-name: ${{ vars.DEV_BUILD_IMAGE_NAME }}
|
||||
src-image-name: ${{ env.IMAGE_NAME }}
|
||||
dst-image-registry-username: ${{ secrets.ALICLOUD_USERNAME }}
|
||||
dst-image-registry-password: ${{ secrets.ALICLOUD_PASSWORD }}
|
||||
dst-image-registry: ${{ vars.ACR_IMAGE_REGISTRY }}
|
||||
@@ -286,7 +266,6 @@ jobs:
|
||||
aws-cn-access-key-id: ${{ secrets.AWS_CN_ACCESS_KEY_ID }}
|
||||
aws-cn-secret-access-key: ${{ secrets.AWS_CN_SECRET_ACCESS_KEY }}
|
||||
aws-cn-region: ${{ vars.AWS_RELEASE_BUCKET_REGION }}
|
||||
upload-to-s3: ${{ inputs.upload_artifacts_to_s3 }}
|
||||
dev-mode: true # Only build the standard images(exclude centos images).
|
||||
push-latest-tag: false # Don't push the latest tag to registry.
|
||||
update-version-info: false # Don't update the version info in S3.
|
||||
@@ -295,17 +274,16 @@ jobs:
|
||||
name: Stop linux-amd64 runner
|
||||
# Only run this job when the runner is allocated.
|
||||
if: ${{ always() }}
|
||||
runs-on: ubuntu-latest
|
||||
runs-on: ubuntu-20.04
|
||||
needs: [
|
||||
allocate-runners,
|
||||
build-linux-amd64-artifacts,
|
||||
]
|
||||
steps:
|
||||
- name: Checkout
|
||||
uses: actions/checkout@v4
|
||||
uses: actions/checkout@v3
|
||||
with:
|
||||
fetch-depth: 0
|
||||
persist-credentials: false
|
||||
|
||||
- name: Stop EC2 runner
|
||||
uses: ./.github/actions/stop-runner
|
||||
@@ -321,17 +299,16 @@ jobs:
|
||||
name: Stop linux-arm64 runner
|
||||
# Only run this job when the runner is allocated.
|
||||
if: ${{ always() }}
|
||||
runs-on: ubuntu-latest
|
||||
runs-on: ubuntu-20.04
|
||||
needs: [
|
||||
allocate-runners,
|
||||
build-linux-arm64-artifacts,
|
||||
]
|
||||
steps:
|
||||
- name: Checkout
|
||||
uses: actions/checkout@v4
|
||||
uses: actions/checkout@v3
|
||||
with:
|
||||
fetch-depth: 0
|
||||
persist-credentials: false
|
||||
|
||||
- name: Stop EC2 runner
|
||||
uses: ./.github/actions/stop-runner
|
||||
@@ -344,40 +321,25 @@ jobs:
|
||||
github-token: ${{ secrets.GH_PERSONAL_ACCESS_TOKEN }}
|
||||
|
||||
notification:
|
||||
if: ${{ github.repository == 'GreptimeTeam/greptimedb' && always() }} # Not requiring successful dependent jobs, always run.
|
||||
if: ${{ always() }} # Not requiring successful dependent jobs, always run.
|
||||
name: Send notification to Greptime team
|
||||
needs: [
|
||||
release-images-to-dockerhub
|
||||
]
|
||||
runs-on: ubuntu-latest
|
||||
permissions:
|
||||
issues: write
|
||||
|
||||
runs-on: ubuntu-20.04
|
||||
env:
|
||||
SLACK_WEBHOOK_URL: ${{ secrets.SLACK_WEBHOOK_URL_DEVELOP_CHANNEL }}
|
||||
steps:
|
||||
- uses: actions/checkout@v4
|
||||
with:
|
||||
fetch-depth: 0
|
||||
persist-credentials: false
|
||||
- uses: ./.github/actions/setup-cyborg
|
||||
- name: Report CI status
|
||||
id: report-ci-status
|
||||
working-directory: cyborg
|
||||
run: pnpm tsx bin/report-ci-failure.ts
|
||||
env:
|
||||
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
|
||||
CI_REPORT_STATUS: ${{ needs.release-images-to-dockerhub.outputs.build-result == 'success' }}
|
||||
- name: Notify dev build successful result
|
||||
- name: Notifiy nightly build successful result
|
||||
uses: slackapi/slack-github-action@v1.23.0
|
||||
if: ${{ needs.release-images-to-dockerhub.outputs.build-result == 'success' }}
|
||||
with:
|
||||
payload: |
|
||||
{"text": "GreptimeDB's ${{ env.NEXT_RELEASE_VERSION }} build has completed successfully."}
|
||||
|
||||
- name: Notify dev build failed result
|
||||
- name: Notifiy nightly build failed result
|
||||
uses: slackapi/slack-github-action@v1.23.0
|
||||
if: ${{ needs.release-images-to-dockerhub.outputs.build-result != 'success' }}
|
||||
with:
|
||||
payload: |
|
||||
{"text": "GreptimeDB's ${{ env.NEXT_RELEASE_VERSION }} build has failed, please check ${{ steps.report-ci-status.outputs.html_url }}."}
|
||||
{"text": "GreptimeDB's ${{ env.NEXT_RELEASE_VERSION }} build has failed, please check 'https://github.com/GreptimeTeam/greptimedb/actions/workflows/${{ env.NEXT_RELEASE_VERSION }}-build.yml'."}
|
||||
|
||||
855
.github/workflows/develop.yml
vendored
855
.github/workflows/develop.yml
vendored
@@ -1,9 +1,18 @@
|
||||
on:
|
||||
schedule:
|
||||
- cron: "0 15 * * 1-5"
|
||||
merge_group:
|
||||
pull_request:
|
||||
types: [ opened, synchronize, reopened, ready_for_review ]
|
||||
types: [opened, synchronize, reopened, ready_for_review]
|
||||
paths-ignore:
|
||||
- 'docs/**'
|
||||
- 'config/**'
|
||||
- '**.md'
|
||||
- '.dockerignore'
|
||||
- 'docker/**'
|
||||
- '.gitignore'
|
||||
push:
|
||||
branches:
|
||||
- develop
|
||||
- main
|
||||
paths-ignore:
|
||||
- 'docs/**'
|
||||
- 'config/**'
|
||||
@@ -11,8 +20,6 @@ on:
|
||||
- '.dockerignore'
|
||||
- 'docker/**'
|
||||
- '.gitignore'
|
||||
- 'grafana/**'
|
||||
- 'Makefile'
|
||||
workflow_dispatch:
|
||||
|
||||
name: CI
|
||||
@@ -21,842 +28,174 @@ concurrency:
|
||||
group: ${{ github.workflow }}-${{ github.head_ref || github.run_id }}
|
||||
cancel-in-progress: true
|
||||
|
||||
jobs:
|
||||
check-typos-and-docs:
|
||||
if: ${{ github.repository == 'GreptimeTeam/greptimedb' }}
|
||||
name: Check typos and docs
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- uses: actions/checkout@v4
|
||||
with:
|
||||
persist-credentials: false
|
||||
- uses: crate-ci/typos@master
|
||||
- name: Check the config docs
|
||||
run: |
|
||||
make config-docs && \
|
||||
git diff --name-only --exit-code ./config/config.md \
|
||||
|| (echo "'config/config.md' is not up-to-date, please run 'make config-docs'." && exit 1)
|
||||
env:
|
||||
RUST_TOOLCHAIN: nightly-2023-12-19
|
||||
|
||||
license-header-check:
|
||||
if: ${{ github.repository == 'GreptimeTeam/greptimedb' }}
|
||||
runs-on: ubuntu-latest
|
||||
name: Check License Header
|
||||
jobs:
|
||||
typos:
|
||||
name: Spell Check with Typos
|
||||
runs-on: ubuntu-20.04
|
||||
steps:
|
||||
- uses: actions/checkout@v4
|
||||
with:
|
||||
persist-credentials: false
|
||||
- uses: korandoru/hawkeye@v5
|
||||
- uses: actions/checkout@v3
|
||||
- uses: crate-ci/typos@v1.13.10
|
||||
|
||||
check:
|
||||
if: ${{ github.repository == 'GreptimeTeam/greptimedb' }}
|
||||
name: Check
|
||||
if: github.event.pull_request.draft == false
|
||||
runs-on: ${{ matrix.os }}
|
||||
strategy:
|
||||
matrix:
|
||||
os: [ ubuntu-latest ]
|
||||
os: [ windows-latest-8-cores, ubuntu-20.04 ]
|
||||
timeout-minutes: 60
|
||||
steps:
|
||||
- uses: actions/checkout@v4
|
||||
with:
|
||||
persist-credentials: false
|
||||
- uses: arduino/setup-protoc@v3
|
||||
- uses: actions/checkout@v3
|
||||
- uses: arduino/setup-protoc@v1
|
||||
with:
|
||||
repo-token: ${{ secrets.GITHUB_TOKEN }}
|
||||
- uses: actions-rust-lang/setup-rust-toolchain@v1
|
||||
- uses: dtolnay/rust-toolchain@master
|
||||
with:
|
||||
toolchain: ${{ env.RUST_TOOLCHAIN }}
|
||||
- name: Rust Cache
|
||||
uses: Swatinem/rust-cache@v2
|
||||
with:
|
||||
# Shares across multiple jobs
|
||||
# Shares with `Clippy` job
|
||||
shared-key: "check-lint"
|
||||
cache-all-crates: "true"
|
||||
save-if: ${{ github.ref == 'refs/heads/main' }}
|
||||
- name: Run cargo check
|
||||
run: cargo check --locked --workspace --all-targets
|
||||
|
||||
toml:
|
||||
if: ${{ github.repository == 'GreptimeTeam/greptimedb' }}
|
||||
name: Toml Check
|
||||
runs-on: ubuntu-latest
|
||||
if: github.event.pull_request.draft == false
|
||||
runs-on: ubuntu-20.04
|
||||
timeout-minutes: 60
|
||||
steps:
|
||||
- uses: actions/checkout@v4
|
||||
- uses: actions/checkout@v3
|
||||
- uses: dtolnay/rust-toolchain@master
|
||||
with:
|
||||
persist-credentials: false
|
||||
- uses: actions-rust-lang/setup-rust-toolchain@v1
|
||||
toolchain: stable
|
||||
- name: Rust Cache
|
||||
uses: Swatinem/rust-cache@v2
|
||||
- name: Install taplo
|
||||
run: cargo +stable install taplo-cli --version ^0.9 --locked --force
|
||||
run: cargo +stable install taplo-cli --version ^0.8 --locked
|
||||
- name: Run taplo
|
||||
run: taplo format --check
|
||||
|
||||
build:
|
||||
if: ${{ github.repository == 'GreptimeTeam/greptimedb' }}
|
||||
name: Build GreptimeDB binaries
|
||||
runs-on: ${{ matrix.os }}
|
||||
strategy:
|
||||
matrix:
|
||||
os: [ ubuntu-latest ]
|
||||
timeout-minutes: 60
|
||||
steps:
|
||||
- uses: actions/checkout@v4
|
||||
with:
|
||||
persist-credentials: false
|
||||
- uses: arduino/setup-protoc@v3
|
||||
with:
|
||||
repo-token: ${{ secrets.GITHUB_TOKEN }}
|
||||
- uses: actions-rust-lang/setup-rust-toolchain@v1
|
||||
- uses: Swatinem/rust-cache@v2
|
||||
with:
|
||||
# Shares across multiple jobs
|
||||
shared-key: "build-binaries"
|
||||
cache-all-crates: "true"
|
||||
save-if: ${{ github.ref == 'refs/heads/main' }}
|
||||
- name: Install cargo-gc-bin
|
||||
shell: bash
|
||||
run: cargo install cargo-gc-bin --force
|
||||
- name: Build greptime binaries
|
||||
shell: bash
|
||||
# `cargo gc` will invoke `cargo build` with specified args
|
||||
run: cargo gc -- --bin greptime --bin sqlness-runner --features "pg_kvbackend,mysql_kvbackend"
|
||||
- name: Pack greptime binaries
|
||||
shell: bash
|
||||
run: |
|
||||
mkdir bins && \
|
||||
mv ./target/debug/greptime bins && \
|
||||
mv ./target/debug/sqlness-runner bins
|
||||
- name: Print greptime binaries info
|
||||
run: ls -lh bins
|
||||
- name: Upload artifacts
|
||||
uses: ./.github/actions/upload-artifacts
|
||||
with:
|
||||
artifacts-dir: bins
|
||||
version: current
|
||||
|
||||
fuzztest:
|
||||
if: ${{ github.repository == 'GreptimeTeam/greptimedb' }}
|
||||
name: Fuzz Test
|
||||
needs: build
|
||||
runs-on: ubuntu-latest
|
||||
timeout-minutes: 60
|
||||
strategy:
|
||||
fail-fast: false
|
||||
matrix:
|
||||
target: [ "fuzz_create_table", "fuzz_alter_table", "fuzz_create_database", "fuzz_create_logical_table", "fuzz_alter_logical_table", "fuzz_insert", "fuzz_insert_logical_table" ]
|
||||
steps:
|
||||
- name: Remove unused software
|
||||
run: |
|
||||
echo "Disk space before:"
|
||||
df -h
|
||||
[[ -d /usr/share/dotnet ]] && sudo rm -rf /usr/share/dotnet
|
||||
[[ -d /usr/local/lib/android ]] && sudo rm -rf /usr/local/lib/android
|
||||
[[ -d /opt/ghc ]] && sudo rm -rf /opt/ghc
|
||||
[[ -d /opt/hostedtoolcache/CodeQL ]] && sudo rm -rf /opt/hostedtoolcache/CodeQL
|
||||
sudo docker image prune --all --force
|
||||
sudo docker builder prune -a
|
||||
echo "Disk space after:"
|
||||
df -h
|
||||
- uses: actions/checkout@v4
|
||||
with:
|
||||
persist-credentials: false
|
||||
- uses: arduino/setup-protoc@v3
|
||||
with:
|
||||
repo-token: ${{ secrets.GITHUB_TOKEN }}
|
||||
- uses: actions-rust-lang/setup-rust-toolchain@v1
|
||||
- name: Set Rust Fuzz
|
||||
shell: bash
|
||||
run: |
|
||||
sudo apt-get install -y libfuzzer-14-dev
|
||||
rustup install nightly
|
||||
cargo +nightly install cargo-fuzz cargo-gc-bin --force
|
||||
- name: Download pre-built binaries
|
||||
uses: actions/download-artifact@v4
|
||||
with:
|
||||
name: bins
|
||||
path: .
|
||||
- name: Unzip binaries
|
||||
run: |
|
||||
tar -xvf ./bins.tar.gz
|
||||
rm ./bins.tar.gz
|
||||
- name: Run GreptimeDB
|
||||
run: |
|
||||
./bins/greptime standalone start&
|
||||
- name: Fuzz Test
|
||||
uses: ./.github/actions/fuzz-test
|
||||
env:
|
||||
CUSTOM_LIBFUZZER_PATH: /usr/lib/llvm-14/lib/libFuzzer.a
|
||||
GT_MYSQL_ADDR: 127.0.0.1:4002
|
||||
with:
|
||||
target: ${{ matrix.target }}
|
||||
max-total-time: 120
|
||||
|
||||
unstable-fuzztest:
|
||||
if: ${{ github.repository == 'GreptimeTeam/greptimedb' }}
|
||||
name: Unstable Fuzz Test
|
||||
needs: build-greptime-ci
|
||||
runs-on: ubuntu-latest
|
||||
timeout-minutes: 60
|
||||
strategy:
|
||||
fail-fast: false
|
||||
matrix:
|
||||
target: [ "unstable_fuzz_create_table_standalone" ]
|
||||
steps:
|
||||
- name: Remove unused software
|
||||
run: |
|
||||
echo "Disk space before:"
|
||||
df -h
|
||||
[[ -d /usr/share/dotnet ]] && sudo rm -rf /usr/share/dotnet
|
||||
[[ -d /usr/local/lib/android ]] && sudo rm -rf /usr/local/lib/android
|
||||
[[ -d /opt/ghc ]] && sudo rm -rf /opt/ghc
|
||||
[[ -d /opt/hostedtoolcache/CodeQL ]] && sudo rm -rf /opt/hostedtoolcache/CodeQL
|
||||
sudo docker image prune --all --force
|
||||
sudo docker builder prune -a
|
||||
echo "Disk space after:"
|
||||
df -h
|
||||
- uses: actions/checkout@v4
|
||||
with:
|
||||
persist-credentials: false
|
||||
- uses: arduino/setup-protoc@v3
|
||||
with:
|
||||
repo-token: ${{ secrets.GITHUB_TOKEN }}
|
||||
- uses: actions-rust-lang/setup-rust-toolchain@v1
|
||||
- name: Set Rust Fuzz
|
||||
shell: bash
|
||||
run: |
|
||||
sudo apt update && sudo apt install -y libfuzzer-14-dev
|
||||
cargo install cargo-fuzz cargo-gc-bin --force
|
||||
- name: Download pre-built binary
|
||||
uses: actions/download-artifact@v4
|
||||
with:
|
||||
name: bin
|
||||
path: .
|
||||
- name: Unzip binary
|
||||
run: |
|
||||
tar -xvf ./bin.tar.gz
|
||||
rm ./bin.tar.gz
|
||||
- name: Run Fuzz Test
|
||||
uses: ./.github/actions/fuzz-test
|
||||
env:
|
||||
CUSTOM_LIBFUZZER_PATH: /usr/lib/llvm-14/lib/libFuzzer.a
|
||||
GT_MYSQL_ADDR: 127.0.0.1:4002
|
||||
GT_FUZZ_BINARY_PATH: ./bin/greptime
|
||||
GT_FUZZ_INSTANCE_ROOT_DIR: /tmp/unstable-greptime/
|
||||
with:
|
||||
target: ${{ matrix.target }}
|
||||
max-total-time: 120
|
||||
unstable: 'true'
|
||||
- name: Upload unstable fuzz test logs
|
||||
if: failure()
|
||||
uses: actions/upload-artifact@v4
|
||||
with:
|
||||
name: unstable-fuzz-logs
|
||||
path: /tmp/unstable-greptime/
|
||||
retention-days: 3
|
||||
- name: Describe pods
|
||||
if: failure()
|
||||
shell: bash
|
||||
run: |
|
||||
kubectl describe pod -n my-greptimedb
|
||||
|
||||
build-greptime-ci:
|
||||
if: ${{ github.repository == 'GreptimeTeam/greptimedb' }}
|
||||
name: Build GreptimeDB binary (profile-CI)
|
||||
runs-on: ${{ matrix.os }}
|
||||
strategy:
|
||||
matrix:
|
||||
os: [ ubuntu-latest ]
|
||||
timeout-minutes: 60
|
||||
steps:
|
||||
- uses: actions/checkout@v4
|
||||
with:
|
||||
persist-credentials: false
|
||||
- uses: arduino/setup-protoc@v3
|
||||
with:
|
||||
repo-token: ${{ secrets.GITHUB_TOKEN }}
|
||||
- uses: actions-rust-lang/setup-rust-toolchain@v1
|
||||
- uses: Swatinem/rust-cache@v2
|
||||
with:
|
||||
# Shares across multiple jobs
|
||||
shared-key: "build-greptime-ci"
|
||||
cache-all-crates: "true"
|
||||
save-if: ${{ github.ref == 'refs/heads/main' }}
|
||||
- name: Install cargo-gc-bin
|
||||
shell: bash
|
||||
run: cargo install cargo-gc-bin --force
|
||||
- name: Build greptime binary
|
||||
shell: bash
|
||||
# `cargo gc` will invoke `cargo build` with specified args
|
||||
run: cargo gc --profile ci -- --bin greptime --features "pg_kvbackend,mysql_kvbackend"
|
||||
- name: Pack greptime binary
|
||||
shell: bash
|
||||
run: |
|
||||
mkdir bin && \
|
||||
mv ./target/ci/greptime bin
|
||||
- name: Print greptime binaries info
|
||||
run: ls -lh bin
|
||||
- name: Upload artifacts
|
||||
uses: ./.github/actions/upload-artifacts
|
||||
with:
|
||||
artifacts-dir: bin
|
||||
version: current
|
||||
|
||||
distributed-fuzztest:
|
||||
if: ${{ github.repository == 'GreptimeTeam/greptimedb' }}
|
||||
name: Fuzz Test (Distributed, ${{ matrix.mode.name }}, ${{ matrix.target }})
|
||||
runs-on: ubuntu-latest
|
||||
needs: build-greptime-ci
|
||||
timeout-minutes: 60
|
||||
strategy:
|
||||
fail-fast: false
|
||||
matrix:
|
||||
target: [ "fuzz_create_table", "fuzz_alter_table", "fuzz_create_database", "fuzz_create_logical_table", "fuzz_alter_logical_table", "fuzz_insert", "fuzz_insert_logical_table" ]
|
||||
mode:
|
||||
- name: "Remote WAL"
|
||||
minio: true
|
||||
kafka: true
|
||||
values: "with-remote-wal.yaml"
|
||||
steps:
|
||||
- name: Remove unused software
|
||||
run: |
|
||||
echo "Disk space before:"
|
||||
df -h
|
||||
[[ -d /usr/share/dotnet ]] && sudo rm -rf /usr/share/dotnet
|
||||
[[ -d /usr/local/lib/android ]] && sudo rm -rf /usr/local/lib/android
|
||||
[[ -d /opt/ghc ]] && sudo rm -rf /opt/ghc
|
||||
[[ -d /opt/hostedtoolcache/CodeQL ]] && sudo rm -rf /opt/hostedtoolcache/CodeQL
|
||||
sudo docker image prune --all --force
|
||||
sudo docker builder prune -a
|
||||
echo "Disk space after:"
|
||||
df -h
|
||||
- uses: actions/checkout@v4
|
||||
with:
|
||||
persist-credentials: false
|
||||
- name: Setup Kind
|
||||
uses: ./.github/actions/setup-kind
|
||||
- if: matrix.mode.minio
|
||||
name: Setup Minio
|
||||
uses: ./.github/actions/setup-minio
|
||||
- if: matrix.mode.kafka
|
||||
name: Setup Kafka cluster
|
||||
uses: ./.github/actions/setup-kafka-cluster
|
||||
- name: Setup Etcd cluster
|
||||
uses: ./.github/actions/setup-etcd-cluster
|
||||
# Prepares for fuzz tests
|
||||
- uses: arduino/setup-protoc@v3
|
||||
with:
|
||||
repo-token: ${{ secrets.GITHUB_TOKEN }}
|
||||
- uses: actions-rust-lang/setup-rust-toolchain@v1
|
||||
- name: Set Rust Fuzz
|
||||
shell: bash
|
||||
run: |
|
||||
sudo apt-get install -y libfuzzer-14-dev
|
||||
rustup install nightly
|
||||
cargo +nightly install cargo-fuzz cargo-gc-bin --force
|
||||
# Downloads ci image
|
||||
- name: Download pre-built binariy
|
||||
uses: actions/download-artifact@v4
|
||||
with:
|
||||
name: bin
|
||||
path: .
|
||||
- name: Unzip binary
|
||||
run: |
|
||||
tar -xvf ./bin.tar.gz
|
||||
rm ./bin.tar.gz
|
||||
- name: Build and push GreptimeDB image
|
||||
uses: ./.github/actions/build-and-push-ci-image
|
||||
- name: Wait for etcd
|
||||
run: |
|
||||
kubectl wait \
|
||||
--for=condition=Ready \
|
||||
pod -l app.kubernetes.io/instance=etcd \
|
||||
--timeout=120s \
|
||||
-n etcd-cluster
|
||||
- if: matrix.mode.minio
|
||||
name: Wait for minio
|
||||
run: |
|
||||
kubectl wait \
|
||||
--for=condition=Ready \
|
||||
pod -l app=minio \
|
||||
--timeout=120s \
|
||||
-n minio
|
||||
- if: matrix.mode.kafka
|
||||
name: Wait for kafka
|
||||
run: |
|
||||
kubectl wait \
|
||||
--for=condition=Ready \
|
||||
pod -l app.kubernetes.io/instance=kafka \
|
||||
--timeout=120s \
|
||||
-n kafka-cluster
|
||||
- name: Print etcd info
|
||||
shell: bash
|
||||
run: kubectl get all --show-labels -n etcd-cluster
|
||||
# Setup cluster for test
|
||||
- name: Setup GreptimeDB cluster
|
||||
uses: ./.github/actions/setup-greptimedb-cluster
|
||||
with:
|
||||
image-registry: localhost:5001
|
||||
values-filename: ${{ matrix.mode.values }}
|
||||
- name: Port forward (mysql)
|
||||
run: |
|
||||
kubectl port-forward service/my-greptimedb-frontend 4002:4002 -n my-greptimedb&
|
||||
- name: Fuzz Test
|
||||
uses: ./.github/actions/fuzz-test
|
||||
env:
|
||||
CUSTOM_LIBFUZZER_PATH: /usr/lib/llvm-14/lib/libFuzzer.a
|
||||
GT_MYSQL_ADDR: 127.0.0.1:4002
|
||||
with:
|
||||
target: ${{ matrix.target }}
|
||||
max-total-time: 120
|
||||
- name: Describe Nodes
|
||||
if: failure()
|
||||
shell: bash
|
||||
run: |
|
||||
kubectl describe nodes
|
||||
- name: Describe pod
|
||||
if: failure()
|
||||
shell: bash
|
||||
run: |
|
||||
kubectl describe pod -n my-greptimedb
|
||||
- name: Export kind logs
|
||||
if: failure()
|
||||
shell: bash
|
||||
run: |
|
||||
kind export logs /tmp/kind
|
||||
- name: Upload logs
|
||||
if: failure()
|
||||
uses: actions/upload-artifact@v4
|
||||
with:
|
||||
name: fuzz-tests-kind-logs-${{ matrix.mode.name }}-${{ matrix.target }}
|
||||
path: /tmp/kind
|
||||
retention-days: 3
|
||||
- name: Delete cluster
|
||||
if: success()
|
||||
shell: bash
|
||||
run: |
|
||||
kind delete cluster
|
||||
docker stop $(docker ps -a -q)
|
||||
docker rm $(docker ps -a -q)
|
||||
docker system prune -f
|
||||
|
||||
distributed-fuzztest-with-chaos:
|
||||
if: ${{ github.repository == 'GreptimeTeam/greptimedb' }}
|
||||
name: Fuzz Test with Chaos (Distributed, ${{ matrix.mode.name }}, ${{ matrix.target }})
|
||||
runs-on: ubuntu-latest
|
||||
needs: build-greptime-ci
|
||||
timeout-minutes: 60
|
||||
strategy:
|
||||
fail-fast: false
|
||||
matrix:
|
||||
target: ["fuzz_migrate_mito_regions", "fuzz_migrate_metric_regions", "fuzz_failover_mito_regions", "fuzz_failover_metric_regions"]
|
||||
mode:
|
||||
- name: "Remote WAL"
|
||||
minio: true
|
||||
kafka: true
|
||||
values: "with-remote-wal.yaml"
|
||||
include:
|
||||
- target: "fuzz_migrate_mito_regions"
|
||||
mode:
|
||||
name: "Local WAL"
|
||||
minio: true
|
||||
kafka: false
|
||||
values: "with-minio.yaml"
|
||||
- target: "fuzz_migrate_metric_regions"
|
||||
mode:
|
||||
name: "Local WAL"
|
||||
minio: true
|
||||
kafka: false
|
||||
values: "with-minio.yaml"
|
||||
steps:
|
||||
- name: Remove unused software
|
||||
run: |
|
||||
echo "Disk space before:"
|
||||
df -h
|
||||
[[ -d /usr/share/dotnet ]] && sudo rm -rf /usr/share/dotnet
|
||||
[[ -d /usr/local/lib/android ]] && sudo rm -rf /usr/local/lib/android
|
||||
[[ -d /opt/ghc ]] && sudo rm -rf /opt/ghc
|
||||
[[ -d /opt/hostedtoolcache/CodeQL ]] && sudo rm -rf /opt/hostedtoolcache/CodeQL
|
||||
sudo docker image prune --all --force
|
||||
sudo docker builder prune -a
|
||||
echo "Disk space after:"
|
||||
df -h
|
||||
- uses: actions/checkout@v4
|
||||
with:
|
||||
persist-credentials: false
|
||||
- name: Setup Kind
|
||||
uses: ./.github/actions/setup-kind
|
||||
- name: Setup Chaos Mesh
|
||||
uses: ./.github/actions/setup-chaos
|
||||
- if: matrix.mode.minio
|
||||
name: Setup Minio
|
||||
uses: ./.github/actions/setup-minio
|
||||
- if: matrix.mode.kafka
|
||||
name: Setup Kafka cluster
|
||||
uses: ./.github/actions/setup-kafka-cluster
|
||||
- name: Setup Etcd cluster
|
||||
uses: ./.github/actions/setup-etcd-cluster
|
||||
# Prepares for fuzz tests
|
||||
- uses: arduino/setup-protoc@v3
|
||||
with:
|
||||
repo-token: ${{ secrets.GITHUB_TOKEN }}
|
||||
- uses: actions-rust-lang/setup-rust-toolchain@v1
|
||||
- name: Set Rust Fuzz
|
||||
shell: bash
|
||||
run: |
|
||||
sudo apt-get install -y libfuzzer-14-dev
|
||||
rustup install nightly
|
||||
cargo +nightly install cargo-fuzz cargo-gc-bin --force
|
||||
# Downloads ci image
|
||||
- name: Download pre-built binariy
|
||||
uses: actions/download-artifact@v4
|
||||
with:
|
||||
name: bin
|
||||
path: .
|
||||
- name: Unzip binary
|
||||
run: |
|
||||
tar -xvf ./bin.tar.gz
|
||||
rm ./bin.tar.gz
|
||||
- name: Build and push GreptimeDB image
|
||||
uses: ./.github/actions/build-and-push-ci-image
|
||||
- name: Wait for etcd
|
||||
run: |
|
||||
kubectl wait \
|
||||
--for=condition=Ready \
|
||||
pod -l app.kubernetes.io/instance=etcd \
|
||||
--timeout=120s \
|
||||
-n etcd-cluster
|
||||
- if: matrix.mode.minio
|
||||
name: Wait for minio
|
||||
run: |
|
||||
kubectl wait \
|
||||
--for=condition=Ready \
|
||||
pod -l app=minio \
|
||||
--timeout=120s \
|
||||
-n minio
|
||||
- if: matrix.mode.kafka
|
||||
name: Wait for kafka
|
||||
run: |
|
||||
kubectl wait \
|
||||
--for=condition=Ready \
|
||||
pod -l app.kubernetes.io/instance=kafka \
|
||||
--timeout=120s \
|
||||
-n kafka-cluster
|
||||
- name: Print etcd info
|
||||
shell: bash
|
||||
run: kubectl get all --show-labels -n etcd-cluster
|
||||
# Setup cluster for test
|
||||
- name: Setup GreptimeDB cluster
|
||||
uses: ./.github/actions/setup-greptimedb-cluster
|
||||
with:
|
||||
image-registry: localhost:5001
|
||||
values-filename: ${{ matrix.mode.values }}
|
||||
enable-region-failover: ${{ matrix.mode.kafka }}
|
||||
- name: Port forward (mysql)
|
||||
run: |
|
||||
kubectl port-forward service/my-greptimedb-frontend 4002:4002 -n my-greptimedb&
|
||||
- name: Fuzz Test
|
||||
uses: ./.github/actions/fuzz-test
|
||||
env:
|
||||
CUSTOM_LIBFUZZER_PATH: /usr/lib/llvm-14/lib/libFuzzer.a
|
||||
GT_MYSQL_ADDR: 127.0.0.1:4002
|
||||
with:
|
||||
target: ${{ matrix.target }}
|
||||
max-total-time: 120
|
||||
- name: Describe Nodes
|
||||
if: failure()
|
||||
shell: bash
|
||||
run: |
|
||||
kubectl describe nodes
|
||||
- name: Describe pods
|
||||
if: failure()
|
||||
shell: bash
|
||||
run: |
|
||||
kubectl describe pod -n my-greptimedb
|
||||
- name: Export kind logs
|
||||
if: failure()
|
||||
shell: bash
|
||||
run: |
|
||||
kind export logs /tmp/kind
|
||||
- name: Upload logs
|
||||
if: failure()
|
||||
uses: actions/upload-artifact@v4
|
||||
with:
|
||||
name: fuzz-tests-kind-logs-${{ matrix.mode.name }}-${{ matrix.target }}
|
||||
path: /tmp/kind
|
||||
retention-days: 3
|
||||
- name: Delete cluster
|
||||
if: success()
|
||||
shell: bash
|
||||
run: |
|
||||
kind delete cluster
|
||||
docker stop $(docker ps -a -q)
|
||||
docker rm $(docker ps -a -q)
|
||||
docker system prune -f
|
||||
|
||||
sqlness:
|
||||
if: ${{ github.repository == 'GreptimeTeam/greptimedb' }}
|
||||
name: Sqlness Test (${{ matrix.mode.name }})
|
||||
needs: build
|
||||
name: Sqlness Test
|
||||
if: github.event.pull_request.draft == false
|
||||
runs-on: ${{ matrix.os }}
|
||||
strategy:
|
||||
fail-fast: false
|
||||
matrix:
|
||||
os: [ ubuntu-latest ]
|
||||
mode:
|
||||
- name: "Basic"
|
||||
opts: ""
|
||||
kafka: false
|
||||
- name: "Remote WAL"
|
||||
opts: "-w kafka -k 127.0.0.1:9092"
|
||||
kafka: true
|
||||
- name: "PostgreSQL KvBackend"
|
||||
opts: "--setup-pg"
|
||||
kafka: false
|
||||
- name: "MySQL Kvbackend"
|
||||
opts: "--setup-mysql"
|
||||
kafka: false
|
||||
os: [ ubuntu-20.04-8-cores ]
|
||||
timeout-minutes: 60
|
||||
steps:
|
||||
- uses: actions/checkout@v4
|
||||
- uses: actions/checkout@v3
|
||||
- uses: arduino/setup-protoc@v1
|
||||
with:
|
||||
persist-credentials: false
|
||||
|
||||
- if: matrix.mode.kafka
|
||||
name: Setup kafka server
|
||||
working-directory: tests-integration/fixtures
|
||||
run: ../../.github/scripts/pull-test-deps-images.sh && docker compose up -d --wait kafka
|
||||
|
||||
- name: Download pre-built binaries
|
||||
uses: actions/download-artifact@v4
|
||||
repo-token: ${{ secrets.GITHUB_TOKEN }}
|
||||
- uses: dtolnay/rust-toolchain@master
|
||||
with:
|
||||
name: bins
|
||||
path: .
|
||||
- name: Unzip binaries
|
||||
run: tar -xvf ./bins.tar.gz
|
||||
toolchain: ${{ env.RUST_TOOLCHAIN }}
|
||||
- name: Rust Cache
|
||||
uses: Swatinem/rust-cache@v2
|
||||
- name: Run sqlness
|
||||
run: RUST_BACKTRACE=1 ./bins/sqlness-runner ${{ matrix.mode.opts }} -c ./tests/cases --bins-dir ./bins --preserve-state
|
||||
run: cargo sqlness
|
||||
- name: Upload sqlness logs
|
||||
if: failure()
|
||||
uses: actions/upload-artifact@v4
|
||||
if: always()
|
||||
uses: actions/upload-artifact@v3
|
||||
with:
|
||||
name: sqlness-logs-${{ matrix.mode.name }}
|
||||
path: /tmp/sqlness*
|
||||
name: sqlness-logs
|
||||
path: ${{ runner.temp }}/greptime-*.log
|
||||
retention-days: 3
|
||||
|
||||
fmt:
|
||||
if: ${{ github.repository == 'GreptimeTeam/greptimedb' }}
|
||||
name: Rustfmt
|
||||
runs-on: ubuntu-latest
|
||||
if: github.event.pull_request.draft == false
|
||||
runs-on: ubuntu-20.04
|
||||
timeout-minutes: 60
|
||||
steps:
|
||||
- uses: actions/checkout@v4
|
||||
with:
|
||||
persist-credentials: false
|
||||
- uses: arduino/setup-protoc@v3
|
||||
- uses: actions/checkout@v3
|
||||
- uses: arduino/setup-protoc@v1
|
||||
with:
|
||||
repo-token: ${{ secrets.GITHUB_TOKEN }}
|
||||
- uses: actions-rust-lang/setup-rust-toolchain@v1
|
||||
- uses: dtolnay/rust-toolchain@master
|
||||
with:
|
||||
toolchain: ${{ env.RUST_TOOLCHAIN }}
|
||||
components: rustfmt
|
||||
- name: Check format
|
||||
run: make fmt-check
|
||||
- name: Rust Cache
|
||||
uses: Swatinem/rust-cache@v2
|
||||
- name: Run cargo fmt
|
||||
run: cargo fmt --all -- --check
|
||||
|
||||
clippy:
|
||||
if: ${{ github.repository == 'GreptimeTeam/greptimedb' }}
|
||||
name: Clippy
|
||||
runs-on: ubuntu-latest
|
||||
if: github.event.pull_request.draft == false
|
||||
runs-on: ubuntu-20.04
|
||||
timeout-minutes: 60
|
||||
steps:
|
||||
- uses: actions/checkout@v4
|
||||
with:
|
||||
persist-credentials: false
|
||||
- uses: arduino/setup-protoc@v3
|
||||
- uses: actions/checkout@v3
|
||||
- uses: arduino/setup-protoc@v1
|
||||
with:
|
||||
repo-token: ${{ secrets.GITHUB_TOKEN }}
|
||||
- uses: actions-rust-lang/setup-rust-toolchain@v1
|
||||
- uses: dtolnay/rust-toolchain@master
|
||||
with:
|
||||
toolchain: ${{ env.RUST_TOOLCHAIN }}
|
||||
components: clippy
|
||||
- name: Rust Cache
|
||||
uses: Swatinem/rust-cache@v2
|
||||
with:
|
||||
# Shares across multiple jobs
|
||||
# Shares with `Check` job
|
||||
shared-key: "check-lint"
|
||||
cache-all-crates: "true"
|
||||
save-if: ${{ github.ref == 'refs/heads/main' }}
|
||||
- name: Run cargo clippy
|
||||
run: make clippy
|
||||
|
||||
check-udeps:
|
||||
if: ${{ github.repository == 'GreptimeTeam/greptimedb' }}
|
||||
name: Check Unused Dependencies
|
||||
runs-on: ubuntu-latest
|
||||
timeout-minutes: 60
|
||||
steps:
|
||||
- uses: actions/checkout@v4
|
||||
with:
|
||||
persist-credentials: false
|
||||
- uses: arduino/setup-protoc@v3
|
||||
with:
|
||||
repo-token: ${{ secrets.GITHUB_TOKEN }}
|
||||
- uses: actions-rust-lang/setup-rust-toolchain@v1
|
||||
- name: Rust Cache
|
||||
uses: Swatinem/rust-cache@v2
|
||||
with:
|
||||
shared-key: "check-udeps"
|
||||
cache-all-crates: "true"
|
||||
save-if: ${{ github.ref == 'refs/heads/main' }}
|
||||
- name: Install cargo-udeps
|
||||
run: cargo install cargo-udeps --locked
|
||||
- name: Check unused dependencies
|
||||
run: make check-udeps
|
||||
|
||||
conflict-check:
|
||||
if: ${{ github.repository == 'GreptimeTeam/greptimedb' }}
|
||||
name: Check for conflict
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- uses: actions/checkout@v4
|
||||
with:
|
||||
persist-credentials: false
|
||||
- name: Merge Conflict Finder
|
||||
uses: olivernybroe/action-conflict-finder@v4.0
|
||||
|
||||
test:
|
||||
if: ${{ github.repository == 'GreptimeTeam/greptimedb' && github.event_name != 'merge_group' }}
|
||||
runs-on: ubuntu-22.04-arm
|
||||
timeout-minutes: 60
|
||||
needs: [conflict-check, clippy, fmt, check-udeps]
|
||||
steps:
|
||||
- uses: actions/checkout@v4
|
||||
with:
|
||||
persist-credentials: false
|
||||
- uses: arduino/setup-protoc@v3
|
||||
with:
|
||||
repo-token: ${{ secrets.GITHUB_TOKEN }}
|
||||
- uses: rui314/setup-mold@v1
|
||||
- name: Install toolchain
|
||||
uses: actions-rust-lang/setup-rust-toolchain@v1
|
||||
with:
|
||||
cache: false
|
||||
- name: Rust Cache
|
||||
uses: Swatinem/rust-cache@v2
|
||||
with:
|
||||
# Shares cross multiple jobs
|
||||
shared-key: "coverage-test"
|
||||
cache-all-crates: "true"
|
||||
save-if: ${{ github.ref == 'refs/heads/main' }}
|
||||
- name: Install latest nextest release
|
||||
uses: taiki-e/install-action@nextest
|
||||
|
||||
- name: Setup external services
|
||||
working-directory: tests-integration/fixtures
|
||||
run: ../../.github/scripts/pull-test-deps-images.sh && docker compose up -d --wait
|
||||
|
||||
- name: Run nextest cases
|
||||
run: cargo nextest run --workspace -F dashboard -F pg_kvbackend -F mysql_kvbackend
|
||||
env:
|
||||
CARGO_BUILD_RUSTFLAGS: "-C link-arg=-fuse-ld=mold"
|
||||
RUST_BACKTRACE: 1
|
||||
RUST_MIN_STACK: 8388608 # 8MB
|
||||
CARGO_INCREMENTAL: 0
|
||||
GT_S3_BUCKET: ${{ vars.AWS_CI_TEST_BUCKET }}
|
||||
GT_S3_ACCESS_KEY_ID: ${{ secrets.AWS_CI_TEST_ACCESS_KEY_ID }}
|
||||
GT_S3_ACCESS_KEY: ${{ secrets.AWS_CI_TEST_SECRET_ACCESS_KEY }}
|
||||
GT_S3_REGION: ${{ vars.AWS_CI_TEST_BUCKET_REGION }}
|
||||
GT_MINIO_BUCKET: greptime
|
||||
GT_MINIO_ACCESS_KEY_ID: superpower_ci_user
|
||||
GT_MINIO_ACCESS_KEY: superpower_password
|
||||
GT_MINIO_REGION: us-west-2
|
||||
GT_MINIO_ENDPOINT_URL: http://127.0.0.1:9000
|
||||
GT_ETCD_TLS_ENDPOINTS: https://127.0.0.1:2378
|
||||
GT_ETCD_ENDPOINTS: http://127.0.0.1:2379
|
||||
GT_POSTGRES_ENDPOINTS: postgres://greptimedb:admin@127.0.0.1:5432/postgres
|
||||
GT_POSTGRES15_ENDPOINTS: postgres://test_user:test_password@127.0.0.1:5433/postgres
|
||||
GT_POSTGRES15_SCHEMA: test_schema
|
||||
GT_MYSQL_ENDPOINTS: mysql://greptimedb:admin@127.0.0.1:3306/mysql
|
||||
GT_KAFKA_ENDPOINTS: 127.0.0.1:9092
|
||||
GT_KAFKA_SASL_ENDPOINTS: 127.0.0.1:9093
|
||||
UNITTEST_LOG_DIR: "__unittest_logs"
|
||||
run: cargo clippy --workspace --all-targets -- -D warnings
|
||||
|
||||
coverage:
|
||||
if: ${{ github.repository == 'GreptimeTeam/greptimedb' && github.event_name == 'merge_group' }}
|
||||
runs-on: ubuntu-22.04-8-cores
|
||||
if: github.event.pull_request.draft == false
|
||||
runs-on: ubuntu-20.04-8-cores
|
||||
timeout-minutes: 60
|
||||
steps:
|
||||
- uses: actions/checkout@v4
|
||||
with:
|
||||
persist-credentials: false
|
||||
- uses: arduino/setup-protoc@v3
|
||||
- uses: actions/checkout@v3
|
||||
- uses: arduino/setup-protoc@v1
|
||||
with:
|
||||
repo-token: ${{ secrets.GITHUB_TOKEN }}
|
||||
- uses: rui314/setup-mold@v1
|
||||
- name: Install toolchain
|
||||
uses: actions-rust-lang/setup-rust-toolchain@v1
|
||||
- uses: KyleMayes/install-llvm-action@v1
|
||||
with:
|
||||
components: llvm-tools
|
||||
cache: false
|
||||
version: "14.0"
|
||||
- name: Install toolchain
|
||||
uses: dtolnay/rust-toolchain@master
|
||||
with:
|
||||
toolchain: ${{ env.RUST_TOOLCHAIN }}
|
||||
components: llvm-tools-preview
|
||||
- name: Rust Cache
|
||||
uses: Swatinem/rust-cache@v2
|
||||
with:
|
||||
# Shares cross multiple jobs
|
||||
shared-key: "coverage-test"
|
||||
save-if: ${{ github.ref == 'refs/heads/main' }}
|
||||
- name: Install latest nextest release
|
||||
uses: taiki-e/install-action@nextest
|
||||
- name: Install cargo-llvm-cov
|
||||
uses: taiki-e/install-action@cargo-llvm-cov
|
||||
|
||||
- name: Setup external services
|
||||
working-directory: tests-integration/fixtures
|
||||
run: ../../.github/scripts/pull-test-deps-images.sh && docker compose up -d --wait
|
||||
|
||||
- name: Install Python
|
||||
uses: actions/setup-python@v4
|
||||
with:
|
||||
python-version: '3.10'
|
||||
- name: Install PyArrow Package
|
||||
run: pip install pyarrow
|
||||
- name: Setup etcd server
|
||||
working-directory: tests-integration/fixtures/etcd
|
||||
run: docker compose -f docker-compose-standalone.yml up -d --wait
|
||||
- name: Setup kafka server
|
||||
working-directory: tests-integration/fixtures/kafka
|
||||
run: docker compose -f docker-compose-standalone.yml up -d --wait
|
||||
- name: Run nextest cases
|
||||
run: cargo llvm-cov nextest --workspace --lcov --output-path lcov.info -F dashboard -F pg_kvbackend -F mysql_kvbackend
|
||||
run: cargo llvm-cov nextest --workspace --lcov --output-path lcov.info -F pyo3_backend -F dashboard
|
||||
env:
|
||||
CARGO_BUILD_RUSTFLAGS: "-C link-arg=-fuse-ld=mold"
|
||||
CARGO_BUILD_RUSTFLAGS: "-C link-arg=-fuse-ld=lld"
|
||||
RUST_BACKTRACE: 1
|
||||
CARGO_INCREMENTAL: 0
|
||||
GT_S3_BUCKET: ${{ vars.AWS_CI_TEST_BUCKET }}
|
||||
GT_S3_ACCESS_KEY_ID: ${{ secrets.AWS_CI_TEST_ACCESS_KEY_ID }}
|
||||
GT_S3_ACCESS_KEY: ${{ secrets.AWS_CI_TEST_SECRET_ACCESS_KEY }}
|
||||
GT_S3_REGION: ${{ vars.AWS_CI_TEST_BUCKET_REGION }}
|
||||
GT_MINIO_BUCKET: greptime
|
||||
GT_MINIO_ACCESS_KEY_ID: superpower_ci_user
|
||||
GT_MINIO_ACCESS_KEY: superpower_password
|
||||
GT_MINIO_REGION: us-west-2
|
||||
GT_MINIO_ENDPOINT_URL: http://127.0.0.1:9000
|
||||
GT_ETCD_TLS_ENDPOINTS: https://127.0.0.1:2378
|
||||
GT_S3_BUCKET: ${{ secrets.S3_BUCKET }}
|
||||
GT_S3_ACCESS_KEY_ID: ${{ secrets.S3_ACCESS_KEY_ID }}
|
||||
GT_S3_ACCESS_KEY: ${{ secrets.S3_ACCESS_KEY }}
|
||||
GT_S3_REGION: ${{ secrets.S3_REGION }}
|
||||
GT_ETCD_ENDPOINTS: http://127.0.0.1:2379
|
||||
GT_POSTGRES_ENDPOINTS: postgres://greptimedb:admin@127.0.0.1:5432/postgres
|
||||
GT_POSTGRES15_ENDPOINTS: postgres://test_user:test_password@127.0.0.1:5433/postgres
|
||||
GT_POSTGRES15_SCHEMA: test_schema
|
||||
GT_MYSQL_ENDPOINTS: mysql://greptimedb:admin@127.0.0.1:3306/mysql
|
||||
GT_KAFKA_ENDPOINTS: 127.0.0.1:9092
|
||||
GT_KAFKA_SASL_ENDPOINTS: 127.0.0.1:9093
|
||||
UNITTEST_LOG_DIR: "__unittest_logs"
|
||||
- name: Codecov upload
|
||||
uses: codecov/codecov-action@v4
|
||||
uses: codecov/codecov-action@v2
|
||||
with:
|
||||
token: ${{ secrets.CODECOV_TOKEN }}
|
||||
files: ./lcov.info
|
||||
flags: rust
|
||||
fail_ci_if_error: false
|
||||
verbose: true
|
||||
|
||||
# compat:
|
||||
# if: ${{ github.repository == 'GreptimeTeam/greptimedb' }}
|
||||
# name: Compatibility Test
|
||||
# needs: build
|
||||
# runs-on: ubuntu-22.04
|
||||
# timeout-minutes: 60
|
||||
# steps:
|
||||
# - uses: actions/checkout@v4
|
||||
# - name: Download pre-built binaries
|
||||
# uses: actions/download-artifact@v4
|
||||
# with:
|
||||
# name: bins
|
||||
# path: .
|
||||
# - name: Unzip binaries
|
||||
# run: |
|
||||
# mkdir -p ./bins/current
|
||||
# tar -xvf ./bins.tar.gz --strip-components=1 -C ./bins/current
|
||||
# - run: ./tests/compat/test-compat.sh 0.6.0
|
||||
|
||||
39
.github/workflows/doc-issue.yml
vendored
Normal file
39
.github/workflows/doc-issue.yml
vendored
Normal file
@@ -0,0 +1,39 @@
|
||||
name: Create Issue in downstream repos
|
||||
|
||||
on:
|
||||
issues:
|
||||
types:
|
||||
- labeled
|
||||
pull_request_target:
|
||||
types:
|
||||
- labeled
|
||||
|
||||
jobs:
|
||||
doc_issue:
|
||||
if: github.event.label.name == 'doc update required'
|
||||
runs-on: ubuntu-20.04
|
||||
steps:
|
||||
- name: create an issue in doc repo
|
||||
uses: dacbd/create-issue-action@main
|
||||
with:
|
||||
owner: GreptimeTeam
|
||||
repo: docs
|
||||
token: ${{ secrets.DOCS_REPO_TOKEN }}
|
||||
title: Update docs for ${{ github.event.issue.title || github.event.pull_request.title }}
|
||||
body: |
|
||||
A document change request is generated from
|
||||
${{ github.event.issue.html_url || github.event.pull_request.html_url }}
|
||||
cloud_issue:
|
||||
if: github.event.label.name == 'cloud followup required'
|
||||
runs-on: ubuntu-20.04
|
||||
steps:
|
||||
- name: create an issue in cloud repo
|
||||
uses: dacbd/create-issue-action@main
|
||||
with:
|
||||
owner: GreptimeTeam
|
||||
repo: greptimedb-cloud
|
||||
token: ${{ secrets.DOCS_REPO_TOKEN }}
|
||||
title: Followup changes in ${{ github.event.issue.title || github.event.pull_request.title }}
|
||||
body: |
|
||||
A followup request is generated from
|
||||
${{ github.event.issue.html_url || github.event.pull_request.html_url }}
|
||||
20
.github/workflows/doc-label.yml
vendored
Normal file
20
.github/workflows/doc-label.yml
vendored
Normal file
@@ -0,0 +1,20 @@
|
||||
name: "PR Doc Labeler"
|
||||
on:
|
||||
pull_request_target:
|
||||
types: [opened, edited, synchronize, ready_for_review, auto_merge_enabled, labeled, unlabeled]
|
||||
|
||||
permissions:
|
||||
pull-requests: write
|
||||
contents: read
|
||||
|
||||
jobs:
|
||||
triage:
|
||||
if: ${{ github.repository == 'GreptimeTeam/greptimedb' }}
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- uses: github/issue-labeler@v3.3
|
||||
with:
|
||||
configuration-path: .github/doc-label-config.yml
|
||||
enable-versioned-regex: false
|
||||
repo-token: ${{ secrets.GITHUB_TOKEN }}
|
||||
sync-labels: 1
|
||||
27
.github/workflows/docbot.yml
vendored
27
.github/workflows/docbot.yml
vendored
@@ -1,27 +0,0 @@
|
||||
name: Follow Up Docs
|
||||
on:
|
||||
pull_request_target:
|
||||
types: [opened, edited]
|
||||
|
||||
concurrency:
|
||||
group: ${{ github.workflow }}-${{ github.head_ref || github.run_id }}
|
||||
cancel-in-progress: true
|
||||
|
||||
jobs:
|
||||
docbot:
|
||||
runs-on: ubuntu-latest
|
||||
permissions:
|
||||
pull-requests: write
|
||||
contents: read
|
||||
timeout-minutes: 10
|
||||
steps:
|
||||
- uses: actions/checkout@v4
|
||||
with:
|
||||
persist-credentials: false
|
||||
- uses: ./.github/actions/setup-cyborg
|
||||
- name: Maybe Follow Up Docs Issue
|
||||
working-directory: cyborg
|
||||
run: pnpm tsx bin/follow-up-docs-issue.ts
|
||||
env:
|
||||
DOCS_REPO_TOKEN: ${{ secrets.DOCS_REPO_TOKEN }}
|
||||
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
|
||||
56
.github/workflows/docs.yml
vendored
56
.github/workflows/docs.yml
vendored
@@ -9,10 +9,9 @@ on:
|
||||
- '.dockerignore'
|
||||
- 'docker/**'
|
||||
- '.gitignore'
|
||||
- 'grafana/**'
|
||||
- 'Makefile'
|
||||
push:
|
||||
branches:
|
||||
- develop
|
||||
- main
|
||||
paths:
|
||||
- 'docs/**'
|
||||
@@ -21,8 +20,6 @@ on:
|
||||
- '.dockerignore'
|
||||
- 'docker/**'
|
||||
- '.gitignore'
|
||||
- 'grafana/**'
|
||||
- 'Makefile'
|
||||
workflow_dispatch:
|
||||
|
||||
name: CI
|
||||
@@ -33,64 +30,41 @@ name: CI
|
||||
jobs:
|
||||
typos:
|
||||
name: Spell Check with Typos
|
||||
runs-on: ubuntu-latest
|
||||
runs-on: ubuntu-20.04
|
||||
steps:
|
||||
- uses: actions/checkout@v4
|
||||
with:
|
||||
persist-credentials: false
|
||||
- uses: crate-ci/typos@master
|
||||
|
||||
license-header-check:
|
||||
runs-on: ubuntu-latest
|
||||
name: Check License Header
|
||||
steps:
|
||||
- uses: actions/checkout@v4
|
||||
with:
|
||||
persist-credentials: false
|
||||
- uses: korandoru/hawkeye@v5
|
||||
- uses: actions/checkout@v3
|
||||
- uses: crate-ci/typos@v1.13.10
|
||||
|
||||
check:
|
||||
name: Check
|
||||
runs-on: ubuntu-latest
|
||||
if: github.event.pull_request.draft == false
|
||||
runs-on: ubuntu-20.04
|
||||
steps:
|
||||
- run: 'echo "No action required"'
|
||||
|
||||
fmt:
|
||||
name: Rustfmt
|
||||
runs-on: ubuntu-latest
|
||||
if: github.event.pull_request.draft == false
|
||||
runs-on: ubuntu-20.04
|
||||
steps:
|
||||
- run: 'echo "No action required"'
|
||||
|
||||
clippy:
|
||||
name: Clippy
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- run: 'echo "No action required"'
|
||||
|
||||
check-udeps:
|
||||
name: Unused Dependencies
|
||||
runs-on: ubuntu-latest
|
||||
if: github.event.pull_request.draft == false
|
||||
runs-on: ubuntu-20.04
|
||||
steps:
|
||||
- run: 'echo "No action required"'
|
||||
|
||||
coverage:
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- run: 'echo "No action required"'
|
||||
|
||||
test:
|
||||
runs-on: ubuntu-latest
|
||||
if: github.event.pull_request.draft == false
|
||||
runs-on: ubuntu-20.04
|
||||
steps:
|
||||
- run: 'echo "No action required"'
|
||||
|
||||
sqlness:
|
||||
name: Sqlness Test (${{ matrix.mode.name }})
|
||||
runs-on: ${{ matrix.os }}
|
||||
strategy:
|
||||
matrix:
|
||||
os: [ ubuntu-latest ]
|
||||
mode:
|
||||
- name: "Basic"
|
||||
- name: "Remote WAL"
|
||||
name: Sqlness Test
|
||||
if: github.event.pull_request.draft == false
|
||||
runs-on: ubuntu-20.04
|
||||
steps:
|
||||
- run: 'echo "No action required"'
|
||||
|
||||
26
.github/workflows/grafana.yml
vendored
26
.github/workflows/grafana.yml
vendored
@@ -1,26 +0,0 @@
|
||||
name: Check Grafana Panels
|
||||
|
||||
on:
|
||||
pull_request:
|
||||
branches:
|
||||
- main
|
||||
paths:
|
||||
- 'grafana/**' # Trigger only when files under the grafana/ directory change
|
||||
|
||||
jobs:
|
||||
check-panels:
|
||||
runs-on: ubuntu-latest
|
||||
|
||||
steps:
|
||||
# Check out the repository
|
||||
- name: Checkout repository
|
||||
uses: actions/checkout@v4
|
||||
|
||||
# Install jq (required for the script)
|
||||
- name: Install jq
|
||||
run: sudo apt-get install -y jq
|
||||
|
||||
# Make the check.sh script executable
|
||||
- name: Check grafana dashboards
|
||||
run: |
|
||||
make check-dashboards
|
||||
16
.github/workflows/license.yaml
vendored
Normal file
16
.github/workflows/license.yaml
vendored
Normal file
@@ -0,0 +1,16 @@
|
||||
name: License checker
|
||||
|
||||
on:
|
||||
push:
|
||||
branches:
|
||||
- develop
|
||||
pull_request:
|
||||
types: [opened, synchronize, reopened, ready_for_review]
|
||||
jobs:
|
||||
license-header-check:
|
||||
runs-on: ubuntu-20.04
|
||||
name: license-header-check
|
||||
steps:
|
||||
- uses: actions/checkout@v2
|
||||
- name: Check License Header
|
||||
uses: korandoru/hawkeye@v3
|
||||
80
.github/workflows/nightly-build.yml
vendored
80
.github/workflows/nightly-build.yml
vendored
@@ -12,13 +12,13 @@ on:
|
||||
linux_amd64_runner:
|
||||
type: choice
|
||||
description: The runner uses to build linux-amd64 artifacts
|
||||
default: ec2-c6i.4xlarge-amd64
|
||||
default: ec2-c6i.2xlarge-amd64
|
||||
options:
|
||||
- ubuntu-22.04
|
||||
- ubuntu-22.04-8-cores
|
||||
- ubuntu-22.04-16-cores
|
||||
- ubuntu-22.04-32-cores
|
||||
- ubuntu-22.04-64-cores
|
||||
- ubuntu-20.04
|
||||
- ubuntu-20.04-8-cores
|
||||
- ubuntu-20.04-16-cores
|
||||
- ubuntu-20.04-32-cores
|
||||
- ubuntu-20.04-64-cores
|
||||
- ec2-c6i.xlarge-amd64 # 4C8G
|
||||
- ec2-c6i.2xlarge-amd64 # 8C16G
|
||||
- ec2-c6i.4xlarge-amd64 # 16C32G
|
||||
@@ -27,7 +27,7 @@ on:
|
||||
linux_arm64_runner:
|
||||
type: choice
|
||||
description: The runner uses to build linux-arm64 artifacts
|
||||
default: ec2-c6g.4xlarge-arm64
|
||||
default: ec2-c6g.2xlarge-arm64
|
||||
options:
|
||||
- ec2-c6g.xlarge-arm64 # 4C8G
|
||||
- ec2-c6g.2xlarge-arm64 # 8C16G
|
||||
@@ -70,7 +70,7 @@ jobs:
|
||||
allocate-runners:
|
||||
name: Allocate runners
|
||||
if: ${{ github.repository == 'GreptimeTeam/greptimedb' }}
|
||||
runs-on: ubuntu-latest
|
||||
runs-on: ubuntu-20.04
|
||||
outputs:
|
||||
linux-amd64-runner: ${{ steps.start-linux-amd64-runner.outputs.label }}
|
||||
linux-arm64-runner: ${{ steps.start-linux-arm64-runner.outputs.label }}
|
||||
@@ -85,10 +85,9 @@ jobs:
|
||||
version: ${{ steps.create-version.outputs.version }}
|
||||
steps:
|
||||
- name: Checkout
|
||||
uses: actions/checkout@v4
|
||||
uses: actions/checkout@v3
|
||||
with:
|
||||
fetch-depth: 0
|
||||
persist-credentials: false
|
||||
|
||||
- name: Create version
|
||||
id: create-version
|
||||
@@ -138,10 +137,9 @@ jobs:
|
||||
]
|
||||
runs-on: ${{ needs.allocate-runners.outputs.linux-amd64-runner }}
|
||||
steps:
|
||||
- uses: actions/checkout@v4
|
||||
- uses: actions/checkout@v3
|
||||
with:
|
||||
fetch-depth: 0
|
||||
persist-credentials: false
|
||||
|
||||
- uses: ./.github/actions/build-linux-artifacts
|
||||
with:
|
||||
@@ -149,8 +147,6 @@ jobs:
|
||||
cargo-profile: ${{ env.CARGO_PROFILE }}
|
||||
version: ${{ needs.allocate-runners.outputs.version }}
|
||||
disable-run-tests: ${{ env.DISABLE_RUN_TESTS }}
|
||||
image-registry: ${{ vars.ECR_IMAGE_REGISTRY }}
|
||||
image-namespace: ${{ vars.ECR_IMAGE_NAMESPACE }}
|
||||
|
||||
build-linux-arm64-artifacts:
|
||||
name: Build linux-arm64 artifacts
|
||||
@@ -160,10 +156,9 @@ jobs:
|
||||
]
|
||||
runs-on: ${{ needs.allocate-runners.outputs.linux-arm64-runner }}
|
||||
steps:
|
||||
- uses: actions/checkout@v4
|
||||
- uses: actions/checkout@v3
|
||||
with:
|
||||
fetch-depth: 0
|
||||
persist-credentials: false
|
||||
|
||||
- uses: ./.github/actions/build-linux-artifacts
|
||||
with:
|
||||
@@ -171,8 +166,6 @@ jobs:
|
||||
cargo-profile: ${{ env.CARGO_PROFILE }}
|
||||
version: ${{ needs.allocate-runners.outputs.version }}
|
||||
disable-run-tests: ${{ env.DISABLE_RUN_TESTS }}
|
||||
image-registry: ${{ vars.ECR_IMAGE_REGISTRY }}
|
||||
image-namespace: ${{ vars.ECR_IMAGE_NAMESPACE }}
|
||||
|
||||
release-images-to-dockerhub:
|
||||
name: Build and push images to DockerHub
|
||||
@@ -182,25 +175,23 @@ jobs:
|
||||
build-linux-amd64-artifacts,
|
||||
build-linux-arm64-artifacts,
|
||||
]
|
||||
runs-on: ubuntu-latest
|
||||
runs-on: ubuntu-20.04
|
||||
outputs:
|
||||
nightly-build-result: ${{ steps.set-nightly-build-result.outputs.nightly-build-result }}
|
||||
steps:
|
||||
- uses: actions/checkout@v4
|
||||
- uses: actions/checkout@v3
|
||||
with:
|
||||
fetch-depth: 0
|
||||
persist-credentials: false
|
||||
|
||||
- name: Build and push images to dockerhub
|
||||
uses: ./.github/actions/build-images
|
||||
with:
|
||||
image-registry: docker.io
|
||||
image-namespace: ${{ vars.IMAGE_NAMESPACE }}
|
||||
image-name: ${{ vars.NIGHTLY_BUILD_IMAGE_NAME }}
|
||||
image-registry-username: ${{ secrets.DOCKERHUB_USERNAME }}
|
||||
image-registry-password: ${{ secrets.DOCKERHUB_TOKEN }}
|
||||
version: ${{ needs.allocate-runners.outputs.version }}
|
||||
push-latest-tag: false
|
||||
push-latest-tag: false # Don't push the latest tag to registry.
|
||||
|
||||
- name: Set nightly build result
|
||||
id: set-nightly-build-result
|
||||
@@ -214,23 +205,22 @@ jobs:
|
||||
allocate-runners,
|
||||
release-images-to-dockerhub,
|
||||
]
|
||||
runs-on: ubuntu-latest
|
||||
runs-on: ubuntu-20.04
|
||||
# When we push to ACR, it's easy to fail due to some unknown network issues.
|
||||
# However, we don't want to fail the whole workflow because of this.
|
||||
# The ACR have daily sync with DockerHub, so don't worry about the image not being updated.
|
||||
continue-on-error: true
|
||||
steps:
|
||||
- uses: actions/checkout@v4
|
||||
- uses: actions/checkout@v3
|
||||
with:
|
||||
fetch-depth: 0
|
||||
persist-credentials: false
|
||||
|
||||
- name: Release artifacts to CN region
|
||||
uses: ./.github/actions/release-cn-artifacts
|
||||
with:
|
||||
src-image-registry: docker.io
|
||||
src-image-namespace: ${{ vars.IMAGE_NAMESPACE }}
|
||||
src-image-name: ${{ vars.NIGHTLY_BUILD_IMAGE_NAME }}
|
||||
src-image-name: greptimedb
|
||||
dst-image-registry-username: ${{ secrets.ALICLOUD_USERNAME }}
|
||||
dst-image-registry-password: ${{ secrets.ALICLOUD_PASSWORD }}
|
||||
dst-image-registry: ${{ vars.ACR_IMAGE_REGISTRY }}
|
||||
@@ -240,26 +230,24 @@ jobs:
|
||||
aws-cn-access-key-id: ${{ secrets.AWS_CN_ACCESS_KEY_ID }}
|
||||
aws-cn-secret-access-key: ${{ secrets.AWS_CN_SECRET_ACCESS_KEY }}
|
||||
aws-cn-region: ${{ vars.AWS_RELEASE_BUCKET_REGION }}
|
||||
upload-to-s3: false
|
||||
dev-mode: false
|
||||
update-version-info: false # Don't update version info in S3.
|
||||
push-latest-tag: false
|
||||
push-latest-tag: false # Don't push the latest tag to registry.
|
||||
|
||||
stop-linux-amd64-runner: # It's always run as the last job in the workflow to make sure that the runner is released.
|
||||
name: Stop linux-amd64 runner
|
||||
# Only run this job when the runner is allocated.
|
||||
if: ${{ always() }}
|
||||
runs-on: ubuntu-latest
|
||||
runs-on: ubuntu-20.04
|
||||
needs: [
|
||||
allocate-runners,
|
||||
build-linux-amd64-artifacts,
|
||||
]
|
||||
steps:
|
||||
- name: Checkout
|
||||
uses: actions/checkout@v4
|
||||
uses: actions/checkout@v3
|
||||
with:
|
||||
fetch-depth: 0
|
||||
persist-credentials: false
|
||||
|
||||
- name: Stop EC2 runner
|
||||
uses: ./.github/actions/stop-runner
|
||||
@@ -275,17 +263,16 @@ jobs:
|
||||
name: Stop linux-arm64 runner
|
||||
# Only run this job when the runner is allocated.
|
||||
if: ${{ always() }}
|
||||
runs-on: ubuntu-latest
|
||||
runs-on: ubuntu-20.04
|
||||
needs: [
|
||||
allocate-runners,
|
||||
build-linux-arm64-artifacts,
|
||||
]
|
||||
steps:
|
||||
- name: Checkout
|
||||
uses: actions/checkout@v4
|
||||
uses: actions/checkout@v3
|
||||
with:
|
||||
fetch-depth: 0
|
||||
persist-credentials: false
|
||||
|
||||
- name: Stop EC2 runner
|
||||
uses: ./.github/actions/stop-runner
|
||||
@@ -298,38 +285,25 @@ jobs:
|
||||
github-token: ${{ secrets.GH_PERSONAL_ACCESS_TOKEN }}
|
||||
|
||||
notification:
|
||||
if: ${{ github.repository == 'GreptimeTeam/greptimedb' && always() }} # Not requiring successful dependent jobs, always run.
|
||||
if: ${{ always() }} # Not requiring successful dependent jobs, always run.
|
||||
name: Send notification to Greptime team
|
||||
needs: [
|
||||
release-images-to-dockerhub
|
||||
]
|
||||
runs-on: ubuntu-latest
|
||||
permissions:
|
||||
issues: write
|
||||
runs-on: ubuntu-20.04
|
||||
env:
|
||||
SLACK_WEBHOOK_URL: ${{ secrets.SLACK_WEBHOOK_URL_DEVELOP_CHANNEL }}
|
||||
steps:
|
||||
- uses: actions/checkout@v4
|
||||
with:
|
||||
persist-credentials: false
|
||||
- uses: ./.github/actions/setup-cyborg
|
||||
- name: Report CI status
|
||||
id: report-ci-status
|
||||
working-directory: cyborg
|
||||
run: pnpm tsx bin/report-ci-failure.ts
|
||||
env:
|
||||
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
|
||||
CI_REPORT_STATUS: ${{ needs.release-images-to-dockerhub.outputs.nightly-build-result == 'success' }}
|
||||
- name: Notify nightly build successful result
|
||||
- name: Notifiy nightly build successful result
|
||||
uses: slackapi/slack-github-action@v1.23.0
|
||||
if: ${{ needs.release-images-to-dockerhub.outputs.nightly-build-result == 'success' }}
|
||||
with:
|
||||
payload: |
|
||||
{"text": "GreptimeDB's ${{ env.NEXT_RELEASE_VERSION }} build has completed successfully."}
|
||||
|
||||
- name: Notify nightly build failed result
|
||||
- name: Notifiy nightly build failed result
|
||||
uses: slackapi/slack-github-action@v1.23.0
|
||||
if: ${{ needs.release-images-to-dockerhub.outputs.nightly-build-result != 'success' }}
|
||||
with:
|
||||
payload: |
|
||||
{"text": "GreptimeDB's ${{ env.NEXT_RELEASE_VERSION }} build has failed, please check ${{ steps.report-ci-status.outputs.html_url }}."}
|
||||
{"text": "GreptimeDB's ${{ env.NEXT_RELEASE_VERSION }} build has failed, please check 'https://github.com/GreptimeTeam/greptimedb/actions/workflows/${{ env.NEXT_RELEASE_VERSION }}-build.yml'."}
|
||||
|
||||
176
.github/workflows/nightly-ci.yml
vendored
176
.github/workflows/nightly-ci.yml
vendored
@@ -1,6 +1,8 @@
|
||||
# Nightly CI: runs tests every night for our second tier plaforms (Windows)
|
||||
|
||||
on:
|
||||
schedule:
|
||||
- cron: "0 23 * * 1-4"
|
||||
- cron: '0 23 * * 1-5'
|
||||
workflow_dispatch:
|
||||
|
||||
name: Nightly CI
|
||||
@@ -9,168 +11,90 @@ concurrency:
|
||||
group: ${{ github.workflow }}-${{ github.head_ref || github.run_id }}
|
||||
cancel-in-progress: true
|
||||
|
||||
env:
|
||||
RUST_TOOLCHAIN: nightly-2023-12-19
|
||||
|
||||
jobs:
|
||||
sqlness-test:
|
||||
name: Run sqlness test
|
||||
sqlness:
|
||||
name: Sqlness Test
|
||||
if: ${{ github.repository == 'GreptimeTeam/greptimedb' }}
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- name: Checkout
|
||||
uses: actions/checkout@v4
|
||||
with:
|
||||
fetch-depth: 0
|
||||
persist-credentials: false
|
||||
|
||||
- name: Check install.sh
|
||||
run: ./.github/scripts/check-install-script.sh
|
||||
|
||||
- name: Run sqlness test
|
||||
uses: ./.github/actions/sqlness-test
|
||||
with:
|
||||
data-root: sqlness-test
|
||||
aws-ci-test-bucket: ${{ vars.AWS_CI_TEST_BUCKET }}
|
||||
aws-region: ${{ vars.AWS_CI_TEST_BUCKET_REGION }}
|
||||
aws-access-key-id: ${{ secrets.AWS_CI_TEST_ACCESS_KEY_ID }}
|
||||
aws-secret-access-key: ${{ secrets.AWS_CI_TEST_SECRET_ACCESS_KEY }}
|
||||
- name: Upload sqlness logs
|
||||
if: failure()
|
||||
uses: actions/upload-artifact@v4
|
||||
with:
|
||||
name: sqlness-logs-kind
|
||||
path: /tmp/kind/
|
||||
retention-days: 3
|
||||
|
||||
sqlness-windows:
|
||||
name: Sqlness tests on Windows
|
||||
if: ${{ github.repository == 'GreptimeTeam/greptimedb' }}
|
||||
runs-on: windows-2022-8-cores
|
||||
permissions:
|
||||
issues: write
|
||||
runs-on: ${{ matrix.os }}
|
||||
strategy:
|
||||
matrix:
|
||||
os: [ windows-latest-8-cores ]
|
||||
timeout-minutes: 60
|
||||
steps:
|
||||
- uses: actions/checkout@v4
|
||||
with:
|
||||
fetch-depth: 0
|
||||
persist-credentials: false
|
||||
- uses: ./.github/actions/setup-cyborg
|
||||
- uses: arduino/setup-protoc@v3
|
||||
- uses: actions/checkout@v4.1.0
|
||||
- uses: arduino/setup-protoc@v1
|
||||
with:
|
||||
repo-token: ${{ secrets.GITHUB_TOKEN }}
|
||||
- uses: actions-rust-lang/setup-rust-toolchain@v1
|
||||
- uses: dtolnay/rust-toolchain@master
|
||||
with:
|
||||
toolchain: ${{ env.RUST_TOOLCHAIN }}
|
||||
- name: Rust Cache
|
||||
uses: Swatinem/rust-cache@v2
|
||||
- name: Run sqlness
|
||||
run: make sqlness-test
|
||||
env:
|
||||
SQLNESS_OPTS: "--preserve-state"
|
||||
- name: Upload sqlness logs
|
||||
run: cargo sqlness
|
||||
- name: Notify slack if failed
|
||||
if: failure()
|
||||
uses: actions/upload-artifact@v4
|
||||
uses: slackapi/slack-github-action@v1.23.0
|
||||
env:
|
||||
SLACK_WEBHOOK_URL: ${{ secrets.SLACK_WEBHOOK_URL_DEVELOP_CHANNEL }}
|
||||
with:
|
||||
payload: |
|
||||
{"text": "Nightly CI failed for sqlness tests"}
|
||||
- name: Upload sqlness logs
|
||||
if: always()
|
||||
uses: actions/upload-artifact@v3
|
||||
with:
|
||||
name: sqlness-logs
|
||||
path: C:\Users\RUNNER~1\AppData\Local\Temp\sqlness*
|
||||
path: ${{ runner.temp }}/greptime-*.log
|
||||
retention-days: 3
|
||||
|
||||
test-on-windows:
|
||||
name: Run tests on Windows
|
||||
if: ${{ github.repository == 'GreptimeTeam/greptimedb' }}
|
||||
runs-on: windows-2022-8-cores
|
||||
runs-on: windows-latest-8-cores
|
||||
timeout-minutes: 60
|
||||
steps:
|
||||
- run: git config --global core.autocrlf false
|
||||
- uses: actions/checkout@v4
|
||||
with:
|
||||
fetch-depth: 0
|
||||
persist-credentials: false
|
||||
- uses: ./.github/actions/setup-cyborg
|
||||
- uses: arduino/setup-protoc@v3
|
||||
- uses: actions/checkout@v4.1.0
|
||||
- uses: arduino/setup-protoc@v1
|
||||
with:
|
||||
repo-token: ${{ secrets.GITHUB_TOKEN }}
|
||||
- uses: KyleMayes/install-llvm-action@v1
|
||||
with:
|
||||
version: "14.0"
|
||||
- name: Install Rust toolchain
|
||||
uses: actions-rust-lang/setup-rust-toolchain@v1
|
||||
uses: dtolnay/rust-toolchain@master
|
||||
with:
|
||||
toolchain: ${{ env.RUST_TOOLCHAIN }}
|
||||
components: llvm-tools-preview
|
||||
- name: Rust Cache
|
||||
uses: Swatinem/rust-cache@v2
|
||||
- name: Install Cargo Nextest
|
||||
uses: taiki-e/install-action@nextest
|
||||
- name: Install Python
|
||||
uses: actions/setup-python@v4
|
||||
with:
|
||||
python-version: '3.10'
|
||||
- name: Install PyArrow Package
|
||||
run: pip install pyarrow
|
||||
- name: Install WSL distribution
|
||||
uses: Vampire/setup-wsl@v2
|
||||
with:
|
||||
distribution: Ubuntu-22.04
|
||||
- name: Running tests
|
||||
run: cargo nextest run -F dashboard
|
||||
run: cargo nextest run -F pyo3_backend,dashboard
|
||||
env:
|
||||
CARGO_BUILD_RUSTFLAGS: "-C linker=lld-link"
|
||||
RUST_BACKTRACE: 1
|
||||
CARGO_INCREMENTAL: 0
|
||||
GT_S3_BUCKET: ${{ vars.AWS_CI_TEST_BUCKET }}
|
||||
GT_S3_ACCESS_KEY_ID: ${{ secrets.AWS_CI_TEST_ACCESS_KEY_ID }}
|
||||
GT_S3_ACCESS_KEY: ${{ secrets.AWS_CI_TEST_SECRET_ACCESS_KEY }}
|
||||
GT_S3_REGION: ${{ vars.AWS_CI_TEST_BUCKET_REGION }}
|
||||
GT_S3_BUCKET: ${{ secrets.S3_BUCKET }}
|
||||
GT_S3_ACCESS_KEY_ID: ${{ secrets.S3_ACCESS_KEY_ID }}
|
||||
GT_S3_ACCESS_KEY: ${{ secrets.S3_ACCESS_KEY }}
|
||||
GT_S3_REGION: ${{ secrets.S3_REGION }}
|
||||
UNITTEST_LOG_DIR: "__unittest_logs"
|
||||
|
||||
cleanbuild-linux-nix:
|
||||
name: Run clean build on Linux
|
||||
runs-on: ubuntu-latest
|
||||
if: ${{ github.repository == 'GreptimeTeam/greptimedb' }}
|
||||
timeout-minutes: 45
|
||||
steps:
|
||||
- uses: actions/checkout@v4
|
||||
with:
|
||||
fetch-depth: 0
|
||||
persist-credentials: false
|
||||
- uses: cachix/install-nix-action@v31
|
||||
- run: nix develop --command cargo check --bin greptime
|
||||
env:
|
||||
CARGO_BUILD_RUSTFLAGS: "-C link-arg=-fuse-ld=mold"
|
||||
|
||||
check-status:
|
||||
name: Check status
|
||||
needs: [sqlness-test, sqlness-windows, test-on-windows]
|
||||
if: ${{ github.repository == 'GreptimeTeam/greptimedb' }}
|
||||
runs-on: ubuntu-latest
|
||||
outputs:
|
||||
check-result: ${{ steps.set-check-result.outputs.check-result }}
|
||||
steps:
|
||||
- name: Set check result
|
||||
id: set-check-result
|
||||
run: |
|
||||
echo "check-result=success" >> $GITHUB_OUTPUT
|
||||
|
||||
notification:
|
||||
if: ${{ github.repository == 'GreptimeTeam/greptimedb' && always() }} # Not requiring successful dependent jobs, always run.
|
||||
name: Send notification to Greptime team
|
||||
needs: [check-status]
|
||||
runs-on: ubuntu-latest
|
||||
env:
|
||||
SLACK_WEBHOOK_URL: ${{ secrets.SLACK_WEBHOOK_URL_DEVELOP_CHANNEL }}
|
||||
steps:
|
||||
- uses: actions/checkout@v4
|
||||
with:
|
||||
fetch-depth: 0
|
||||
persist-credentials: false
|
||||
- uses: ./.github/actions/setup-cyborg
|
||||
- name: Report CI status
|
||||
id: report-ci-status
|
||||
working-directory: cyborg
|
||||
run: pnpm tsx bin/report-ci-failure.ts
|
||||
env:
|
||||
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
|
||||
CI_REPORT_STATUS: ${{ needs.check-status.outputs.check-result == 'success' }}
|
||||
- name: Notify dev build successful result
|
||||
- name: Notify slack if failed
|
||||
if: failure()
|
||||
uses: slackapi/slack-github-action@v1.23.0
|
||||
if: ${{ needs.check-status.outputs.check-result == 'success' }}
|
||||
env:
|
||||
SLACK_WEBHOOK_URL: ${{ secrets.SLACK_WEBHOOK_URL_DEVELOP_CHANNEL }}
|
||||
with:
|
||||
payload: |
|
||||
{"text": "Nightly CI has completed successfully."}
|
||||
|
||||
- name: Notify dev build failed result
|
||||
uses: slackapi/slack-github-action@v1.23.0
|
||||
if: ${{ needs.check-status.outputs.check-result != 'success' }}
|
||||
with:
|
||||
payload: |
|
||||
{"text": "Nightly CI failed has failed, please check ${{ steps.report-ci-status.outputs.html_url }}."}
|
||||
{"text": "Nightly CI failed for cargo test"}
|
||||
|
||||
27
.github/workflows/nightly-funtional-tests.yml
vendored
Normal file
27
.github/workflows/nightly-funtional-tests.yml
vendored
Normal file
@@ -0,0 +1,27 @@
|
||||
name: Nightly functional tests
|
||||
|
||||
on:
|
||||
schedule:
|
||||
# At 00:00 on Tuesday.
|
||||
- cron: '0 0 * * 2'
|
||||
workflow_dispatch:
|
||||
|
||||
jobs:
|
||||
sqlness-test:
|
||||
name: Run sqlness test
|
||||
if: ${{ github.repository == 'GreptimeTeam/greptimedb' }}
|
||||
runs-on: ubuntu-22.04
|
||||
steps:
|
||||
- name: Checkout
|
||||
uses: actions/checkout@v3
|
||||
with:
|
||||
fetch-depth: 0
|
||||
|
||||
- name: Run sqlness test
|
||||
uses: ./.github/actions/sqlness-test
|
||||
with:
|
||||
data-root: sqlness-test
|
||||
aws-ci-test-bucket: ${{ vars.AWS_CI_TEST_BUCKET }}
|
||||
aws-region: ${{ vars.AWS_CI_TEST_BUCKET_REGION }}
|
||||
aws-access-key-id: ${{ secrets.AWS_CI_TEST_ACCESS_KEY_ID }}
|
||||
aws-secret-access-key: ${{ secrets.AWS_CI_TEST_SECRET_ACCESS_KEY }}
|
||||
42
.github/workflows/pr-labeling.yaml
vendored
42
.github/workflows/pr-labeling.yaml
vendored
@@ -1,42 +0,0 @@
|
||||
name: 'PR Labeling'
|
||||
|
||||
on:
|
||||
pull_request_target:
|
||||
types:
|
||||
- opened
|
||||
- synchronize
|
||||
- reopened
|
||||
|
||||
permissions:
|
||||
contents: read
|
||||
pull-requests: write
|
||||
issues: write
|
||||
|
||||
jobs:
|
||||
labeler:
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- name: Checkout sources
|
||||
uses: actions/checkout@v4
|
||||
|
||||
- uses: actions/labeler@v5
|
||||
with:
|
||||
configuration-path: ".github/labeler.yaml"
|
||||
repo-token: "${{ secrets.GITHUB_TOKEN }}"
|
||||
|
||||
size-label:
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- uses: pascalgn/size-label-action@v0.5.5
|
||||
env:
|
||||
GITHUB_TOKEN: "${{ secrets.GITHUB_TOKEN }}"
|
||||
with:
|
||||
sizes: >
|
||||
{
|
||||
"0": "XS",
|
||||
"100": "S",
|
||||
"300": "M",
|
||||
"1000": "L",
|
||||
"1500": "XL",
|
||||
"2000": "XXL"
|
||||
}
|
||||
29
.github/workflows/pr-title-checker.yml
vendored
Normal file
29
.github/workflows/pr-title-checker.yml
vendored
Normal file
@@ -0,0 +1,29 @@
|
||||
name: "PR Title Checker"
|
||||
on:
|
||||
pull_request_target:
|
||||
types:
|
||||
- opened
|
||||
- edited
|
||||
- synchronize
|
||||
- labeled
|
||||
- unlabeled
|
||||
|
||||
jobs:
|
||||
check:
|
||||
runs-on: ubuntu-20.04
|
||||
timeout-minutes: 10
|
||||
steps:
|
||||
- uses: thehanimo/pr-title-checker@v1.3.4
|
||||
with:
|
||||
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
|
||||
pass_on_octokit_error: false
|
||||
configuration_path: ".github/pr-title-checker-config.json"
|
||||
breaking:
|
||||
runs-on: ubuntu-20.04
|
||||
timeout-minutes: 10
|
||||
steps:
|
||||
- uses: thehanimo/pr-title-checker@v1.3.4
|
||||
with:
|
||||
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
|
||||
pass_on_octokit_error: false
|
||||
configuration_path: ".github/pr-title-breaking-change-label-config.json"
|
||||
207
.github/workflows/release-dev-builder-images.yaml
vendored
207
.github/workflows/release-dev-builder-images.yaml
vendored
@@ -1,14 +1,12 @@
|
||||
name: Release dev-builder images
|
||||
|
||||
on:
|
||||
push:
|
||||
branches:
|
||||
- main
|
||||
paths:
|
||||
- rust-toolchain.toml
|
||||
- 'docker/dev-builder/**'
|
||||
workflow_dispatch: # Allows you to run this workflow manually.
|
||||
inputs:
|
||||
version:
|
||||
description: Version of the dev-builder
|
||||
required: false
|
||||
default: latest
|
||||
release_dev_builder_ubuntu_image:
|
||||
type: boolean
|
||||
description: Release dev-builder-ubuntu image
|
||||
@@ -24,209 +22,64 @@ on:
|
||||
description: Release dev-builder-android image
|
||||
required: false
|
||||
default: false
|
||||
update_dev_builder_image_tag:
|
||||
type: boolean
|
||||
description: Update the DEV_BUILDER_IMAGE_TAG in Makefile and create a PR
|
||||
required: false
|
||||
default: false
|
||||
|
||||
jobs:
|
||||
release-dev-builder-images:
|
||||
name: Release dev builder images
|
||||
# The jobs are triggered by the following events:
|
||||
# 1. Manually triggered workflow_dispatch event
|
||||
# 2. Push event when the PR that modifies the `rust-toolchain.toml` or `docker/dev-builder/**` is merged to main
|
||||
if: ${{ github.event_name == 'push' || inputs.release_dev_builder_ubuntu_image || inputs.release_dev_builder_centos_image || inputs.release_dev_builder_android_image }}
|
||||
runs-on: ubuntu-latest
|
||||
outputs:
|
||||
version: ${{ steps.set-version.outputs.version }}
|
||||
if: ${{ inputs.release_dev_builder_ubuntu_image || inputs.release_dev_builder_centos_image || inputs.release_dev_builder_android_image }} # Only manually trigger this job.
|
||||
runs-on: ubuntu-20.04-16-cores
|
||||
steps:
|
||||
- name: Checkout
|
||||
uses: actions/checkout@v4
|
||||
uses: actions/checkout@v3
|
||||
with:
|
||||
fetch-depth: 0
|
||||
persist-credentials: false
|
||||
|
||||
- name: Configure build image version
|
||||
id: set-version
|
||||
shell: bash
|
||||
run: |
|
||||
commitShortSHA=`echo ${{ github.sha }} | cut -c1-8`
|
||||
buildTime=`date +%Y%m%d%H%M%S`
|
||||
BUILD_VERSION="$commitShortSHA-$buildTime"
|
||||
RUST_TOOLCHAIN_VERSION=$(cat rust-toolchain.toml | grep -Eo '[0-9]{4}-[0-9]{2}-[0-9]{2}')
|
||||
IMAGE_VERSION="${RUST_TOOLCHAIN_VERSION}-${BUILD_VERSION}"
|
||||
echo "VERSION=${IMAGE_VERSION}" >> $GITHUB_ENV
|
||||
echo "version=$IMAGE_VERSION" >> $GITHUB_OUTPUT
|
||||
|
||||
- name: Build and push dev builder images
|
||||
uses: ./.github/actions/build-dev-builder-images
|
||||
with:
|
||||
version: ${{ env.VERSION }}
|
||||
version: ${{ inputs.version }}
|
||||
dockerhub-image-registry-username: ${{ secrets.DOCKERHUB_USERNAME }}
|
||||
dockerhub-image-registry-token: ${{ secrets.DOCKERHUB_TOKEN }}
|
||||
build-dev-builder-ubuntu: ${{ inputs.release_dev_builder_ubuntu_image || github.event_name == 'push' }}
|
||||
build-dev-builder-centos: ${{ inputs.release_dev_builder_centos_image || github.event_name == 'push' }}
|
||||
build-dev-builder-android: ${{ inputs.release_dev_builder_android_image || github.event_name == 'push' }}
|
||||
|
||||
release-dev-builder-images-ecr:
|
||||
name: Release dev builder images to AWS ECR
|
||||
runs-on: ubuntu-latest
|
||||
needs: [
|
||||
release-dev-builder-images
|
||||
]
|
||||
steps:
|
||||
- name: Configure AWS credentials
|
||||
uses: aws-actions/configure-aws-credentials@v4
|
||||
with:
|
||||
aws-access-key-id: ${{ secrets.AWS_ECR_ACCESS_KEY_ID }}
|
||||
aws-secret-access-key: ${{ secrets.AWS_ECR_SECRET_ACCESS_KEY }}
|
||||
aws-region: ${{ vars.ECR_REGION }}
|
||||
|
||||
- name: Login to Amazon ECR
|
||||
id: login-ecr-public
|
||||
uses: aws-actions/amazon-ecr-login@v2
|
||||
env:
|
||||
AWS_REGION: ${{ vars.ECR_REGION }}
|
||||
with:
|
||||
registry-type: public
|
||||
|
||||
- name: Push dev-builder-ubuntu image
|
||||
shell: bash
|
||||
if: ${{ inputs.release_dev_builder_ubuntu_image || github.event_name == 'push' }}
|
||||
env:
|
||||
IMAGE_VERSION: ${{ needs.release-dev-builder-images.outputs.version }}
|
||||
IMAGE_NAMESPACE: ${{ vars.IMAGE_NAMESPACE }}
|
||||
ECR_IMAGE_REGISTRY: ${{ vars.ECR_IMAGE_REGISTRY }}
|
||||
ECR_IMAGE_NAMESPACE: ${{ vars.ECR_IMAGE_NAMESPACE }}
|
||||
run: |
|
||||
docker run -v "${DOCKER_CONFIG:-$HOME/.docker}:/root/.docker:ro" \
|
||||
-e "REGISTRY_AUTH_FILE=/root/.docker/config.json" \
|
||||
quay.io/skopeo/stable:latest \
|
||||
copy -a docker://docker.io/$IMAGE_NAMESPACE/dev-builder-ubuntu:$IMAGE_VERSION \
|
||||
docker://$ECR_IMAGE_REGISTRY/$ECR_IMAGE_NAMESPACE/dev-builder-ubuntu:$IMAGE_VERSION
|
||||
|
||||
docker run -v "${DOCKER_CONFIG:-$HOME/.docker}:/root/.docker:ro" \
|
||||
-e "REGISTRY_AUTH_FILE=/root/.docker/config.json" \
|
||||
quay.io/skopeo/stable:latest \
|
||||
copy -a docker://docker.io/$IMAGE_NAMESPACE/dev-builder-ubuntu:latest \
|
||||
docker://$ECR_IMAGE_REGISTRY/$ECR_IMAGE_NAMESPACE/dev-builder-ubuntu:latest
|
||||
|
||||
- name: Push dev-builder-centos image
|
||||
shell: bash
|
||||
if: ${{ inputs.release_dev_builder_centos_image || github.event_name == 'push' }}
|
||||
env:
|
||||
IMAGE_VERSION: ${{ needs.release-dev-builder-images.outputs.version }}
|
||||
IMAGE_NAMESPACE: ${{ vars.IMAGE_NAMESPACE }}
|
||||
ECR_IMAGE_REGISTRY: ${{ vars.ECR_IMAGE_REGISTRY }}
|
||||
ECR_IMAGE_NAMESPACE: ${{ vars.ECR_IMAGE_NAMESPACE }}
|
||||
run: |
|
||||
docker run -v "${DOCKER_CONFIG:-$HOME/.docker}:/root/.docker:ro" \
|
||||
-e "REGISTRY_AUTH_FILE=/root/.docker/config.json" \
|
||||
quay.io/skopeo/stable:latest \
|
||||
copy -a docker://docker.io/$IMAGE_NAMESPACE/dev-builder-centos:$IMAGE_VERSION \
|
||||
docker://$ECR_IMAGE_REGISTRY/$ECR_IMAGE_NAMESPACE/dev-builder-centos:$IMAGE_VERSION
|
||||
|
||||
docker run -v "${DOCKER_CONFIG:-$HOME/.docker}:/root/.docker:ro" \
|
||||
-e "REGISTRY_AUTH_FILE=/root/.docker/config.json" \
|
||||
quay.io/skopeo/stable:latest \
|
||||
copy -a docker://docker.io/$IMAGE_NAMESPACE/dev-builder-centos:latest \
|
||||
docker://$ECR_IMAGE_REGISTRY/$ECR_IMAGE_NAMESPACE/dev-builder-centos:latest
|
||||
|
||||
- name: Push dev-builder-android image
|
||||
shell: bash
|
||||
if: ${{ inputs.release_dev_builder_android_image || github.event_name == 'push' }}
|
||||
env:
|
||||
IMAGE_VERSION: ${{ needs.release-dev-builder-images.outputs.version }}
|
||||
IMAGE_NAMESPACE: ${{ vars.IMAGE_NAMESPACE }}
|
||||
ECR_IMAGE_REGISTRY: ${{ vars.ECR_IMAGE_REGISTRY }}
|
||||
ECR_IMAGE_NAMESPACE: ${{ vars.ECR_IMAGE_NAMESPACE }}
|
||||
run: |
|
||||
docker run -v "${DOCKER_CONFIG:-$HOME/.docker}:/root/.docker:ro" \
|
||||
-e "REGISTRY_AUTH_FILE=/root/.docker/config.json" \
|
||||
quay.io/skopeo/stable:latest \
|
||||
copy -a docker://docker.io/$IMAGE_NAMESPACE/dev-builder-android:$IMAGE_VERSION \
|
||||
docker://$ECR_IMAGE_REGISTRY/$ECR_IMAGE_NAMESPACE/dev-builder-android:$IMAGE_VERSION
|
||||
|
||||
docker run -v "${DOCKER_CONFIG:-$HOME/.docker}:/root/.docker:ro" \
|
||||
-e "REGISTRY_AUTH_FILE=/root/.docker/config.json" \
|
||||
quay.io/skopeo/stable:latest \
|
||||
copy -a docker://docker.io/$IMAGE_NAMESPACE/dev-builder-android:latest \
|
||||
docker://$ECR_IMAGE_REGISTRY/$ECR_IMAGE_NAMESPACE/dev-builder-android:latest
|
||||
build-dev-builder-ubuntu: ${{ inputs.release_dev_builder_ubuntu_image }}
|
||||
build-dev-builder-centos: ${{ inputs.release_dev_builder_centos_image }}
|
||||
build-dev-builder-android: ${{ inputs.release_dev_builder_android_image }}
|
||||
|
||||
release-dev-builder-images-cn: # Note: Be careful issue: https://github.com/containers/skopeo/issues/1874 and we decide to use the latest stable skopeo container.
|
||||
name: Release dev builder images to CN region
|
||||
runs-on: ubuntu-latest
|
||||
runs-on: ubuntu-20.04
|
||||
needs: [
|
||||
release-dev-builder-images
|
||||
]
|
||||
steps:
|
||||
- name: Login to AliCloud Container Registry
|
||||
uses: docker/login-action@v3
|
||||
with:
|
||||
registry: ${{ vars.ACR_IMAGE_REGISTRY }}
|
||||
username: ${{ secrets.ALICLOUD_USERNAME }}
|
||||
password: ${{ secrets.ALICLOUD_PASSWORD }}
|
||||
|
||||
- name: Push dev-builder-ubuntu image
|
||||
shell: bash
|
||||
if: ${{ inputs.release_dev_builder_ubuntu_image || github.event_name == 'push' }}
|
||||
if: ${{ inputs.release_dev_builder_ubuntu_image }}
|
||||
env:
|
||||
IMAGE_VERSION: ${{ needs.release-dev-builder-images.outputs.version }}
|
||||
IMAGE_NAMESPACE: ${{ vars.IMAGE_NAMESPACE }}
|
||||
ACR_IMAGE_REGISTRY: ${{ vars.ACR_IMAGE_REGISTRY }}
|
||||
DST_REGISTRY_USERNAME: ${{ secrets.ALICLOUD_USERNAME }}
|
||||
DST_REGISTRY_PASSWORD: ${{ secrets.ALICLOUD_PASSWORD }}
|
||||
run: |
|
||||
docker run -v "${DOCKER_CONFIG:-$HOME/.docker}:/root/.docker:ro" \
|
||||
-e "REGISTRY_AUTH_FILE=/root/.docker/config.json" \
|
||||
quay.io/skopeo/stable:latest \
|
||||
copy -a docker://docker.io/$IMAGE_NAMESPACE/dev-builder-ubuntu:$IMAGE_VERSION \
|
||||
docker://$ACR_IMAGE_REGISTRY/$IMAGE_NAMESPACE/dev-builder-ubuntu:$IMAGE_VERSION
|
||||
docker run quay.io/skopeo/stable:latest copy -a docker://docker.io/${{ vars.IMAGE_NAMESPACE }}/dev-builder-ubuntu:${{ inputs.version }} \
|
||||
--dest-creds "$DST_REGISTRY_USERNAME":"$DST_REGISTRY_PASSWORD" \
|
||||
docker://${{ vars.ACR_IMAGE_REGISTRY }}/${{ vars.IMAGE_NAMESPACE }}/dev-builder-ubuntu:${{ inputs.version }}
|
||||
|
||||
- name: Push dev-builder-centos image
|
||||
shell: bash
|
||||
if: ${{ inputs.release_dev_builder_centos_image || github.event_name == 'push' }}
|
||||
if: ${{ inputs.release_dev_builder_centos_image }}
|
||||
env:
|
||||
IMAGE_VERSION: ${{ needs.release-dev-builder-images.outputs.version }}
|
||||
IMAGE_NAMESPACE: ${{ vars.IMAGE_NAMESPACE }}
|
||||
ACR_IMAGE_REGISTRY: ${{ vars.ACR_IMAGE_REGISTRY }}
|
||||
DST_REGISTRY_USERNAME: ${{ secrets.ALICLOUD_USERNAME }}
|
||||
DST_REGISTRY_PASSWORD: ${{ secrets.ALICLOUD_PASSWORD }}
|
||||
run: |
|
||||
docker run -v "${DOCKER_CONFIG:-$HOME/.docker}:/root/.docker:ro" \
|
||||
-e "REGISTRY_AUTH_FILE=/root/.docker/config.json" \
|
||||
quay.io/skopeo/stable:latest \
|
||||
copy -a docker://docker.io/$IMAGE_NAMESPACE/dev-builder-centos:$IMAGE_VERSION \
|
||||
docker://$ACR_IMAGE_REGISTRY/$IMAGE_NAMESPACE/dev-builder-centos:$IMAGE_VERSION
|
||||
docker run quay.io/skopeo/stable:latest copy -a docker://docker.io/${{ vars.IMAGE_NAMESPACE }}/dev-builder-centos:${{ inputs.version }} \
|
||||
--dest-creds "$DST_REGISTRY_USERNAME":"$DST_REGISTRY_PASSWORD" \
|
||||
docker://${{ vars.ACR_IMAGE_REGISTRY }}/${{ vars.IMAGE_NAMESPACE }}/dev-builder-centos:${{ inputs.version }}
|
||||
|
||||
- name: Push dev-builder-android image
|
||||
shell: bash
|
||||
if: ${{ inputs.release_dev_builder_android_image || github.event_name == 'push' }}
|
||||
if: ${{ inputs.release_dev_builder_android_image }}
|
||||
env:
|
||||
IMAGE_VERSION: ${{ needs.release-dev-builder-images.outputs.version }}
|
||||
IMAGE_NAMESPACE: ${{ vars.IMAGE_NAMESPACE }}
|
||||
ACR_IMAGE_REGISTRY: ${{ vars.ACR_IMAGE_REGISTRY }}
|
||||
DST_REGISTRY_USERNAME: ${{ secrets.ALICLOUD_USERNAME }}
|
||||
DST_REGISTRY_PASSWORD: ${{ secrets.ALICLOUD_PASSWORD }}
|
||||
run: |
|
||||
docker run -v "${DOCKER_CONFIG:-$HOME/.docker}:/root/.docker:ro" \
|
||||
-e "REGISTRY_AUTH_FILE=/root/.docker/config.json" \
|
||||
quay.io/skopeo/stable:latest \
|
||||
copy -a docker://docker.io/$IMAGE_NAMESPACE/dev-builder-android:$IMAGE_VERSION \
|
||||
docker://$ACR_IMAGE_REGISTRY/$IMAGE_NAMESPACE/dev-builder-android:$IMAGE_VERSION
|
||||
|
||||
update-dev-builder-image-tag:
|
||||
name: Update dev-builder image tag
|
||||
runs-on: ubuntu-latest
|
||||
permissions:
|
||||
contents: write
|
||||
pull-requests: write
|
||||
if: ${{ github.event_name == 'push' || inputs.update_dev_builder_image_tag }}
|
||||
needs: [
|
||||
release-dev-builder-images
|
||||
]
|
||||
steps:
|
||||
- name: Checkout repository
|
||||
uses: actions/checkout@v4
|
||||
|
||||
- name: Update dev-builder image tag
|
||||
shell: bash
|
||||
env:
|
||||
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
|
||||
run: |
|
||||
./.github/scripts/update-dev-builder-version.sh ${{ needs.release-dev-builder-images.outputs.version }}
|
||||
docker run quay.io/skopeo/stable:latest copy -a docker://docker.io/${{ vars.IMAGE_NAMESPACE }}/dev-builder-android:${{ inputs.version }} \
|
||||
--dest-creds "$DST_REGISTRY_USERNAME":"$DST_REGISTRY_PASSWORD" \
|
||||
docker://${{ vars.ACR_IMAGE_REGISTRY }}/${{ vars.IMAGE_NAMESPACE }}/dev-builder-android:${{ inputs.version }}
|
||||
|
||||
239
.github/workflows/release.yml
vendored
239
.github/workflows/release.yml
vendored
@@ -18,11 +18,11 @@ on:
|
||||
description: The runner uses to build linux-amd64 artifacts
|
||||
default: ec2-c6i.4xlarge-amd64
|
||||
options:
|
||||
- ubuntu-22.04
|
||||
- ubuntu-22.04-8-cores
|
||||
- ubuntu-22.04-16-cores
|
||||
- ubuntu-22.04-32-cores
|
||||
- ubuntu-22.04-64-cores
|
||||
- ubuntu-20.04
|
||||
- ubuntu-20.04-8-cores
|
||||
- ubuntu-20.04-16-cores
|
||||
- ubuntu-20.04-32-cores
|
||||
- ubuntu-20.04-64-cores
|
||||
- ec2-c6i.xlarge-amd64 # 4C8G
|
||||
- ec2-c6i.2xlarge-amd64 # 8C16G
|
||||
- ec2-c6i.4xlarge-amd64 # 16C32G
|
||||
@@ -31,9 +31,8 @@ on:
|
||||
linux_arm64_runner:
|
||||
type: choice
|
||||
description: The runner uses to build linux-arm64 artifacts
|
||||
default: ec2-c6g.8xlarge-arm64
|
||||
default: ec2-c6g.4xlarge-arm64
|
||||
options:
|
||||
- ubuntu-2204-32-cores-arm
|
||||
- ec2-c6g.xlarge-arm64 # 4C8G
|
||||
- ec2-c6g.2xlarge-arm64 # 8C16G
|
||||
- ec2-c6g.4xlarge-arm64 # 16C32G
|
||||
@@ -83,24 +82,27 @@ on:
|
||||
# Use env variables to control all the release process.
|
||||
env:
|
||||
# The arguments of building greptime.
|
||||
RUST_TOOLCHAIN: nightly-2023-12-19
|
||||
CARGO_PROFILE: nightly
|
||||
|
||||
# Controls whether to run tests, include unit-test, integration-test and sqlness.
|
||||
DISABLE_RUN_TESTS: ${{ inputs.skip_test || vars.DEFAULT_SKIP_TEST }}
|
||||
|
||||
# The scheduled version is '${{ env.NEXT_RELEASE_VERSION }}-nightly-YYYYMMDD', like v0.2.0-nightly-20230313;
|
||||
# The scheduled version is '${{ env.NEXT_RELEASE_VERSION }}-nightly-YYYYMMDD', like v0.2.0-nigthly-20230313;
|
||||
NIGHTLY_RELEASE_PREFIX: nightly
|
||||
# Note: The NEXT_RELEASE_VERSION should be modified manually by every formal release.
|
||||
NEXT_RELEASE_VERSION: v0.6.0
|
||||
|
||||
jobs:
|
||||
allocate-runners:
|
||||
name: Allocate runners
|
||||
if: ${{ github.repository == 'GreptimeTeam/greptimedb' }}
|
||||
runs-on: ubuntu-latest
|
||||
runs-on: ubuntu-20.04
|
||||
outputs:
|
||||
linux-amd64-runner: ${{ steps.start-linux-amd64-runner.outputs.label }}
|
||||
linux-arm64-runner: ${{ steps.start-linux-arm64-runner.outputs.label }}
|
||||
macos-runner: ${{ inputs.macos_runner || vars.DEFAULT_MACOS_RUNNER }}
|
||||
windows-runner: windows-2022-8-cores
|
||||
windows-runner: windows-latest-8-cores
|
||||
|
||||
# The following EC2 resource id will be used for resource releasing.
|
||||
linux-amd64-ec2-runner-label: ${{ steps.start-linux-amd64-runner.outputs.label }}
|
||||
@@ -110,24 +112,15 @@ jobs:
|
||||
|
||||
# The 'version' use as the global tag name of the release workflow.
|
||||
version: ${{ steps.create-version.outputs.version }}
|
||||
|
||||
# The 'is-current-version-latest' determines whether to update 'latest' Docker tags and downstream repositories.
|
||||
is-current-version-latest: ${{ steps.check-version.outputs.is-current-version-latest }}
|
||||
steps:
|
||||
- name: Checkout
|
||||
uses: actions/checkout@v4
|
||||
uses: actions/checkout@v3
|
||||
with:
|
||||
fetch-depth: 0
|
||||
persist-credentials: false
|
||||
|
||||
- name: Check Rust toolchain version
|
||||
shell: bash
|
||||
run: |
|
||||
./scripts/check-builder-rust-version.sh
|
||||
|
||||
# The create-version will create a global variable named 'version' in the global workflows.
|
||||
# - If it's a tag push release, the version is the tag name(${{ github.ref_name }});
|
||||
# - If it's a scheduled release, the version is '${{ env.NEXT_RELEASE_VERSION }}-nightly-$buildTime', like v0.2.0-nightly-20230313;
|
||||
# - If it's a scheduled release, the version is '${{ env.NEXT_RELEASE_VERSION }}-nightly-$buildTime', like v0.2.0-nigthly-20230313;
|
||||
# - If it's a manual release, the version is '${{ env.NEXT_RELEASE_VERSION }}-<short-git-sha>-YYYYMMDDSS', like v0.2.0-e5b243c-2023071245;
|
||||
- name: Create version
|
||||
id: create-version
|
||||
@@ -136,13 +129,9 @@ jobs:
|
||||
env:
|
||||
GITHUB_EVENT_NAME: ${{ github.event_name }}
|
||||
GITHUB_REF_NAME: ${{ github.ref_name }}
|
||||
NEXT_RELEASE_VERSION: ${{ env.NEXT_RELEASE_VERSION }}
|
||||
NIGHTLY_RELEASE_PREFIX: ${{ env.NIGHTLY_RELEASE_PREFIX }}
|
||||
|
||||
- name: Check version
|
||||
id: check-version
|
||||
run: |
|
||||
./.github/scripts/check-version.sh "${{ steps.create-version.outputs.version }}"
|
||||
|
||||
- name: Allocate linux-amd64 runner
|
||||
if: ${{ inputs.build_linux_amd64_artifacts || github.event_name == 'push' || github.event_name == 'schedule' }}
|
||||
uses: ./.github/actions/start-runner
|
||||
@@ -179,10 +168,9 @@ jobs:
|
||||
]
|
||||
runs-on: ${{ needs.allocate-runners.outputs.linux-amd64-runner }}
|
||||
steps:
|
||||
- uses: actions/checkout@v4
|
||||
- uses: actions/checkout@v3
|
||||
with:
|
||||
fetch-depth: 0
|
||||
persist-credentials: false
|
||||
|
||||
- uses: ./.github/actions/build-linux-artifacts
|
||||
with:
|
||||
@@ -190,8 +178,6 @@ jobs:
|
||||
cargo-profile: ${{ env.CARGO_PROFILE }}
|
||||
version: ${{ needs.allocate-runners.outputs.version }}
|
||||
disable-run-tests: ${{ env.DISABLE_RUN_TESTS }}
|
||||
image-registry: ${{ vars.ECR_IMAGE_REGISTRY }}
|
||||
image-namespace: ${{ vars.ECR_IMAGE_NAMESPACE }}
|
||||
|
||||
build-linux-arm64-artifacts:
|
||||
name: Build linux-arm64 artifacts
|
||||
@@ -201,10 +187,9 @@ jobs:
|
||||
]
|
||||
runs-on: ${{ needs.allocate-runners.outputs.linux-arm64-runner }}
|
||||
steps:
|
||||
- uses: actions/checkout@v4
|
||||
- uses: actions/checkout@v3
|
||||
with:
|
||||
fetch-depth: 0
|
||||
persist-credentials: false
|
||||
|
||||
- uses: ./.github/actions/build-linux-artifacts
|
||||
with:
|
||||
@@ -212,8 +197,6 @@ jobs:
|
||||
cargo-profile: ${{ env.CARGO_PROFILE }}
|
||||
version: ${{ needs.allocate-runners.outputs.version }}
|
||||
disable-run-tests: ${{ env.DISABLE_RUN_TESTS }}
|
||||
image-registry: ${{ vars.ECR_IMAGE_REGISTRY }}
|
||||
image-namespace: ${{ vars.ECR_IMAGE_NAMESPACE }}
|
||||
|
||||
build-macos-artifacts:
|
||||
name: Build macOS artifacts
|
||||
@@ -225,38 +208,38 @@ jobs:
|
||||
arch: aarch64-apple-darwin
|
||||
features: servers/dashboard
|
||||
artifacts-dir-prefix: greptime-darwin-arm64
|
||||
- os: ${{ needs.allocate-runners.outputs.macos-runner }}
|
||||
arch: aarch64-apple-darwin
|
||||
features: pyo3_backend,servers/dashboard
|
||||
artifacts-dir-prefix: greptime-darwin-arm64-pyo3
|
||||
- os: ${{ needs.allocate-runners.outputs.macos-runner }}
|
||||
features: servers/dashboard
|
||||
arch: x86_64-apple-darwin
|
||||
artifacts-dir-prefix: greptime-darwin-amd64
|
||||
- os: ${{ needs.allocate-runners.outputs.macos-runner }}
|
||||
features: pyo3_backend,servers/dashboard
|
||||
arch: x86_64-apple-darwin
|
||||
artifacts-dir-prefix: greptime-darwin-amd64-pyo3
|
||||
runs-on: ${{ matrix.os }}
|
||||
outputs:
|
||||
build-macos-result: ${{ steps.set-build-macos-result.outputs.build-macos-result }}
|
||||
needs: [
|
||||
allocate-runners,
|
||||
]
|
||||
if: ${{ inputs.build_macos_artifacts || github.event_name == 'push' || github.event_name == 'schedule' }}
|
||||
steps:
|
||||
- uses: actions/checkout@v4
|
||||
- uses: actions/checkout@v3
|
||||
with:
|
||||
fetch-depth: 0
|
||||
persist-credentials: false
|
||||
|
||||
- uses: ./.github/actions/build-macos-artifacts
|
||||
with:
|
||||
arch: ${{ matrix.arch }}
|
||||
rust-toolchain: ${{ env.RUST_TOOLCHAIN }}
|
||||
cargo-profile: ${{ env.CARGO_PROFILE }}
|
||||
features: ${{ matrix.features }}
|
||||
version: ${{ needs.allocate-runners.outputs.version }}
|
||||
# We decide to disable the integration tests on macOS because it's unnecessary and time-consuming.
|
||||
disable-run-tests: true
|
||||
disable-run-tests: ${{ env.DISABLE_RUN_TESTS }}
|
||||
artifacts-dir: ${{ matrix.artifacts-dir-prefix }}-${{ needs.allocate-runners.outputs.version }}
|
||||
|
||||
- name: Set build macos result
|
||||
id: set-build-macos-result
|
||||
run: |
|
||||
echo "build-macos-result=success" >> $GITHUB_OUTPUT
|
||||
|
||||
build-windows-artifacts:
|
||||
name: Build Windows artifacts
|
||||
strategy:
|
||||
@@ -267,9 +250,11 @@ jobs:
|
||||
arch: x86_64-pc-windows-msvc
|
||||
features: servers/dashboard
|
||||
artifacts-dir-prefix: greptime-windows-amd64
|
||||
- os: ${{ needs.allocate-runners.outputs.windows-runner }}
|
||||
arch: x86_64-pc-windows-msvc
|
||||
features: pyo3_backend,servers/dashboard
|
||||
artifacts-dir-prefix: greptime-windows-amd64-pyo3
|
||||
runs-on: ${{ matrix.os }}
|
||||
outputs:
|
||||
build-windows-result: ${{ steps.set-build-windows-result.outputs.build-windows-result }}
|
||||
needs: [
|
||||
allocate-runners,
|
||||
]
|
||||
@@ -277,25 +262,20 @@ jobs:
|
||||
steps:
|
||||
- run: git config --global core.autocrlf false
|
||||
|
||||
- uses: actions/checkout@v4
|
||||
- uses: actions/checkout@v3
|
||||
with:
|
||||
fetch-depth: 0
|
||||
persist-credentials: false
|
||||
|
||||
- uses: ./.github/actions/build-windows-artifacts
|
||||
with:
|
||||
arch: ${{ matrix.arch }}
|
||||
rust-toolchain: ${{ env.RUST_TOOLCHAIN }}
|
||||
cargo-profile: ${{ env.CARGO_PROFILE }}
|
||||
features: ${{ matrix.features }}
|
||||
version: ${{ needs.allocate-runners.outputs.version }}
|
||||
disable-run-tests: ${{ env.DISABLE_RUN_TESTS }}
|
||||
artifacts-dir: ${{ matrix.artifacts-dir-prefix }}-${{ needs.allocate-runners.outputs.version }}
|
||||
|
||||
- name: Set build windows result
|
||||
id: set-build-windows-result
|
||||
run: |
|
||||
echo "build-windows-result=success" >> $Env:GITHUB_OUTPUT
|
||||
|
||||
release-images-to-dockerhub:
|
||||
name: Build and push images to DockerHub
|
||||
if: ${{ inputs.release_images || github.event_name == 'push' || github.event_name == 'schedule' }}
|
||||
@@ -304,30 +284,20 @@ jobs:
|
||||
build-linux-amd64-artifacts,
|
||||
build-linux-arm64-artifacts,
|
||||
]
|
||||
runs-on: ubuntu-latest
|
||||
outputs:
|
||||
build-image-result: ${{ steps.set-build-image-result.outputs.build-image-result }}
|
||||
runs-on: ubuntu-2004-16-cores
|
||||
steps:
|
||||
- uses: actions/checkout@v4
|
||||
- uses: actions/checkout@v3
|
||||
with:
|
||||
fetch-depth: 0
|
||||
persist-credentials: false
|
||||
|
||||
- name: Build and push images to dockerhub
|
||||
uses: ./.github/actions/build-images
|
||||
with:
|
||||
image-registry: docker.io
|
||||
image-namespace: ${{ vars.IMAGE_NAMESPACE }}
|
||||
image-name: ${{ vars.GREPTIMEDB_IMAGE_NAME }}
|
||||
image-registry-username: ${{ secrets.DOCKERHUB_USERNAME }}
|
||||
image-registry-password: ${{ secrets.DOCKERHUB_TOKEN }}
|
||||
version: ${{ needs.allocate-runners.outputs.version }}
|
||||
push-latest-tag: ${{ needs.allocate-runners.outputs.is-current-version-latest == 'true' && github.ref_type == 'tag' && !contains(github.ref_name, 'nightly') && github.event_name != 'schedule' }}
|
||||
|
||||
- name: Set build image result
|
||||
id: set-build-image-result
|
||||
run: |
|
||||
echo "build-image-result=success" >> $GITHUB_OUTPUT
|
||||
|
||||
release-cn-artifacts:
|
||||
name: Release artifacts to CN region
|
||||
@@ -340,23 +310,22 @@ jobs:
|
||||
build-windows-artifacts,
|
||||
release-images-to-dockerhub,
|
||||
]
|
||||
runs-on: ubuntu-latest-16-cores
|
||||
runs-on: ubuntu-20.04
|
||||
# When we push to ACR, it's easy to fail due to some unknown network issues.
|
||||
# However, we don't want to fail the whole workflow because of this.
|
||||
# The ACR have daily sync with DockerHub, so don't worry about the image not being updated.
|
||||
continue-on-error: true
|
||||
steps:
|
||||
- uses: actions/checkout@v4
|
||||
- uses: actions/checkout@v3
|
||||
with:
|
||||
fetch-depth: 0
|
||||
persist-credentials: false
|
||||
|
||||
- name: Release artifacts to CN region
|
||||
uses: ./.github/actions/release-cn-artifacts
|
||||
with:
|
||||
src-image-registry: docker.io
|
||||
src-image-namespace: ${{ vars.IMAGE_NAMESPACE }}
|
||||
src-image-name: ${{ vars.GREPTIMEDB_IMAGE_NAME }}
|
||||
src-image-name: greptimedb
|
||||
dst-image-registry-username: ${{ secrets.ALICLOUD_USERNAME }}
|
||||
dst-image-registry-password: ${{ secrets.ALICLOUD_PASSWORD }}
|
||||
dst-image-registry: ${{ vars.ACR_IMAGE_REGISTRY }}
|
||||
@@ -367,9 +336,8 @@ jobs:
|
||||
aws-cn-secret-access-key: ${{ secrets.AWS_CN_SECRET_ACCESS_KEY }}
|
||||
aws-cn-region: ${{ vars.AWS_RELEASE_BUCKET_REGION }}
|
||||
dev-mode: false
|
||||
upload-to-s3: true
|
||||
update-version-info: true
|
||||
push-latest-tag: ${{ needs.allocate-runners.outputs.is-current-version-latest == 'true' && github.ref_type == 'tag' && !contains(github.ref_name, 'nightly') && github.event_name != 'schedule' }}
|
||||
push-latest-tag: true
|
||||
|
||||
publish-github-release:
|
||||
name: Create GitHub release and upload artifacts
|
||||
@@ -382,12 +350,11 @@ jobs:
|
||||
build-windows-artifacts,
|
||||
release-images-to-dockerhub,
|
||||
]
|
||||
runs-on: ubuntu-latest
|
||||
runs-on: ubuntu-20.04
|
||||
steps:
|
||||
- uses: actions/checkout@v4
|
||||
- uses: actions/checkout@v3
|
||||
with:
|
||||
fetch-depth: 0
|
||||
persist-credentials: false
|
||||
|
||||
- name: Publish GitHub release
|
||||
uses: ./.github/actions/publish-github-release
|
||||
@@ -396,22 +363,21 @@ jobs:
|
||||
|
||||
### Stop runners ###
|
||||
# It's very necessary to split the job of releasing runners into 'stop-linux-amd64-runner' and 'stop-linux-arm64-runner'.
|
||||
# Because we can terminate the specified EC2 instance immediately after the job is finished without unnecessary waiting.
|
||||
# Because we can terminate the specified EC2 instance immediately after the job is finished without uncessary waiting.
|
||||
stop-linux-amd64-runner: # It's always run as the last job in the workflow to make sure that the runner is released.
|
||||
name: Stop linux-amd64 runner
|
||||
# Only run this job when the runner is allocated.
|
||||
if: ${{ always() }}
|
||||
runs-on: ubuntu-latest
|
||||
runs-on: ubuntu-20.04
|
||||
needs: [
|
||||
allocate-runners,
|
||||
build-linux-amd64-artifacts,
|
||||
]
|
||||
steps:
|
||||
- name: Checkout
|
||||
uses: actions/checkout@v4
|
||||
uses: actions/checkout@v3
|
||||
with:
|
||||
fetch-depth: 0
|
||||
persist-credentials: false
|
||||
|
||||
- name: Stop EC2 runner
|
||||
uses: ./.github/actions/stop-runner
|
||||
@@ -427,17 +393,16 @@ jobs:
|
||||
name: Stop linux-arm64 runner
|
||||
# Only run this job when the runner is allocated.
|
||||
if: ${{ always() }}
|
||||
runs-on: ubuntu-latest
|
||||
runs-on: ubuntu-20.04
|
||||
needs: [
|
||||
allocate-runners,
|
||||
build-linux-arm64-artifacts,
|
||||
]
|
||||
steps:
|
||||
- name: Checkout
|
||||
uses: actions/checkout@v4
|
||||
uses: actions/checkout@v3
|
||||
with:
|
||||
fetch-depth: 0
|
||||
persist-credentials: false
|
||||
|
||||
- name: Stop EC2 runner
|
||||
uses: ./.github/actions/stop-runner
|
||||
@@ -448,113 +413,3 @@ jobs:
|
||||
aws-secret-access-key: ${{ secrets.AWS_SECRET_ACCESS_KEY }}
|
||||
aws-region: ${{ vars.EC2_RUNNER_REGION }}
|
||||
github-token: ${{ secrets.GH_PERSONAL_ACCESS_TOKEN }}
|
||||
|
||||
bump-downstream-repo-versions:
|
||||
name: Bump downstream repo versions
|
||||
if: ${{ github.event_name == 'push' || github.event_name == 'schedule' }}
|
||||
needs: [allocate-runners, publish-github-release]
|
||||
runs-on: ubuntu-latest
|
||||
# Permission reference: https://docs.github.com/en/actions/using-jobs/assigning-permissions-to-jobs
|
||||
permissions:
|
||||
issues: write # Allows the action to create issues for cyborg.
|
||||
contents: write # Allows the action to create a release.
|
||||
steps:
|
||||
- uses: actions/checkout@v4
|
||||
with:
|
||||
fetch-depth: 0
|
||||
persist-credentials: false
|
||||
- uses: ./.github/actions/setup-cyborg
|
||||
- name: Bump downstream repo versions
|
||||
working-directory: cyborg
|
||||
run: pnpm tsx bin/bump-versions.ts
|
||||
env:
|
||||
TARGET_REPOS: website,docs,demo
|
||||
VERSION: ${{ needs.allocate-runners.outputs.version }}
|
||||
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
|
||||
WEBSITE_REPO_TOKEN: ${{ secrets.WEBSITE_REPO_TOKEN }}
|
||||
DOCS_REPO_TOKEN: ${{ secrets.DOCS_REPO_TOKEN }}
|
||||
DEMO_REPO_TOKEN: ${{ secrets.DEMO_REPO_TOKEN }}
|
||||
|
||||
bump-helm-charts-version:
|
||||
name: Bump helm charts version
|
||||
if: ${{ github.ref_type == 'tag' && !contains(github.ref_name, 'nightly') && github.event_name != 'schedule' && needs.allocate-runners.outputs.is-current-version-latest == 'true' }}
|
||||
needs: [allocate-runners, publish-github-release]
|
||||
runs-on: ubuntu-latest
|
||||
permissions:
|
||||
contents: write
|
||||
pull-requests: write
|
||||
steps:
|
||||
- name: Checkout repository
|
||||
uses: actions/checkout@v4
|
||||
with:
|
||||
fetch-depth: 0
|
||||
|
||||
- name: Bump helm charts version
|
||||
env:
|
||||
GITHUB_TOKEN: ${{ secrets.HELM_CHARTS_REPO_TOKEN }}
|
||||
VERSION: ${{ needs.allocate-runners.outputs.version }}
|
||||
run: |
|
||||
./.github/scripts/update-helm-charts-version.sh
|
||||
|
||||
bump-homebrew-greptime-version:
|
||||
name: Bump homebrew greptime version
|
||||
if: ${{ github.ref_type == 'tag' && !contains(github.ref_name, 'nightly') && github.event_name != 'schedule' && needs.allocate-runners.outputs.is-current-version-latest == 'true' }}
|
||||
needs: [allocate-runners, publish-github-release]
|
||||
runs-on: ubuntu-latest
|
||||
permissions:
|
||||
contents: write
|
||||
pull-requests: write
|
||||
steps:
|
||||
- name: Checkout repository
|
||||
uses: actions/checkout@v4
|
||||
with:
|
||||
fetch-depth: 0
|
||||
|
||||
- name: Bump homebrew greptime version
|
||||
env:
|
||||
GITHUB_TOKEN: ${{ secrets.HOMEBREW_GREPTIME_REPO_TOKEN }}
|
||||
VERSION: ${{ needs.allocate-runners.outputs.version }}
|
||||
run: |
|
||||
./.github/scripts/update-homebrew-greptme-version.sh
|
||||
|
||||
notification:
|
||||
if: ${{ github.repository == 'GreptimeTeam/greptimedb' && (github.event_name == 'push' || github.event_name == 'schedule') && always() }}
|
||||
name: Send notification to Greptime team
|
||||
needs: [
|
||||
release-images-to-dockerhub,
|
||||
build-macos-artifacts,
|
||||
build-windows-artifacts,
|
||||
]
|
||||
runs-on: ubuntu-latest
|
||||
# Permission reference: https://docs.github.com/en/actions/using-jobs/assigning-permissions-to-jobs
|
||||
permissions:
|
||||
issues: write # Allows the action to create issues for cyborg.
|
||||
contents: write # Allows the action to create a release.
|
||||
env:
|
||||
SLACK_WEBHOOK_URL: ${{ secrets.SLACK_WEBHOOK_URL_DEVELOP_CHANNEL }}
|
||||
steps:
|
||||
- uses: actions/checkout@v4
|
||||
with:
|
||||
fetch-depth: 0
|
||||
persist-credentials: false
|
||||
- uses: ./.github/actions/setup-cyborg
|
||||
- name: Report CI status
|
||||
id: report-ci-status
|
||||
working-directory: cyborg
|
||||
run: pnpm tsx bin/report-ci-failure.ts
|
||||
env:
|
||||
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
|
||||
CI_REPORT_STATUS: ${{ needs.release-images-to-dockerhub.outputs.build-image-result == 'success' && needs.build-windows-artifacts.outputs.build-windows-result == 'success' && needs.build-macos-artifacts.outputs.build-macos-result == 'success' }}
|
||||
- name: Notify release successful result
|
||||
uses: slackapi/slack-github-action@v1.25.0
|
||||
if: ${{ needs.release-images-to-dockerhub.outputs.build-image-result == 'success' && needs.build-windows-artifacts.outputs.build-windows-result == 'success' && needs.build-macos-artifacts.outputs.build-macos-result == 'success' }}
|
||||
with:
|
||||
payload: |
|
||||
{"text": "GreptimeDB's release version has completed successfully."}
|
||||
|
||||
- name: Notify release failed result
|
||||
uses: slackapi/slack-github-action@v1.25.0
|
||||
if: ${{ needs.release-images-to-dockerhub.outputs.build-image-result != 'success' || needs.build-windows-artifacts.outputs.build-windows-result != 'success' || needs.build-macos-artifacts.outputs.build-macos-result != 'success' }}
|
||||
with:
|
||||
payload: |
|
||||
{"text": "GreptimeDB's release version has failed, please check ${{ steps.report-ci-status.outputs.html_url }}."}
|
||||
|
||||
26 .github/workflows/schedule.yml (vendored)
@@ -1,26 +0,0 @@
|
||||
name: Schedule Management
|
||||
on:
|
||||
schedule:
|
||||
- cron: '4 2 * * *'
|
||||
workflow_dispatch:
|
||||
|
||||
|
||||
jobs:
|
||||
maintenance:
|
||||
name: Periodic Maintenance
|
||||
runs-on: ubuntu-latest
|
||||
permissions:
|
||||
contents: read
|
||||
issues: write
|
||||
pull-requests: write
|
||||
if: ${{ github.repository == 'GreptimeTeam/greptimedb' }}
|
||||
steps:
|
||||
- uses: actions/checkout@v4
|
||||
with:
|
||||
persist-credentials: false
|
||||
- uses: ./.github/actions/setup-cyborg
|
||||
- name: Do Maintenance
|
||||
working-directory: cyborg
|
||||
run: pnpm tsx bin/schedule.ts
|
||||
env:
|
||||
GITHUB_TOKEN: ${{ secrets.GH_PERSONAL_ACCESS_TOKEN }}
|
||||
30 .github/workflows/semantic-pull-request.yml (vendored)
@@ -1,30 +0,0 @@
|
||||
name: "Semantic Pull Request"
|
||||
|
||||
on:
|
||||
pull_request_target:
|
||||
types:
|
||||
- opened
|
||||
- reopened
|
||||
- edited
|
||||
|
||||
concurrency:
|
||||
group: ${{ github.workflow }}-${{ github.head_ref || github.run_id }}
|
||||
cancel-in-progress: true
|
||||
|
||||
permissions:
|
||||
contents: read
|
||||
pull-requests: write
|
||||
issues: write
|
||||
|
||||
jobs:
|
||||
check:
|
||||
runs-on: ubuntu-latest
|
||||
timeout-minutes: 10
|
||||
steps:
|
||||
- uses: actions/checkout@v4
|
||||
- uses: ./.github/actions/setup-cyborg
|
||||
- name: Check Pull Request
|
||||
working-directory: cyborg
|
||||
run: pnpm tsx bin/check-pull-request.ts
|
||||
env:
|
||||
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
|
||||
25 .github/workflows/size-label.yml (vendored, new file)
@@ -0,0 +1,25 @@
|
||||
name: size-labeler
|
||||
|
||||
on: [pull_request_target]
|
||||
|
||||
jobs:
|
||||
labeler:
|
||||
runs-on: ubuntu-latest
|
||||
name: Label the PR size
|
||||
permissions:
|
||||
issues: write
|
||||
pull-requests: write
|
||||
steps:
|
||||
- uses: codelytv/pr-size-labeler@v1
|
||||
with:
|
||||
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
|
||||
s_label: 'Size: S'
|
||||
s_max_size: '100'
|
||||
m_label: 'Size: M'
|
||||
m_max_size: '500'
|
||||
l_label: 'Size: L'
|
||||
l_max_size: '1000'
|
||||
xl_label: 'Size: XL'
|
||||
fail_if_xl: 'false'
|
||||
message_if_xl: ""
|
||||
files_to_ignore: 'Cargo.lock'
|
||||
19 .github/workflows/user-doc-label-checker.yml (vendored, new file)
@@ -0,0 +1,19 @@
|
||||
name: Check user doc labels
|
||||
on:
|
||||
pull_request:
|
||||
types:
|
||||
- opened
|
||||
- reopened
|
||||
- labeled
|
||||
- unlabeled
|
||||
|
||||
jobs:
|
||||
|
||||
check_labels:
|
||||
name: Check doc labels
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- uses: docker://agilepathway/pull-request-label-checker:latest
|
||||
with:
|
||||
one_of: Doc update required,Doc not needed
|
||||
repo_token: ${{ secrets.GITHUB_TOKEN }}
|
||||
21 .gitignore (vendored)
@@ -28,7 +28,6 @@ debug/
|
||||
# Logs
|
||||
**/__unittest_logs
|
||||
logs/
|
||||
!grafana/dashboards/logs/
|
||||
|
||||
# cpython's generated python byte code
|
||||
**/__pycache__/
|
||||
@@ -47,23 +46,3 @@ benchmarks/data
|
||||
*.code-workspace
|
||||
|
||||
venv/
|
||||
|
||||
# Fuzz tests
|
||||
tests-fuzz/artifacts/
|
||||
tests-fuzz/corpus/
|
||||
|
||||
# cargo-udeps reports
|
||||
udeps-report.json
|
||||
|
||||
# Nix
|
||||
.direnv
|
||||
.envrc
|
||||
|
||||
## default data home
|
||||
greptimedb_data
|
||||
|
||||
# github
|
||||
!/.github
|
||||
|
||||
# Claude code
|
||||
CLAUDE.md
|
||||
|
||||
@@ -16,7 +16,6 @@ repos:
|
||||
hooks:
|
||||
- id: fmt
|
||||
- id: clippy
|
||||
args: ["--workspace", "--all-targets", "--all-features", "--", "-D", "warnings"]
|
||||
stages: [pre-push]
|
||||
args: ["--workspace", "--all-targets", "--", "-D", "warnings", "-D", "clippy::print_stdout", "-D", "clippy::print_stderr"]
|
||||
stages: [push]
|
||||
- id: cargo-check
|
||||
args: ["--workspace", "--all-targets", "--all-features"]
|
||||
|
||||
43 AUTHOR.md
@@ -1,43 +0,0 @@
|
||||
# GreptimeDB Authors
|
||||
|
||||
## Individual Committers (in alphabetical order)
|
||||
|
||||
* [CookiePieWw](https://github.com/CookiePieWw)
|
||||
* [etolbakov](https://github.com/etolbakov)
|
||||
* [irenjj](https://github.com/irenjj)
|
||||
* [KKould](https://github.com/KKould)
|
||||
* [Lanqing Yang](https://github.com/lyang24)
|
||||
* [NiwakaDev](https://github.com/NiwakaDev)
|
||||
* [tisonkun](https://github.com/tisonkun)
|
||||
|
||||
## Team Members (in alphabetical order)
|
||||
|
||||
* [apdong2022](https://github.com/apdong2022)
|
||||
* [beryl678](https://github.com/beryl678)
|
||||
* [daviderli614](https://github.com/daviderli614)
|
||||
* [discord9](https://github.com/discord9)
|
||||
* [evenyag](https://github.com/evenyag)
|
||||
* [fengjiachun](https://github.com/fengjiachun)
|
||||
* [fengys1996](https://github.com/fengys1996)
|
||||
* [GrepTime](https://github.com/GrepTime)
|
||||
* [holalengyu](https://github.com/holalengyu)
|
||||
* [killme2008](https://github.com/killme2008)
|
||||
* [MichaelScofield](https://github.com/MichaelScofield)
|
||||
* [nicecui](https://github.com/nicecui)
|
||||
* [paomian](https://github.com/paomian)
|
||||
* [shuiyisong](https://github.com/shuiyisong)
|
||||
* [sunchanglong](https://github.com/sunchanglong)
|
||||
* [sunng87](https://github.com/sunng87)
|
||||
* [v0y4g3r](https://github.com/v0y4g3r)
|
||||
* [waynexia](https://github.com/waynexia)
|
||||
* [Wenjie0329](https://github.com/Wenjie0329)
|
||||
* [WenyXu](https://github.com/WenyXu)
|
||||
* [xtang](https://github.com/xtang)
|
||||
* [zhaoyingnan01](https://github.com/zhaoyingnan01)
|
||||
* [zhongzc](https://github.com/zhongzc)
|
||||
* [ZonaHex](https://github.com/ZonaHex)
|
||||
* [zyy17](https://github.com/zyy17)
|
||||
|
||||
## All Contributors
|
||||
|
||||
To see the full list of contributors, please visit our [Contributors page](https://github.com/GreptimeTeam/greptimedb/graphs/contributors)
|
||||
132 CODE_OF_CONDUCT.md (new file)
@@ -0,0 +1,132 @@
|
||||
# Contributor Covenant Code of Conduct
|
||||
|
||||
## Our Pledge
|
||||
|
||||
We as members, contributors, and leaders pledge to make participation in our
|
||||
community a harassment-free experience for everyone, regardless of age, body
|
||||
size, visible or invisible disability, ethnicity, sex characteristics, gender
|
||||
identity and expression, level of experience, education, socio-economic status,
|
||||
nationality, personal appearance, race, caste, color, religion, or sexual
|
||||
identity and orientation.
|
||||
|
||||
We pledge to act and interact in ways that contribute to an open, welcoming,
|
||||
diverse, inclusive, and healthy community.
|
||||
|
||||
## Our Standards
|
||||
|
||||
Examples of behavior that contributes to a positive environment for our
|
||||
community include:
|
||||
|
||||
* Demonstrating empathy and kindness toward other people
|
||||
* Being respectful of differing opinions, viewpoints, and experiences
|
||||
* Giving and gracefully accepting constructive feedback
|
||||
* Accepting responsibility and apologizing to those affected by our mistakes,
|
||||
and learning from the experience
|
||||
* Focusing on what is best not just for us as individuals, but for the overall
|
||||
community
|
||||
|
||||
Examples of unacceptable behavior include:
|
||||
|
||||
* The use of sexualized language or imagery, and sexual attention or advances of
|
||||
any kind
|
||||
* Trolling, insulting or derogatory comments, and personal or political attacks
|
||||
* Public or private harassment
|
||||
* Publishing others' private information, such as a physical or email address,
|
||||
without their explicit permission
|
||||
* Other conduct which could reasonably be considered inappropriate in a
|
||||
professional setting
|
||||
|
||||
## Enforcement Responsibilities
|
||||
|
||||
Community leaders are responsible for clarifying and enforcing our standards of
|
||||
acceptable behavior and will take appropriate and fair corrective action in
|
||||
response to any behavior that they deem inappropriate, threatening, offensive,
|
||||
or harmful.
|
||||
|
||||
Community leaders have the right and responsibility to remove, edit, or reject
|
||||
comments, commits, code, wiki edits, issues, and other contributions that are
|
||||
not aligned to this Code of Conduct, and will communicate reasons for moderation
|
||||
decisions when appropriate.
|
||||
|
||||
## Scope
|
||||
|
||||
This Code of Conduct applies within all community spaces, and also applies when
|
||||
an individual is officially representing the community in public spaces.
|
||||
Examples of representing our community include using an official e-mail address,
|
||||
posting via an official social media account, or acting as an appointed
|
||||
representative at an online or offline event.
|
||||
|
||||
## Enforcement
|
||||
|
||||
Instances of abusive, harassing, or otherwise unacceptable behavior may be
|
||||
reported to the community leaders responsible for enforcement at
|
||||
info@greptime.com.
|
||||
All complaints will be reviewed and investigated promptly and fairly.
|
||||
|
||||
All community leaders are obligated to respect the privacy and security of the
|
||||
reporter of any incident.
|
||||
|
||||
## Enforcement Guidelines
|
||||
|
||||
Community leaders will follow these Community Impact Guidelines in determining
|
||||
the consequences for any action they deem in violation of this Code of Conduct:
|
||||
|
||||
### 1. Correction
|
||||
|
||||
**Community Impact**: Use of inappropriate language or other behavior deemed
|
||||
unprofessional or unwelcome in the community.
|
||||
|
||||
**Consequence**: A private, written warning from community leaders, providing
|
||||
clarity around the nature of the violation and an explanation of why the
|
||||
behavior was inappropriate. A public apology may be requested.
|
||||
|
||||
### 2. Warning
|
||||
|
||||
**Community Impact**: A violation through a single incident or series of
|
||||
actions.
|
||||
|
||||
**Consequence**: A warning with consequences for continued behavior. No
|
||||
interaction with the people involved, including unsolicited interaction with
|
||||
those enforcing the Code of Conduct, for a specified period of time. This
|
||||
includes avoiding interactions in community spaces as well as external channels
|
||||
like social media. Violating these terms may lead to a temporary or permanent
|
||||
ban.
|
||||
|
||||
### 3. Temporary Ban
|
||||
|
||||
**Community Impact**: A serious violation of community standards, including
|
||||
sustained inappropriate behavior.
|
||||
|
||||
**Consequence**: A temporary ban from any sort of interaction or public
|
||||
communication with the community for a specified period of time. No public or
|
||||
private interaction with the people involved, including unsolicited interaction
|
||||
with those enforcing the Code of Conduct, is allowed during this period.
|
||||
Violating these terms may lead to a permanent ban.
|
||||
|
||||
### 4. Permanent Ban
|
||||
|
||||
**Community Impact**: Demonstrating a pattern of violation of community
|
||||
standards, including sustained inappropriate behavior, harassment of an
|
||||
individual, or aggression toward or disparagement of classes of individuals.
|
||||
|
||||
**Consequence**: A permanent ban from any sort of public interaction within the
|
||||
community.
|
||||
|
||||
## Attribution
|
||||
|
||||
This Code of Conduct is adapted from the [Contributor Covenant][homepage],
|
||||
version 2.1, available at
|
||||
[https://www.contributor-covenant.org/version/2/1/code_of_conduct.html][v2.1].
|
||||
|
||||
Community Impact Guidelines were inspired by
|
||||
[Mozilla's code of conduct enforcement ladder][Mozilla CoC].
|
||||
|
||||
For answers to common questions about this code of conduct, see the FAQ at
|
||||
[https://www.contributor-covenant.org/faq][FAQ]. Translations are available at
|
||||
[https://www.contributor-covenant.org/translations][translations].
|
||||
|
||||
[homepage]: https://www.contributor-covenant.org
|
||||
[v2.1]: https://www.contributor-covenant.org/version/2/1/code_of_conduct.html
|
||||
[Mozilla CoC]: https://github.com/mozilla/diversity
|
||||
[FAQ]: https://www.contributor-covenant.org/faq
|
||||
[translations]: https://www.contributor-covenant.org/translations
|
||||
@@ -2,11 +2,7 @@
|
||||
|
||||
Thanks a lot for considering contributing to GreptimeDB. We believe people like you would make GreptimeDB a great product. We intend to build a community where individuals can have open talks, show respect for one another, and speak with true ❤️. Meanwhile, we are to keep transparency and make your effort count here.
|
||||
|
||||
You can find our contributors at https://github.com/GreptimeTeam/greptimedb/graphs/contributors. When you dedicate to GreptimeDB for a few months and keep bringing high-quality contributions (code, docs, advocate, etc.), you will be a candidate of a committer.
|
||||
|
||||
A committer will be granted both read & write access to GreptimeDB repos. Check the [AUTHOR.md](AUTHOR.md) file for all current individual committers.
|
||||
|
||||
Please read the guidelines, and they can help you get started. Communicate respectfully with the developers maintaining and developing the project. In return, they should reciprocate that respect by addressing your issue, reviewing changes, as well as helping finalize and merge your pull requests.
|
||||
Please read the guidelines, and they can help you get started. Communicate with respect to developers maintaining and developing the project. In return, they should reciprocate that respect by addressing your issue, reviewing changes, as well as helping finalize and merge your pull requests.
|
||||
|
||||
Follow our [README](https://github.com/GreptimeTeam/greptimedb#readme) to get the whole picture of the project. To learn about the design of GreptimeDB, please refer to the [design docs](https://github.com/GrepTimeTeam/docs).
|
||||
|
||||
@@ -14,7 +10,7 @@ Follow our [README](https://github.com/GreptimeTeam/greptimedb#readme) to get th
|
||||
|
||||
It can feel intimidating to contribute to a complex project, but it can also be exciting and fun. These general notes will help everyone participate in this communal activity.
|
||||
|
||||
- Follow the [Code of Conduct](https://github.com/GreptimeTeam/.github/blob/main/.github/CODE_OF_CONDUCT.md)
|
||||
- Follow the [Code of Conduct](https://github.com/GreptimeTeam/greptimedb/blob/develop/CODE_OF_CONDUCT.md)
|
||||
- Small changes make huge differences. We will happily accept a PR making a single character change if it helps move forward. Don't wait to have everything working.
|
||||
- Check the closed issues before opening your issue.
|
||||
- Try to follow the existing style of the code.
|
||||
@@ -30,7 +26,7 @@ Pull requests are great, but we accept all kinds of other help if you like. Such
|
||||
|
||||
## Code of Conduct
|
||||
|
||||
Also, there are things that we are not looking for because they don't match the goals of the product or benefit the community. Please read [Code of Conduct](https://github.com/GreptimeTeam/.github/blob/main/.github/CODE_OF_CONDUCT.md); we hope everyone can keep good manners and become an honored member.
|
||||
Also, there are things that we are not looking for because they don't match the goals of the product or benefit the community. Please read [Code of Conduct](https://github.com/GreptimeTeam/greptimedb/blob/develop/CODE_OF_CONDUCT.md); we hope everyone can keep good manners and become an honored member.
|
||||
|
||||
## License
|
||||
|
||||
@@ -54,19 +50,15 @@ GreptimeDB uses the [Apache 2.0 license](https://github.com/GreptimeTeam/greptim
|
||||
|
||||
- To ensure that community is free and confident in its ability to use your contributions, please sign the Contributor License Agreement (CLA) which will be incorporated in the pull request process.
|
||||
- Make sure all files have proper license header (running `docker run --rm -v $(pwd):/github/workspace ghcr.io/korandoru/hawkeye-native:v3 format` from the project root).
|
||||
- Make sure all your codes are formatted and follow the [coding style](https://pingcap.github.io/style-guide/rust/) and [style guide](docs/style-guide.md).
|
||||
- Make sure all unit tests are passed using [nextest](https://nexte.st/index.html) `cargo nextest run --workspace --features pg_kvbackend,mysql_kvbackend` or `make test`.
|
||||
- Make sure all clippy warnings are fixed (you can check it locally by running `cargo clippy --workspace --all-targets -- -D warnings` or `make clippy`).
|
||||
- Ensure there are no unused dependencies by running `make check-udeps` (clean them up with `make fix-udeps` if reported).
|
||||
- If you must keep a target-specific dependency (e.g. under `[target.'cfg(...)'.dev-dependencies]`), add a cargo-udeps ignore entry in the same `Cargo.toml`, for example:
|
||||
`[package.metadata.cargo-udeps.ignore]` with `development = ["rexpect"]` (or `dependencies`/`build` as appropriate).
|
||||
- When modifying sample configuration files in `config/`, run `make config-docs` (which requires Docker to be installed) to update the configuration documentation and include it in your commit.
|
||||
- Make sure all your codes are formatted and follow the [coding style](https://pingcap.github.io/style-guide/rust/).
|
||||
- Make sure all unit tests are passed (using `cargo test --workspace` or [nextest](https://nexte.st/index.html) `cargo nextest run`).
|
||||
- Make sure all clippy warnings are fixed (you can check it locally by running `cargo clippy --workspace --all-targets -- -D warnings`).
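For the cargo-udeps item in the checklist above, the ignore entry it describes would look roughly like this in the affected crate's `Cargo.toml` (a sketch based on the `rexpect` example given there):

```toml
# In the crate that keeps the target-specific dev-dependency (sketch only).
[package.metadata.cargo-udeps.ignore]
# Tell cargo-udeps not to report this dev-dependency as unused.
development = ["rexpect"]
# The checklist also mentions `dependencies`/`build` entries for the other
# dependency kinds.
```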
|
||||
|
||||
#### `pre-commit` Hooks
|
||||
|
||||
You could setup the [`pre-commit`](https://pre-commit.com/#plugins) hooks to run these checks on every commit automatically.
|
||||
|
||||
1. Install `pre-commit`
|
||||
1. Install `pre-commit`
|
||||
|
||||
pip install pre-commit
|
||||
|
||||
@@ -74,7 +66,7 @@ You could setup the [`pre-commit`](https://pre-commit.com/#plugins) hooks to run
|
||||
|
||||
brew install pre-commit
|
||||
|
||||
2. Install the `pre-commit` hooks
|
||||
2. Install the `pre-commit` hooks
|
||||
|
||||
$ pre-commit install
|
||||
pre-commit installed at .git/hooks/pre-commit
|
||||
@@ -112,7 +104,7 @@ of what you were trying to do and what went wrong. You can also reach for help i
|
||||
The core team will be thrilled if you would like to participate in any way you like. When you are stuck, try to ask for help by filing an issue, with a detailed description of what you were trying to do and what went wrong. If you have any questions or if you would like to get involved in our community, please check out:
|
||||
|
||||
- [GreptimeDB Community Slack](https://greptime.com/slack)
|
||||
- [GreptimeDB GitHub Discussions](https://github.com/GreptimeTeam/greptimedb/discussions)
|
||||
- [GreptimeDB Github Discussions](https://github.com/GreptimeTeam/greptimedb/discussions)
|
||||
|
||||
Also, see some extra GreptimeDB content:
|
||||
|
||||
|
||||
11660 Cargo.lock (generated): diff suppressed because it is too large
237 Cargo.toml
@@ -1,263 +1,153 @@
|
||||
[workspace]
|
||||
members = [
|
||||
"benchmarks",
|
||||
"src/api",
|
||||
"src/auth",
|
||||
"src/cache",
|
||||
"src/catalog",
|
||||
"src/cli",
|
||||
"src/client",
|
||||
"src/cmd",
|
||||
"src/common/base",
|
||||
"src/common/catalog",
|
||||
"src/common/config",
|
||||
"src/common/datasource",
|
||||
"src/common/decimal",
|
||||
"src/common/error",
|
||||
"src/common/event-recorder",
|
||||
"src/common/frontend",
|
||||
"src/common/function",
|
||||
"src/common/macro",
|
||||
"src/common/greptimedb-telemetry",
|
||||
"src/common/grpc",
|
||||
"src/common/grpc-expr",
|
||||
"src/common/macro",
|
||||
"src/common/mem-prof",
|
||||
"src/common/meta",
|
||||
"src/common/options",
|
||||
"src/common/plugins",
|
||||
"src/common/pprof",
|
||||
"src/common/procedure",
|
||||
"src/common/procedure-test",
|
||||
"src/common/query",
|
||||
"src/common/recordbatch",
|
||||
"src/common/runtime",
|
||||
"src/common/session",
|
||||
"src/common/sql",
|
||||
"src/common/stat",
|
||||
"src/common/substrait",
|
||||
"src/common/telemetry",
|
||||
"src/common/test-util",
|
||||
"src/common/time",
|
||||
"src/common/decimal",
|
||||
"src/common/version",
|
||||
"src/common/wal",
|
||||
"src/common/workload",
|
||||
"src/datanode",
|
||||
"src/datatypes",
|
||||
"src/file-engine",
|
||||
"src/flow",
|
||||
"src/frontend",
|
||||
"src/index",
|
||||
"src/log-query",
|
||||
"src/log-store",
|
||||
"src/meta-client",
|
||||
"src/meta-srv",
|
||||
"src/metric-engine",
|
||||
"src/mito-codec",
|
||||
"src/mito2",
|
||||
"src/object-store",
|
||||
"src/operator",
|
||||
"src/partition",
|
||||
"src/pipeline",
|
||||
"src/plugins",
|
||||
"src/promql",
|
||||
"src/puffin",
|
||||
"src/query",
|
||||
"src/script",
|
||||
"src/servers",
|
||||
"src/session",
|
||||
"src/sql",
|
||||
"src/store-api",
|
||||
"src/table",
|
||||
"tests-fuzz",
|
||||
"src/index",
|
||||
"tests-integration",
|
||||
"tests/runner",
|
||||
]
|
||||
resolver = "2"
|
||||
|
||||
[workspace.package]
|
||||
version = "0.17.2"
|
||||
version = "0.5.0"
|
||||
edition = "2021"
|
||||
license = "Apache-2.0"
|
||||
|
||||
[workspace.lints]
|
||||
clippy.print_stdout = "warn"
|
||||
clippy.print_stderr = "warn"
|
||||
clippy.dbg_macro = "warn"
|
||||
clippy.implicit_clone = "warn"
|
||||
clippy.result_large_err = "allow"
|
||||
clippy.large_enum_variant = "allow"
|
||||
clippy.doc_overindented_list_items = "allow"
|
||||
clippy.uninlined_format_args = "allow"
|
||||
rust.unknown_lints = "deny"
|
||||
rust.unexpected_cfgs = { level = "warn", check-cfg = ['cfg(tokio_unstable)'] }
|
||||
|
||||
[workspace.dependencies]
|
||||
# DO_NOT_REMOVE_THIS: BEGIN_OF_EXTERNAL_DEPENDENCIES
|
||||
# We turn off default-features for some dependencies here so the workspaces which inherit them can
|
||||
# selectively turn them on if needed, since we can override default-features = true (from false)
|
||||
# for the inherited dependency but cannot do the reverse (override from true to false).
|
||||
#
|
||||
# See for more detaiils: https://github.com/rust-lang/cargo/issues/11329
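The comment above states Cargo's one-way override rule for inherited dependencies: a member crate can additively re-enable default features that the workspace declaration turned off, but cannot turn them off once the workspace enables them. A minimal sketch of the member-crate side, using a hypothetical member manifest (illustrative only, not taken from this diff):

```toml
# Hypothetical member crate Cargo.toml (sketch only, not part of this workspace).
[dependencies]
# Inherits the version pinned in [workspace.dependencies]; default-features
# stays false, exactly as declared there.
parquet = { workspace = true }
# A member may additively re-enable default features on top of that:
# parquet = { workspace = true, default-features = true }
# The reverse (disabling default features that the workspace enables) is not
# honored by Cargo, which is why the workspace declarations keep them off.
```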
|
||||
ahash = { version = "0.8", features = ["compile-time-rng"] }
|
||||
aquamarine = "0.6"
|
||||
arrow = { version = "56.0", features = ["prettyprint"] }
|
||||
arrow-array = { version = "56.0", default-features = false, features = ["chrono-tz"] }
|
||||
arrow-buffer = "56.0"
|
||||
arrow-flight = "56.0"
|
||||
arrow-ipc = { version = "56.0", default-features = false, features = ["lz4", "zstd"] }
|
||||
arrow-schema = { version = "56.0", features = ["serde"] }
|
||||
aquamarine = "0.3"
|
||||
arrow = { version = "47.0" }
|
||||
arrow-array = "47.0"
|
||||
arrow-flight = "47.0"
|
||||
arrow-schema = { version = "47.0", features = ["serde"] }
|
||||
async-stream = "0.3"
|
||||
async-trait = "0.1"
|
||||
# Remember to update axum-extra, axum-macros when updating axum
|
||||
axum = "0.8"
|
||||
axum-extra = "0.10"
|
||||
axum-macros = "0.5"
|
||||
backon = "1"
|
||||
base64 = "0.22"
|
||||
base64 = "0.21"
|
||||
bigdecimal = "0.4.2"
|
||||
bitflags = "2.4.1"
|
||||
bytemuck = "1.12"
|
||||
bytes = { version = "1.7", features = ["serde"] }
|
||||
bytes = { version = "1.5", features = ["serde"] }
|
||||
chrono = { version = "0.4", features = ["serde"] }
|
||||
chrono-tz = "0.10.1"
|
||||
clap = { version = "4.4", features = ["derive"] }
|
||||
config = "0.13.0"
|
||||
crossbeam-utils = "0.8"
|
||||
dashmap = "6.1"
|
||||
datafusion = { git = "https://github.com/GreptimeTeam/datafusion.git", rev = "7d5214512740b4dfb742b6b3d91ed9affcc2c9d0" }
|
||||
datafusion-common = { git = "https://github.com/GreptimeTeam/datafusion.git", rev = "7d5214512740b4dfb742b6b3d91ed9affcc2c9d0" }
|
||||
datafusion-expr = { git = "https://github.com/GreptimeTeam/datafusion.git", rev = "7d5214512740b4dfb742b6b3d91ed9affcc2c9d0" }
|
||||
datafusion-functions = { git = "https://github.com/GreptimeTeam/datafusion.git", rev = "7d5214512740b4dfb742b6b3d91ed9affcc2c9d0" }
|
||||
datafusion-functions-aggregate-common = { git = "https://github.com/GreptimeTeam/datafusion.git", rev = "7d5214512740b4dfb742b6b3d91ed9affcc2c9d0" }
|
||||
datafusion-optimizer = { git = "https://github.com/GreptimeTeam/datafusion.git", rev = "7d5214512740b4dfb742b6b3d91ed9affcc2c9d0" }
|
||||
datafusion-orc = { git = "https://github.com/GreptimeTeam/datafusion-orc", rev = "a0a5f902158f153119316eaeec868cff3fc8a99d" }
|
||||
datafusion-physical-expr = { git = "https://github.com/GreptimeTeam/datafusion.git", rev = "7d5214512740b4dfb742b6b3d91ed9affcc2c9d0" }
|
||||
datafusion-physical-plan = { git = "https://github.com/GreptimeTeam/datafusion.git", rev = "7d5214512740b4dfb742b6b3d91ed9affcc2c9d0" }
|
||||
datafusion-sql = { git = "https://github.com/GreptimeTeam/datafusion.git", rev = "7d5214512740b4dfb742b6b3d91ed9affcc2c9d0" }
|
||||
datafusion-substrait = { git = "https://github.com/GreptimeTeam/datafusion.git", rev = "7d5214512740b4dfb742b6b3d91ed9affcc2c9d0" }
|
||||
deadpool = "0.12"
|
||||
deadpool-postgres = "0.14"
|
||||
derive_builder = "0.20"
|
||||
dotenv = "0.15"
|
||||
either = "1.15"
|
||||
etcd-client = { git = "https://github.com/GreptimeTeam/etcd-client", rev = "f62df834f0cffda355eba96691fe1a9a332b75a7", features = [
|
||||
"tls",
|
||||
"tls-roots",
|
||||
] }
|
||||
dashmap = "5.4"
|
||||
datafusion = { git = "https://github.com/apache/arrow-datafusion.git", rev = "26e43acac3a96cec8dd4c8365f22dfb1a84306e9" }
|
||||
datafusion-common = { git = "https://github.com/apache/arrow-datafusion.git", rev = "26e43acac3a96cec8dd4c8365f22dfb1a84306e9" }
|
||||
datafusion-expr = { git = "https://github.com/apache/arrow-datafusion.git", rev = "26e43acac3a96cec8dd4c8365f22dfb1a84306e9" }
|
||||
datafusion-optimizer = { git = "https://github.com/apache/arrow-datafusion.git", rev = "26e43acac3a96cec8dd4c8365f22dfb1a84306e9" }
|
||||
datafusion-physical-expr = { git = "https://github.com/apache/arrow-datafusion.git", rev = "26e43acac3a96cec8dd4c8365f22dfb1a84306e9" }
|
||||
datafusion-sql = { git = "https://github.com/apache/arrow-datafusion.git", rev = "26e43acac3a96cec8dd4c8365f22dfb1a84306e9" }
|
||||
datafusion-substrait = { git = "https://github.com/apache/arrow-datafusion.git", rev = "26e43acac3a96cec8dd4c8365f22dfb1a84306e9" }
|
||||
derive_builder = "0.12"
|
||||
etcd-client = "0.12"
|
||||
fst = "0.4.7"
|
||||
futures = "0.3"
|
||||
futures-util = "0.3"
|
||||
greptime-proto = { git = "https://github.com/GreptimeTeam/greptime-proto.git", rev = "66eb089afa6baaa3ddfafabd0a4abbe317d012c3" }
|
||||
hex = "0.4"
|
||||
http = "1"
|
||||
humantime = "2.1"
|
||||
greptime-proto = { git = "https://github.com/GreptimeTeam/greptime-proto.git", rev = "a31ea166fc015ea7ff111ac94e26c3a5d64364d2" }
|
||||
humantime-serde = "1.1"
|
||||
hyper = "1.1"
|
||||
hyper-util = "0.1"
|
||||
itertools = "0.14"
|
||||
jsonb = { git = "https://github.com/databendlabs/jsonb.git", rev = "8c8d2fc294a39f3ff08909d60f718639cfba3875", default-features = false }
|
||||
itertools = "0.10"
|
||||
lazy_static = "1.4"
|
||||
local-ip-address = "0.6"
|
||||
loki-proto = { git = "https://github.com/GreptimeTeam/loki-proto.git", rev = "3b7cd33234358b18ece977bf689dc6fb760f29ab" }
|
||||
meter-core = { git = "https://github.com/GreptimeTeam/greptime-meter.git", rev = "5618e779cf2bb4755b499c630fba4c35e91898cb" }
|
||||
mockall = "0.13"
|
||||
meter-core = { git = "https://github.com/GreptimeTeam/greptime-meter.git", rev = "abbd357c1e193cd270ea65ee7652334a150b628f" }
|
||||
mockall = "0.11.4"
|
||||
moka = "0.12"
|
||||
nalgebra = "0.33"
|
||||
nix = { version = "0.30.1", default-features = false, features = ["event", "fs", "process"] }
|
||||
notify = "8.0"
|
||||
num_cpus = "1.16"
|
||||
object_store_opendal = "0.54"
|
||||
once_cell = "1.18"
|
||||
opentelemetry-proto = { version = "0.30", features = [
|
||||
opentelemetry-proto = { git = "https://github.com/waynexia/opentelemetry-rust.git", rev = "33841b38dda79b15f2024952be5f32533325ca02", features = [
|
||||
"gen-tonic",
|
||||
"metrics",
|
||||
"trace",
|
||||
"with-serde",
|
||||
"logs",
|
||||
] }
|
||||
ordered-float = { version = "4.3", features = ["serde"] }
|
||||
parking_lot = "0.12"
|
||||
parquet = { version = "56.0", default-features = false, features = ["arrow", "async", "object_store"] }
|
||||
parquet = "47.0"
|
||||
paste = "1.0"
|
||||
pin-project = "1.0"
|
||||
pretty_assertions = "1.4.0"
|
||||
prometheus = { version = "0.13.3", features = ["process"] }
|
||||
promql-parser = { version = "0.6", features = ["ser"] }
|
||||
prost = { version = "0.13", features = ["no-recursion-limit"] }
|
||||
prost-types = "0.13"
|
||||
raft-engine = { version = "0.4.1", default-features = false }
|
||||
rand = "0.9"
|
||||
ratelimit = "0.10"
|
||||
prost = "0.12"
|
||||
raft-engine = { git = "https://github.com/tikv/raft-engine.git", rev = "22dfb426cd994602b57725ef080287d3e53db479" }
|
||||
rand = "0.8"
|
||||
regex = "1.8"
|
||||
regex-automata = "0.4"
|
||||
reqwest = { version = "0.12", default-features = false, features = [
|
||||
regex-automata = { version = "0.1", features = ["transducer"] }
|
||||
reqwest = { version = "0.11", default-features = false, features = [
|
||||
"json",
|
||||
"rustls-tls-native-roots",
|
||||
"stream",
|
||||
"multipart",
|
||||
] }
|
||||
rskafka = { git = "https://github.com/WenyXu/rskafka.git", rev = "7b0f31ed39db049b4ee2e5f1e95b5a30be9baf76", features = [
|
||||
"transport-tls",
|
||||
] }
|
||||
rstest = "0.25"
|
||||
rstest_reuse = "0.7"
|
||||
rskafka = "0.5"
|
||||
rust_decimal = "1.33"
|
||||
rustc-hash = "2.0"
|
||||
# It is worth noting that we should try to avoid using aws-lc-rs until it can be compiled on various platforms.
|
||||
rustls = { version = "0.23.25", default-features = false }
|
||||
sea-query = "0.32"
|
||||
serde = { version = "1.0", features = ["derive"] }
|
||||
serde_json = { version = "1.0", features = ["float_roundtrip"] }
|
||||
serde_with = "3"
|
||||
simd-json = "0.15"
|
||||
similar-asserts = "1.6.0"
|
||||
smallvec = { version = "1", features = ["serde"] }
|
||||
snafu = "0.8"
|
||||
sqlparser = { git = "https://github.com/GreptimeTeam/sqlparser-rs.git", rev = "39e4fc94c3c741981f77e9d63b5ce8c02e0a27ea", features = [
|
||||
serde_json = "1.0"
|
||||
smallvec = "1"
|
||||
snafu = "0.7"
|
||||
# on branch v0.38.x
|
||||
sqlparser = { git = "https://github.com/GreptimeTeam/sqlparser-rs.git", rev = "6a93567ae38d42be5c8d08b13c8ff4dde26502ef", features = [
|
||||
"visitor",
|
||||
"serde",
|
||||
] } # branch = "v0.55.x"
|
||||
sqlx = { version = "0.8", features = [
|
||||
"runtime-tokio-rustls",
|
||||
"mysql",
|
||||
"postgres",
|
||||
"chrono",
|
||||
] }
|
||||
strum = { version = "0.27", features = ["derive"] }
|
||||
sysinfo = "0.33"
|
||||
strum = { version = "0.25", features = ["derive"] }
|
||||
tempfile = "3"
|
||||
tokio = { version = "1.47", features = ["full"] }
|
||||
tokio-postgres = "0.7"
|
||||
tokio-rustls = { version = "0.26.2", default-features = false }
|
||||
tokio-stream = "0.1"
|
||||
tokio = { version = "1.28", features = ["full"] }
|
||||
tokio-stream = { version = "0.1" }
|
||||
tokio-util = { version = "0.7", features = ["io-util", "compat"] }
|
||||
toml = "0.8.8"
|
||||
tonic = { version = "0.13", features = ["tls-ring", "gzip", "zstd"] }
|
||||
tower = "0.5"
|
||||
tower-http = "0.6"
|
||||
tracing = "0.1"
|
||||
tracing-appender = "0.2"
|
||||
tracing-subscriber = { version = "0.3", features = ["env-filter", "json", "fmt"] }
|
||||
typetag = "0.2"
|
||||
uuid = { version = "1.17", features = ["serde", "v4", "fast-rng"] }
|
||||
vrl = "0.25"
|
||||
zstd = "0.13"
|
||||
# DO_NOT_REMOVE_THIS: END_OF_EXTERNAL_DEPENDENCIES
|
||||
tonic = { version = "0.10", features = ["tls"] }
|
||||
uuid = { version = "1", features = ["serde", "v4", "fast-rng"] }
|
||||
|
||||
## workspaces members
|
||||
api = { path = "src/api" }
|
||||
auth = { path = "src/auth" }
|
||||
cache = { path = "src/cache" }
|
||||
catalog = { path = "src/catalog" }
|
||||
cli = { path = "src/cli" }
|
||||
client = { path = "src/client" }
|
||||
cmd = { path = "src/cmd", default-features = false }
|
||||
cmd = { path = "src/cmd" }
|
||||
common-base = { path = "src/common/base" }
|
||||
common-catalog = { path = "src/common/catalog" }
|
||||
common-config = { path = "src/common/config" }
|
||||
common-datasource = { path = "src/common/datasource" }
|
||||
common-decimal = { path = "src/common/decimal" }
|
||||
common-error = { path = "src/common/error" }
|
||||
common-event-recorder = { path = "src/common/event-recorder" }
|
||||
common-frontend = { path = "src/common/frontend" }
|
||||
common-function = { path = "src/common/function" }
|
||||
common-greptimedb-telemetry = { path = "src/common/greptimedb-telemetry" }
|
||||
common-grpc = { path = "src/common/grpc" }
|
||||
@@ -265,76 +155,49 @@ common-grpc-expr = { path = "src/common/grpc-expr" }
|
||||
common-macro = { path = "src/common/macro" }
|
||||
common-mem-prof = { path = "src/common/mem-prof" }
|
||||
common-meta = { path = "src/common/meta" }
|
||||
common-options = { path = "src/common/options" }
|
||||
common-plugins = { path = "src/common/plugins" }
|
||||
common-pprof = { path = "src/common/pprof" }
|
||||
common-procedure = { path = "src/common/procedure" }
|
||||
common-procedure-test = { path = "src/common/procedure-test" }
|
||||
common-query = { path = "src/common/query" }
|
||||
common-recordbatch = { path = "src/common/recordbatch" }
|
||||
common-runtime = { path = "src/common/runtime" }
|
||||
common-session = { path = "src/common/session" }
|
||||
common-sql = { path = "src/common/sql" }
|
||||
common-telemetry = { path = "src/common/telemetry" }
|
||||
common-test-util = { path = "src/common/test-util" }
|
||||
common-time = { path = "src/common/time" }
|
||||
common-version = { path = "src/common/version" }
|
||||
common-wal = { path = "src/common/wal" }
|
||||
common-workload = { path = "src/common/workload" }
|
||||
datanode = { path = "src/datanode" }
|
||||
datatypes = { path = "src/datatypes" }
|
||||
file-engine = { path = "src/file-engine" }
|
||||
flow = { path = "src/flow" }
|
||||
frontend = { path = "src/frontend", default-features = false }
|
||||
index = { path = "src/index" }
|
||||
log-query = { path = "src/log-query" }
|
||||
frontend = { path = "src/frontend" }
|
||||
log-store = { path = "src/log-store" }
|
||||
meta-client = { path = "src/meta-client" }
|
||||
meta-srv = { path = "src/meta-srv" }
|
||||
metric-engine = { path = "src/metric-engine" }
|
||||
mito-codec = { path = "src/mito-codec" }
|
||||
mito2 = { path = "src/mito2" }
|
||||
object-store = { path = "src/object-store" }
|
||||
operator = { path = "src/operator" }
|
||||
otel-arrow-rust = { git = "https://github.com/GreptimeTeam/otel-arrow", rev = "2d64b7c0fa95642028a8205b36fe9ea0b023ec59", features = [
|
||||
"server",
|
||||
] }
|
||||
partition = { path = "src/partition" }
|
||||
pipeline = { path = "src/pipeline" }
|
||||
plugins = { path = "src/plugins" }
|
||||
promql = { path = "src/promql" }
|
||||
puffin = { path = "src/puffin" }
|
||||
query = { path = "src/query" }
|
||||
script = { path = "src/script" }
|
||||
servers = { path = "src/servers" }
|
||||
session = { path = "src/session" }
|
||||
sql = { path = "src/sql" }
|
||||
stat = { path = "src/common/stat" }
|
||||
store-api = { path = "src/store-api" }
|
||||
substrait = { path = "src/common/substrait" }
|
||||
table = { path = "src/table" }
|
||||
|
||||
[workspace.dependencies.meter-macros]
|
||||
git = "https://github.com/GreptimeTeam/greptime-meter.git"
|
||||
rev = "5618e779cf2bb4755b499c630fba4c35e91898cb"
|
||||
rev = "abbd357c1e193cd270ea65ee7652334a150b628f"
|
||||
|
||||
[profile.release]
|
||||
debug = 1
|
||||
|
||||
[profile.nightly]
|
||||
inherits = "release"
|
||||
strip = "debuginfo"
|
||||
strip = true
|
||||
lto = "thin"
|
||||
debug = false
|
||||
incremental = false
|
||||
|
||||
[profile.ci]
|
||||
inherits = "dev"
|
||||
strip = true
|
||||
|
||||
[profile.dev.package.sqlness-runner]
|
||||
debug = false
|
||||
strip = true
|
||||
|
||||
[profile.dev.package.tests-fuzz]
|
||||
debug = false
|
||||
strip = true
|
||||
|
||||
@@ -1,6 +1,3 @@
|
||||
[target.aarch64-unknown-linux-gnu]
|
||||
image = "ghcr.io/cross-rs/aarch64-unknown-linux-gnu:0.2.5"
|
||||
|
||||
[build]
|
||||
pre-build = [
|
||||
"dpkg --add-architecture $CROSS_DEB_ARCH",
|
||||
@@ -8,8 +5,3 @@ pre-build = [
|
||||
"curl -LO https://github.com/protocolbuffers/protobuf/releases/download/v3.15.8/protoc-3.15.8-linux-x86_64.zip && unzip protoc-3.15.8-linux-x86_64.zip -d /usr/",
|
||||
"chmod a+x /usr/bin/protoc && chmod -R a+rx /usr/include/google",
|
||||
]
|
||||
|
||||
[build.env]
|
||||
passthrough = [
|
||||
"JEMALLOC_SYS_WITH_LG_PAGE",
|
||||
]
|
||||
|
||||
95 Makefile
@@ -3,12 +3,10 @@ CARGO_PROFILE ?=
|
||||
FEATURES ?=
|
||||
TARGET_DIR ?=
|
||||
TARGET ?=
|
||||
BUILD_BIN ?= greptime
|
||||
CARGO_BUILD_OPTS := --locked
|
||||
IMAGE_REGISTRY ?= docker.io
|
||||
IMAGE_NAMESPACE ?= greptime
|
||||
IMAGE_TAG ?= latest
|
||||
DEV_BUILDER_IMAGE_TAG ?= 2025-05-19-f55023f3-20250829091211
|
||||
BUILDX_MULTI_PLATFORM_BUILD ?= false
|
||||
BUILDX_BUILDER_NAME ?= gtbuilder
|
||||
BASE_IMAGE ?= ubuntu
|
||||
@@ -16,13 +14,12 @@ RUST_TOOLCHAIN ?= $(shell cat rust-toolchain.toml | grep channel | cut -d'"' -f2
|
||||
CARGO_REGISTRY_CACHE ?= ${HOME}/.cargo/registry
|
||||
ARCH := $(shell uname -m | sed 's/x86_64/amd64/' | sed 's/aarch64/arm64/')
|
||||
OUTPUT_DIR := $(shell if [ "$(RELEASE)" = "true" ]; then echo "release"; elif [ ! -z "$(CARGO_PROFILE)" ]; then echo "$(CARGO_PROFILE)" ; else echo "debug"; fi)
|
||||
SQLNESS_OPTS ?=
|
||||
|
||||
# The arguments for running integration tests.
|
||||
ETCD_VERSION ?= v3.5.9
|
||||
ETCD_IMAGE ?= quay.io/coreos/etcd:${ETCD_VERSION}
|
||||
RETRY_COUNT ?= 3
|
||||
NEXTEST_OPTS := --retries ${RETRY_COUNT} --features pg_kvbackend,mysql_kvbackend
|
||||
NEXTEST_OPTS := --retries ${RETRY_COUNT}
|
||||
BUILD_JOBS ?= $(shell which nproc 1>/dev/null && expr $$(nproc) / 2) # If nproc is not available, we don't set the build jobs.
|
||||
ifeq ($(BUILD_JOBS), 0) # If the number of cores is less than 2, set the build jobs to 1.
|
||||
BUILD_JOBS := 1
|
||||
@@ -32,10 +29,6 @@ ifneq ($(strip $(BUILD_JOBS)),)
|
||||
NEXTEST_OPTS += --build-jobs=${BUILD_JOBS}
|
||||
endif
|
||||
|
||||
ifneq ($(strip $(BUILD_JOBS)),)
|
||||
SQLNESS_OPTS += --jobs ${BUILD_JOBS}
|
||||
endif
|
||||
|
||||
ifneq ($(strip $(CARGO_PROFILE)),)
|
||||
CARGO_BUILD_OPTS += --profile ${CARGO_PROFILE}
|
||||
endif
|
||||
@@ -52,20 +45,12 @@ ifneq ($(strip $(TARGET)),)
|
||||
CARGO_BUILD_OPTS += --target ${TARGET}
|
||||
endif
|
||||
|
||||
ifneq ($(strip $(BUILD_BIN)),)
|
||||
CARGO_BUILD_OPTS += --bin ${BUILD_BIN}
|
||||
endif
|
||||
|
||||
ifneq ($(strip $(RELEASE)),)
|
||||
CARGO_BUILD_OPTS += --release
|
||||
endif
|
||||
|
||||
ifeq ($(BUILDX_MULTI_PLATFORM_BUILD), all)
|
||||
ifeq ($(BUILDX_MULTI_PLATFORM_BUILD), true)
|
||||
BUILDX_MULTI_PLATFORM_BUILD_OPTS := --platform linux/amd64,linux/arm64 --push
|
||||
else ifeq ($(BUILDX_MULTI_PLATFORM_BUILD), amd64)
|
||||
BUILDX_MULTI_PLATFORM_BUILD_OPTS := --platform linux/amd64 --push
|
||||
else ifeq ($(BUILDX_MULTI_PLATFORM_BUILD), arm64)
|
||||
BUILDX_MULTI_PLATFORM_BUILD_OPTS := --platform linux/arm64 --push
|
||||
else
|
||||
BUILDX_MULTI_PLATFORM_BUILD_OPTS := -o type=docker
|
||||
endif
|
||||
@@ -80,11 +65,11 @@ endif
|
||||
build: ## Build debug version greptime.
|
||||
cargo ${CARGO_EXTENSION} build ${CARGO_BUILD_OPTS}
|
||||
|
||||
.PHONY: build-by-dev-builder
|
||||
.POHNY: build-by-dev-builder
|
||||
build-by-dev-builder: ## Build greptime by dev-builder.
|
||||
docker run --network=host \
|
||||
-v ${PWD}:/greptimedb -v ${CARGO_REGISTRY_CACHE}:/root/.cargo/registry \
|
||||
-w /greptimedb ${IMAGE_REGISTRY}/${IMAGE_NAMESPACE}/dev-builder-${BASE_IMAGE}:${DEV_BUILDER_IMAGE_TAG} \
|
||||
-w /greptimedb ${IMAGE_REGISTRY}/${IMAGE_NAMESPACE}/dev-builder-${BASE_IMAGE}:latest \
|
||||
make build \
|
||||
CARGO_EXTENSION="${CARGO_EXTENSION}" \
|
||||
CARGO_PROFILE=${CARGO_PROFILE} \
|
||||
@@ -98,7 +83,7 @@ build-by-dev-builder: ## Build greptime by dev-builder.
|
||||
build-android-bin: ## Build greptime binary for android.
|
||||
docker run --network=host \
|
||||
-v ${PWD}:/greptimedb -v ${CARGO_REGISTRY_CACHE}:/root/.cargo/registry \
|
||||
-w /greptimedb ${IMAGE_REGISTRY}/${IMAGE_NAMESPACE}/dev-builder-android:${DEV_BUILDER_IMAGE_TAG} \
|
||||
-w /greptimedb ${IMAGE_REGISTRY}/${IMAGE_NAMESPACE}/dev-builder-android:latest \
|
||||
make build \
|
||||
CARGO_EXTENSION="ndk --platform 23 -t aarch64-linux-android" \
|
||||
CARGO_PROFILE=release \
|
||||
@@ -112,8 +97,8 @@ build-android-bin: ## Build greptime binary for android.
|
||||
strip-android-bin: build-android-bin ## Strip greptime binary for android.
|
||||
docker run --network=host \
|
||||
-v ${PWD}:/greptimedb \
|
||||
-w /greptimedb ${IMAGE_REGISTRY}/${IMAGE_NAMESPACE}/dev-builder-android:${DEV_BUILDER_IMAGE_TAG} \
|
||||
bash -c '$${NDK_ROOT}/toolchains/llvm/prebuilt/linux-x86_64/bin/llvm-strip --strip-debug /greptimedb/target/aarch64-linux-android/release/greptime'
|
||||
-w /greptimedb ${IMAGE_REGISTRY}/${IMAGE_NAMESPACE}/dev-builder-android:latest \
|
||||
bash -c '$${NDK_ROOT}/toolchains/llvm/prebuilt/linux-x86_64/bin/llvm-strip /greptimedb/target/aarch64-linux-android/release/greptime'
|
||||
|
||||
.PHONY: clean
|
||||
clean: ## Clean the project.
|
||||
@@ -152,34 +137,23 @@ dev-builder: multi-platform-buildx ## Build dev-builder image.
|
||||
docker buildx build --builder ${BUILDX_BUILDER_NAME} \
|
||||
--build-arg="RUST_TOOLCHAIN=${RUST_TOOLCHAIN}" \
|
||||
-f docker/dev-builder/${BASE_IMAGE}/Dockerfile \
|
||||
-t ${IMAGE_REGISTRY}/${IMAGE_NAMESPACE}/dev-builder-${BASE_IMAGE}:${DEV_BUILDER_IMAGE_TAG} ${BUILDX_MULTI_PLATFORM_BUILD_OPTS} .
|
||||
-t ${IMAGE_REGISTRY}/${IMAGE_NAMESPACE}/dev-builder-${BASE_IMAGE}:${IMAGE_TAG} ${BUILDX_MULTI_PLATFORM_BUILD_OPTS} .
|
||||
|
||||
.PHONY: multi-platform-buildx
|
||||
multi-platform-buildx: ## Create buildx multi-platform builder.
|
||||
docker buildx inspect ${BUILDX_BUILDER_NAME} || docker buildx create --name ${BUILDX_BUILDER_NAME} --driver docker-container --bootstrap --use
|
||||
|
||||
##@ Test
|
||||
.PHONY: test
|
||||
test: nextest ## Run unit and integration tests.
|
||||
cargo nextest run ${NEXTEST_OPTS}
|
||||
|
||||
.PHONY: nextest
|
||||
nextest: ## Install nextest tools.
|
||||
.PHONY: nextest ## Install nextest tools.
|
||||
nextest:
|
||||
cargo --list | grep nextest || cargo install cargo-nextest --locked
|
||||
|
||||
.PHONY: sqlness-test
|
||||
sqlness-test: ## Run sqlness test.
|
||||
cargo sqlness ${SQLNESS_OPTS}
|
||||
|
||||
RUNS ?= 1
|
||||
FUZZ_TARGET ?= fuzz_alter_table
|
||||
.PHONY: fuzz
|
||||
fuzz: ## Run fuzz test ${FUZZ_TARGET}.
|
||||
cargo fuzz run ${FUZZ_TARGET} --fuzz-dir tests-fuzz -D -s none -- -runs=${RUNS}
|
||||
|
||||
.PHONY: fuzz-ls
|
||||
fuzz-ls: ## List all fuzz targets.
|
||||
cargo fuzz list --fuzz-dir tests-fuzz
|
||||
cargo sqlness
|
||||
|
||||
.PHONY: check
|
||||
check: ## Cargo check all the targets.
|
||||
@@ -189,26 +163,9 @@ check: ## Cargo check all the targets.
|
||||
clippy: ## Check clippy rules.
|
||||
cargo clippy --workspace --all-targets --all-features -- -D warnings
|
||||
|
||||
.PHONY: fix-clippy
|
||||
fix-clippy: ## Fix clippy violations.
|
||||
cargo clippy --workspace --all-targets --all-features --fix
|
||||
|
||||
.PHONY: check-udeps
|
||||
check-udeps: ## Check unused dependencies.
|
||||
cargo udeps --workspace --all-targets
|
||||
|
||||
.PHONY: fix-udeps
|
||||
fix-udeps: ## Remove unused dependencies automatically.
|
||||
@echo "Running cargo-udeps to find unused dependencies..."
|
||||
@cargo udeps --workspace --all-targets --output json > udeps-report.json || true
|
||||
@echo "Removing unused dependencies..."
|
||||
@python3 scripts/fix-udeps.py udeps-report.json
|
||||
|
||||
.PHONY: fmt-check
|
||||
fmt-check: ## Check code format.
|
||||
cargo fmt --all -- --check
|
||||
python3 scripts/check-snafu.py
|
||||
python3 scripts/check-super-imports.py
|
||||
|
||||
.PHONY: start-etcd
|
||||
start-etcd: ## Start single node etcd for testing purpose.
|
||||
@@ -222,37 +179,9 @@ stop-etcd: ## Stop single node etcd for testing purpose.
|
||||
run-it-in-container: start-etcd ## Run integration tests in dev-builder.
|
||||
docker run --network=host \
|
||||
-v ${PWD}:/greptimedb -v ${CARGO_REGISTRY_CACHE}:/root/.cargo/registry -v /tmp:/tmp \
|
||||
-w /greptimedb ${IMAGE_REGISTRY}/${IMAGE_NAMESPACE}/dev-builder-${BASE_IMAGE}:${DEV_BUILDER_IMAGE_TAG} \
|
||||
-w /greptimedb ${IMAGE_REGISTRY}/${IMAGE_NAMESPACE}/dev-builder-${BASE_IMAGE}:latest \
|
||||
make test sqlness-test BUILD_JOBS=${BUILD_JOBS}
|
||||
|
||||
.PHONY: start-cluster
|
||||
start-cluster: ## Start the greptimedb cluster with etcd by using docker compose.
|
||||
docker compose -f ./docker/docker-compose/cluster-with-etcd.yaml up
|
||||
|
||||
.PHONY: stop-cluster
|
||||
stop-cluster: ## Stop the greptimedb cluster that created by docker compose.
|
||||
docker compose -f ./docker/docker-compose/cluster-with-etcd.yaml stop
|
||||
|
||||
##@ Grafana
|
||||
|
||||
.PHONY: check-dashboards
|
||||
check-dashboards: ## Check the Grafana dashboards.
|
||||
@./grafana/scripts/check.sh
|
||||
|
||||
.PHONY: dashboards
|
||||
dashboards: ## Generate the Grafana dashboards for standalone mode and intermediate dashboards.
|
||||
@./grafana/scripts/gen-dashboards.sh
|
||||
|
||||
##@ Docs
|
||||
config-docs: ## Generate configuration documentation from toml files.
|
||||
docker run --rm \
|
||||
-v ${PWD}:/greptimedb \
|
||||
-w /greptimedb/config \
|
||||
toml2docs/toml2docs:v0.1.3 \
|
||||
-p '##' \
|
||||
-t ./config-docs-template.md \
|
||||
-o ./config.md
|
||||
|
||||
##@ General
|
||||
|
||||
# The help target prints out all targets with their descriptions organized
|
||||
|
||||
307 README.md
@@ -1,218 +1,185 @@
|
||||
<p align="center">
|
||||
<picture>
|
||||
<source media="(prefers-color-scheme: light)" srcset="https://cdn.jsdelivr.net/gh/GreptimeTeam/greptimedb@main/docs/logo-text-padding.png">
|
||||
<source media="(prefers-color-scheme: dark)" srcset="https://cdn.jsdelivr.net/gh/GreptimeTeam/greptimedb@main/docs/logo-text-padding-dark.png">
|
||||
<img alt="GreptimeDB Logo" src="https://cdn.jsdelivr.net/gh/GreptimeTeam/greptimedb@main/docs/logo-text-padding.png" width="400px">
|
||||
<source media="(prefers-color-scheme: light)" srcset="https://cdn.jsdelivr.net/gh/GreptimeTeam/greptimedb@develop/docs/logo-text-padding.png">
|
||||
<source media="(prefers-color-scheme: dark)" srcset="https://cdn.jsdelivr.net/gh/GreptimeTeam/greptimedb@develop/docs/logo-text-padding-dark.png">
|
||||
<img alt="GreptimeDB Logo" src="https://cdn.jsdelivr.net/gh/GreptimeTeam/greptimedb@develop/docs/logo-text-padding.png" width="400px">
|
||||
</picture>
|
||||
</p>
|
||||
|
||||
<h2 align="center">Real-Time & Cloud-Native Observability Database<br/>for metrics, logs, and traces</h2>
|
||||
|
||||
> Delivers sub-second querying at PB scale and exceptional cost efficiency from edge to cloud.
|
||||
|
||||
<div align="center">
|
||||
<h3 align="center">
|
||||
<a href="https://greptime.com/product/cloud">GreptimeCloud</a> |
|
||||
<a href="https://docs.greptime.com/">User Guide</a> |
|
||||
<a href="https://greptimedb.rs/">API Docs</a> |
|
||||
<a href="https://github.com/GreptimeTeam/greptimedb/issues/5446">Roadmap 2025</a>
|
||||
</h4>
|
||||
The next-generation hybrid time-series/analytics processing database in the cloud
|
||||
</h3>
|
||||
|
||||
<a href="https://github.com/GreptimeTeam/greptimedb/releases/latest">
|
||||
<img src="https://img.shields.io/github/v/release/GreptimeTeam/greptimedb.svg" alt="Version"/>
|
||||
</a>
|
||||
<a href="https://github.com/GreptimeTeam/greptimedb/releases/latest">
|
||||
<img src="https://img.shields.io/github/release-date/GreptimeTeam/greptimedb.svg" alt="Releases"/>
|
||||
</a>
|
||||
<a href="https://hub.docker.com/r/greptime/greptimedb/">
|
||||
<img src="https://img.shields.io/docker/pulls/greptime/greptimedb.svg" alt="Docker Pulls"/>
|
||||
</a>
|
||||
<a href="https://github.com/GreptimeTeam/greptimedb/actions/workflows/develop.yml">
|
||||
<img src="https://github.com/GreptimeTeam/greptimedb/actions/workflows/develop.yml/badge.svg" alt="GitHub Actions"/>
|
||||
</a>
|
||||
<a href="https://codecov.io/gh/GrepTimeTeam/greptimedb">
|
||||
<img src="https://codecov.io/gh/GrepTimeTeam/greptimedb/branch/main/graph/badge.svg?token=FITFDI3J3C" alt="Codecov"/>
|
||||
</a>
|
||||
<a href="https://github.com/greptimeTeam/greptimedb/blob/main/LICENSE">
|
||||
<img src="https://img.shields.io/github/license/greptimeTeam/greptimedb" alt="License"/>
|
||||
</a>
|
||||
<p align="center">
|
||||
<a href="https://codecov.io/gh/GrepTimeTeam/greptimedb"><img src="https://codecov.io/gh/GrepTimeTeam/greptimedb/branch/develop/graph/badge.svg?token=FITFDI3J3C"></img></a>
|
||||
|
||||
<a href="https://github.com/GreptimeTeam/greptimedb/actions/workflows/develop.yml"><img src="https://github.com/GreptimeTeam/greptimedb/actions/workflows/develop.yml/badge.svg" alt="CI"></img></a>
|
||||
|
||||
<a href="https://github.com/greptimeTeam/greptimedb/blob/develop/LICENSE"><img src="https://img.shields.io/github/license/greptimeTeam/greptimedb"></a>
|
||||
</p>
|
||||
|
||||
<br/>
|
||||
<p align="center">
|
||||
<a href="https://twitter.com/greptime"><img src="https://img.shields.io/badge/twitter-follow_us-1d9bf0.svg"></a>
|
||||
|
||||
<a href="https://www.linkedin.com/company/greptime/"><img src="https://img.shields.io/badge/linkedin-connect_with_us-0a66c2.svg"></a>
|
||||
|
||||
<a href="https://greptime.com/slack"><img src="https://img.shields.io/badge/slack-GreptimeDB-0abd59?logo=slack" alt="slack" /></a>
|
||||
</p>
|
||||
|
||||
<a href="https://greptime.com/slack">
|
||||
<img src="https://img.shields.io/badge/slack-GreptimeDB-0abd59?logo=slack&style=for-the-badge" alt="Slack"/>
|
||||
</a>
|
||||
<a href="https://twitter.com/greptime">
|
||||
<img src="https://img.shields.io/badge/twitter-follow_us-1d9bf0.svg?style=for-the-badge" alt="Twitter"/>
|
||||
</a>
|
||||
<a href="https://www.linkedin.com/company/greptime/">
|
||||
<img src="https://img.shields.io/badge/linkedin-connect_with_us-0a66c2.svg?style=for-the-badge" alt="LinkedIn"/>
|
||||
</a>
|
||||
</div>
|
||||
> [!WARNING]
|
||||
> Our default branch has changed from `develop` to `main` (issue [#3025](https://github.com/GreptimeTeam/greptimedb/issues/3025)). Please update your local repository to use the `main` branch.
|
||||
|
||||
- [Introduction](#introduction)
|
||||
- [⭐ Key Features](#features)
|
||||
- [Quick Comparison](#quick-comparison)
|
||||
- [Architecture](#architecture)
|
||||
- [Try GreptimeDB](#try-greptimedb)
|
||||
- [Getting Started](#getting-started)
|
||||
- [Build From Source](#build-from-source)
|
||||
- [Tools & Extensions](#tools--extensions)
|
||||
- [Project Status](#project-status)
|
||||
- [Community](#community)
|
||||
- [License](#license)
|
||||
- [Commercial Support](#commercial-support)
|
||||
- [Contributing](#contributing)
|
||||
- [Acknowledgement](#acknowledgement)
|
||||
## What is GreptimeDB
|
||||
|
||||
## Introduction
|
||||
GreptimeDB is an open-source time-series database with a special focus on
|
||||
scalability, analytical capabilities and efficiency. It's designed to work on
|
||||
infrastructure of the cloud era, and users benefit from its elasticity and commodity
|
||||
storage.
|
||||
|
||||
**GreptimeDB** is an open-source, cloud-native database purpose-built for the unified collection and analysis of observability data (metrics, logs, and traces). Whether you’re operating on the edge, in the cloud, or across hybrid environments, GreptimeDB empowers real-time insights at massive scale — all in one system.
|
||||
Our core developers have been building time-series data platform
|
||||
for years. Based on their best-practices, GreptimeDB is born to give you:
|
||||
|
||||
## Features

- A standalone binary that scales to a highly available distributed cluster, providing a transparent experience for cluster users
- Optimized columnar layout for time-series data: compacted, compressed, and stored on various storage backends
- Flexible indexes that tackle high-cardinality issues
- Distributed, parallel query execution that leverages elastic computing resources
- Native SQL and Python scripting for advanced analytical scenarios
- Widely adopted database protocols and APIs, with native PromQL support
- Extensible table engine architecture for a wide range of workloads

| Feature | Description |
| --------- | ----------- |
| [Unified Observability Data](https://docs.greptime.com/user-guide/concepts/why-greptimedb) | Store metrics, logs, and traces as timestamped, contextual wide events. Query via [SQL](https://docs.greptime.com/user-guide/query-data/sql), [PromQL](https://docs.greptime.com/user-guide/query-data/promql), and [streaming](https://docs.greptime.com/user-guide/flow-computation/overview). |
| [High Performance & Cost Effective](https://docs.greptime.com/user-guide/manage-data/data-index) | Written in Rust, with a distributed query engine, [rich indexing](https://docs.greptime.com/user-guide/manage-data/data-index), and optimized columnar storage, delivering sub-second responses at PB scale. |
| [Cloud-Native Architecture](https://docs.greptime.com/user-guide/concepts/architecture) | Designed for [Kubernetes](https://docs.greptime.com/user-guide/deployments-administration/deploy-on-kubernetes/greptimedb-operator-management), with compute/storage separation, native object storage (AWS S3, Azure Blob, etc.) and seamless cross-cloud access. |
| [Developer-Friendly](https://docs.greptime.com/user-guide/protocols/overview) | Access via SQL/PromQL interfaces, REST API, MySQL/PostgreSQL protocols, and popular ingestion [protocols](https://docs.greptime.com/user-guide/protocols/overview). |
| [Flexible Deployment](https://docs.greptime.com/user-guide/deployments-administration/overview) | Deploy anywhere: edge (including ARM/[Android](https://docs.greptime.com/user-guide/deployments-administration/run-on-android)) or cloud, with unified APIs and efficient data sync. |

## Quick Comparison

| Feature | GreptimeDB | Traditional TSDB | Log Stores |
|----------------------------------|-----------------------|--------------------|-----------------|
| Data Types | Metrics, Logs, Traces | Metrics only | Logs only |
| Query Language | SQL, PromQL, Streaming| Custom/PromQL | Custom/DSL |
| Deployment | Edge + Cloud | Cloud/On-prem | Mostly central |
| Indexing & Performance | PB-Scale, Sub-second | Varies | Varies |
| Integration | REST, SQL, Common protocols | Varies | Varies |

**Performance:**

* [GreptimeDB tops JSONBench's billion-record cold run test!](https://greptime.com/blogs/2025-03-18-jsonbench-greptimedb-performance)
* [TSBS Benchmark](https://github.com/GreptimeTeam/greptimedb/tree/main/docs/benchmarks/tsbs)

Read [more benchmark reports](https://docs.greptime.com/user-guide/concepts/features-that-you-concern#how-is-greptimedbs-performance-compared-to-other-solutions).

## Architecture

* Read the [architecture](https://docs.greptime.com/contributor-guide/overview/#architecture) document.
* [DeepWiki](https://deepwiki.com/GreptimeTeam/greptimedb/1-overview) provides an in-depth look at GreptimeDB:

<img alt="GreptimeDB System Overview" src="docs/architecture.png">

## Try GreptimeDB

### 1. [Live Demo](https://greptime.com/playground)

Experience GreptimeDB directly in your browser.

### 2. [GreptimeCloud](https://console.greptime.cloud/)

Start instantly with a free cluster.

### 3. Docker (Local Quickstart)

Pull the pre-built image:

```shell
docker pull greptime/greptimedb
```

Or, if you prefer to build the image yourself, a Dockerfile with the necessary dependencies is provided:

```shell
docker build --network host -f docker/Dockerfile -t greptimedb .
```

Then start a standalone instance:

```shell
docker run -p 127.0.0.1:4000-4003:4000-4003 \
  -v "$(pwd)/greptimedb_data:/greptimedb_data" \
  --name greptime --rm \
  greptime/greptimedb:latest standalone start \
  --http-addr 0.0.0.0:4000 \
  --rpc-bind-addr 0.0.0.0:4001 \
  --mysql-addr 0.0.0.0:4002 \
  --postgres-addr 0.0.0.0:4003
```

Dashboard: [http://localhost:4000/dashboard](http://localhost:4000/dashboard)

[Full Install Guide](https://docs.greptime.com/getting-started/installation/overview)

**Troubleshooting:**

* Cannot connect to the database? Ensure that ports `4000`, `4001`, `4002`, and `4003` are not blocked by a firewall or used by other services.
* Failed to start? Check the container logs with `docker logs greptime` for further details.
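
Once the container is running, you can connect over the MySQL wire protocol on port `4002` and use plain SQL. The sketch below is illustrative only: the table, column names, and values are not defined anywhere in this repository, and the `TIME INDEX`/`PRIMARY KEY` clauses follow the GreptimeDB SQL documentation's table model; adjust them to your setup.

```shell
# Connect with any MySQL-compatible client; the default standalone setup has no credentials.
mysql -h 127.0.0.1 -P 4002 <<'SQL'
-- A hypothetical table for host metrics.
CREATE TABLE IF NOT EXISTS monitor (
  host STRING,
  ts TIMESTAMP TIME INDEX,
  cpu DOUBLE,
  PRIMARY KEY (host)
);
INSERT INTO monitor (host, ts, cpu) VALUES ('host1', '2024-01-01 00:00:01', 0.5);
SELECT * FROM monitor ORDER BY ts DESC LIMIT 10;
SQL
```

The same data is also reachable over the PostgreSQL protocol on port `4003` and over HTTP on port `4000`.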
## Getting Started

Read the [complete getting started guide](https://docs.greptime.com/getting-started/overview) on our [official document site](https://docs.greptime.com/). To write and query data, GreptimeDB is compatible with multiple [protocols and clients](https://docs.greptime.com/user-guide/clients/overview).

- [Quickstart](https://docs.greptime.com/getting-started/quick-start)
- [User Guide](https://docs.greptime.com/user-guide/overview)
- [Demo Scenes](https://github.com/GreptimeTeam/demo-scene)
- [FAQ](https://docs.greptime.com/faq-and-others/faq)

## Build From Source

To compile GreptimeDB from source, you'll need:

**Prerequisites:**

* [Rust toolchain](https://www.rust-lang.org/tools/install) (nightly): the easiest way to install Rust is [`rustup`](https://rustup.rs/), which picks up the version pinned in our `rust-toolchain` file automatically.
* [Protobuf compiler](https://grpc.io/docs/protoc-installation/) (>= 3.15): `protoc` is required for compiling `.proto` files and is available from the major package managers on macOS and Linux distributions; see the [installation instructions](https://grpc.io/docs/protoc-installation/). The version must be >= 3.15 because we use the `optional` keyword; check it with `protoc --version`.
* C/C++ build essentials, including `gcc`/`g++`/`autoconf` and the glibc library: available as `build-essential` plus `libc6-dev` on Ubuntu, `glibc-devel` on Fedora and other RPM-based distributions, and under similar names on other platforms.
* Python toolchain (optional): required only for some test scripts, or if you want to run scripts in CPython. CPython scripting needs the Python shared library (`python3-dev` on Ubuntu via `sudo apt install python3-dev`, `python3-devel` on RPM-based distributions such as Fedora, Red Hat, and SUSE; macOS's `Python3` package ships it by default) and the `pyo3_backend` feature enabled at compile time, either with `cargo run -F pyo3_backend` or by adding `pyo3_backend` to `features.default` in `src/script/Cargo.toml` (e.g. `default = ["python", "pyo3_backend"]`). See the [PyO3 documentation](https://pyo3.rs/v0.18.1/building_and_distribution#configuring-the-python-version) for more details on compiling with PyO3.

**Build and Run:**

Start GreptimeDB from source, in standalone mode:

```bash
make
cargo run -- standalone start
```

Or, if you built the Docker image above instead of the binaries, run it directly:

```shell
docker run -p 4002:4002 -v "$(pwd):/tmp/greptimedb" greptime/greptimedb standalone start
```

Please see the online document site for more installation options and [operations info](https://docs.greptime.com/user-guide/operations/overview).

## Tools & Extensions

- **Kubernetes:** [GreptimeDB Operator](https://github.com/GrepTimeTeam/greptimedb-operator)
- **Helm Charts:** [Greptime Helm Charts](https://github.com/GreptimeTeam/helm-charts)
- **Dashboard:** [Web UI](https://github.com/GreptimeTeam/dashboard)
- **SDKs/Ingester:** [Go](https://github.com/GreptimeTeam/greptimedb-ingester-go), [Java](https://github.com/GreptimeTeam/greptimedb-ingester-java), [C++](https://github.com/GreptimeTeam/greptimedb-ingester-cpp), [Erlang](https://github.com/GreptimeTeam/greptimedb-ingester-erl), [Rust](https://github.com/GreptimeTeam/greptimedb-ingester-rust), [JS](https://github.com/GreptimeTeam/greptimedb-ingester-js)
- **Grafana**: [Official Dashboard](https://github.com/GreptimeTeam/greptimedb/blob/main/grafana/README.md)
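
Besides the SDKs and ingesters listed above, data can also be pushed over plain HTTP. The sketch below assumes the InfluxDB line-protocol endpoint described in the protocol guide (`/v1/influxdb/write`) and a local standalone instance listening on port `4000`; the database, measurement, and field names are illustrative only.

```shell
# Write a single line-protocol point into the default `public` database (illustrative values).
curl -X POST "http://localhost:4000/v1/influxdb/write?db=public" \
  --data-binary 'monitor,host=host1 cpu=0.5,memory=0.2'
```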

## Resources

### Installation

- [Pre-built Binaries](https://greptime.com/download): for Linux and macOS, you can download pre-built binaries, including official releases and nightly builds, that are ready to use.
  In most cases, downloading the version without PyO3 is sufficient. However, if you plan to run scripts in CPython (and use Python packages such as NumPy and Pandas), you will need the version with PyO3 and a Python installation whose version matches the one the PyO3 build was made against. We recommend using virtualenv to manage multiple Python versions.
- [Docker Images](https://hub.docker.com/r/greptime/greptimedb) (**recommended**): pre-built Docker images; this is the easiest way to try GreptimeDB. By default they run CPython scripts with `pyo3_backend` enabled.
- [`gtctl`](https://github.com/GreptimeTeam/gtctl): the command-line tool for Kubernetes deployment.

### Documentation

- GreptimeDB [User Guide](https://docs.greptime.com/user-guide/concepts/overview)
- GreptimeDB [Developer Guide](https://docs.greptime.com/developer-guide/overview.html)
- GreptimeDB [internal code document](https://greptimedb.rs)

### Dashboard

- [The dashboard UI for GreptimeDB](https://github.com/GreptimeTeam/dashboard)

### SDK

- [GreptimeDB C++ Client](https://github.com/GreptimeTeam/greptimedb-client-cpp)
- [GreptimeDB Erlang Client](https://github.com/GreptimeTeam/greptimedb-client-erl)
- [GreptimeDB Go Client](https://github.com/GreptimeTeam/greptimedb-client-go)
- [GreptimeDB Java Client](https://github.com/GreptimeTeam/greptimedb-client-java)
- [GreptimeDB Python Client](https://github.com/GreptimeTeam/greptimedb-client-py) (WIP)
- [GreptimeDB Rust Client](https://github.com/GreptimeTeam/greptimedb-client-rust)
- [GreptimeDB JavaScript Client](https://github.com/GreptimeTeam/greptime-js-sdk)

## Project Status

> **Status:** Beta.
> **GA (v1.0):** Targeted for mid 2025.

GreptimeDB is under heavy, fast-moving development, so benchmarks taken on the development branch may not reflect its full potential, and pre-built binaries are published constantly for functional evaluation. That said, the project is:

- Being used in production by early adopters
- Stable, actively maintained, with regular releases ([version info](https://docs.greptime.com/nightly/reference/about-greptimedb-version))
- Suitable for evaluation and pilot deployments

For production use, we recommend using the latest stable release.

For future plans, check out the [GreptimeDB roadmap](https://github.com/GreptimeTeam/greptimedb/issues/669).

If you find this project useful, a ⭐ would mean a lot to us!

[Star History](https://www.star-history.com/#GreptimeTeam/GreptimeDB&Date)

<img alt="Known Users" src="https://greptime.com/logo/img/users.png"/>

## Community

We invite you to engage and contribute! Our core team is thrilled to see you participate in any way you like. If you get stuck, ask for help by filing an issue with a detailed description of what you were trying to do and what went wrong. If you have any questions or would like to get involved in our community, please check out:

- [Slack](https://greptime.com/slack)
- [GitHub Discussions](https://github.com/GreptimeTeam/greptimedb/discussions)
- [Official Website](https://greptime.com/)
- [Blog](https://greptime.com/blogs/)
- [LinkedIn](https://www.linkedin.com/company/greptime/)
- [X (Twitter)](https://X.com/greptime)
- [YouTube](https://www.youtube.com/@greptime)

## License

GreptimeDB is licensed under the [Apache License 2.0](https://apache.org/licenses/LICENSE-2.0.txt), which strikes a balance between open contributions and allowing you to use the software however you want.

## Commercial Support

Running GreptimeDB in your organization? We offer enterprise add-ons, services, training, and consulting. [Contact us](https://greptime.com/contactus) for details.

## Contributing

- Read our [Contribution Guidelines](https://github.com/GreptimeTeam/greptimedb/blob/main/CONTRIBUTING.md) before opening a pull request.
- Explore [Internal Concepts](https://docs.greptime.com/contributor-guide/overview.html) and [DeepWiki](https://deepwiki.com/GreptimeTeam/greptimedb).
- Pick up a [good first issue](https://github.com/GreptimeTeam/greptimedb/issues?q=is%3Aissue+is%3Aopen+label%3A%22good+first+issue%22) and join the #contributors [Slack](https://greptime.com/slack) channel.

## Acknowledgement

Special thanks to all contributors! See [AUTHORS.md](https://github.com/GreptimeTeam/greptimedb/blob/main/AUTHOR.md).

GreptimeDB builds on several outstanding open-source projects:

- [Apache Arrow™](https://arrow.apache.org/) as the memory model
- [Apache Parquet™](https://parquet.apache.org/) as the persistent file format
- [Apache Arrow DataFusion™](https://arrow.apache.org/datafusion/) as the foundation of the query engine
- [Apache OpenDAL™](https://opendal.apache.org/) as the data access abstraction layer
- [etcd](https://etcd.io/) as the basis of the meta service
- [RustPython](https://github.com/RustPython/RustPython) for experimental embedded Python scripting

benchmarks/Cargo.toml (new file, 16 lines)
@@ -0,0 +1,16 @@
[package]
name = "benchmarks"
version.workspace = true
edition.workspace = true
license.workspace = true

[dependencies]
arrow.workspace = true
chrono.workspace = true
clap = { version = "4.0", features = ["derive"] }
client.workspace = true
futures-util.workspace = true
indicatif = "0.17.1"
itertools.workspace = true
parquet.workspace = true
tokio.workspace = true
benchmarks/src/bin/nyc-taxi.rs (new file, 543 lines)
@@ -0,0 +1,543 @@
|
||||
// Copyright 2023 Greptime Team
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
//! Use the taxi trip records from New York City dataset to bench. You can download the dataset from
|
||||
//! [here](https://www1.nyc.gov/site/tlc/about/tlc-trip-record-data.page).
|
||||
|
||||
#![allow(clippy::print_stdout)]
|
||||
|
||||
use std::collections::HashMap;
|
||||
use std::path::{Path, PathBuf};
|
||||
use std::time::Instant;
|
||||
|
||||
use arrow::array::{ArrayRef, PrimitiveArray, StringArray, TimestampMicrosecondArray};
|
||||
use arrow::datatypes::{DataType, Float64Type, Int64Type};
|
||||
use arrow::record_batch::RecordBatch;
|
||||
use clap::Parser;
|
||||
use client::api::v1::column::Values;
|
||||
use client::api::v1::{
|
||||
Column, ColumnDataType, ColumnDef, CreateTableExpr, InsertRequest, InsertRequests, SemanticType,
|
||||
};
|
||||
use client::{Client, Database, Output, DEFAULT_CATALOG_NAME, DEFAULT_SCHEMA_NAME};
|
||||
use futures_util::TryStreamExt;
|
||||
use indicatif::{MultiProgress, ProgressBar, ProgressStyle};
|
||||
use parquet::arrow::arrow_reader::ParquetRecordBatchReaderBuilder;
|
||||
use tokio::task::JoinSet;
|
||||
|
||||
const CATALOG_NAME: &str = "greptime";
|
||||
const SCHEMA_NAME: &str = "public";
|
||||
|
||||
#[derive(Parser)]
|
||||
#[command(name = "NYC benchmark runner")]
|
||||
struct Args {
|
||||
/// Path to the dataset
|
||||
#[arg(short, long)]
|
||||
path: Option<String>,
|
||||
|
||||
/// Batch size of insert request.
|
||||
#[arg(short = 's', long = "batch-size", default_value_t = 4096)]
|
||||
batch_size: usize,
|
||||
|
||||
/// Number of client threads on write (parallel on file level)
|
||||
#[arg(short = 't', long = "thread-num", default_value_t = 4)]
|
||||
thread_num: usize,
|
||||
|
||||
/// Number of query iteration
|
||||
#[arg(short = 'i', long = "iter-num", default_value_t = 3)]
|
||||
iter_num: usize,
|
||||
|
||||
#[arg(long = "skip-write")]
|
||||
skip_write: bool,
|
||||
|
||||
#[arg(long = "skip-read")]
|
||||
skip_read: bool,
|
||||
|
||||
#[arg(short, long, default_value_t = String::from("127.0.0.1:4001"))]
|
||||
endpoint: String,
|
||||
}
|
||||
|
||||
fn get_file_list<P: AsRef<Path>>(path: P) -> Vec<PathBuf> {
|
||||
std::fs::read_dir(path)
|
||||
.unwrap()
|
||||
.map(|dir| dir.unwrap().path().canonicalize().unwrap())
|
||||
.collect()
|
||||
}
|
||||
|
||||
fn new_table_name() -> String {
|
||||
format!("nyc_taxi_{}", chrono::Utc::now().timestamp())
|
||||
}
|
||||
|
||||
async fn write_data(
|
||||
table_name: &str,
|
||||
batch_size: usize,
|
||||
db: &Database,
|
||||
path: PathBuf,
|
||||
mpb: MultiProgress,
|
||||
pb_style: ProgressStyle,
|
||||
) -> u128 {
|
||||
let file = std::fs::File::open(&path).unwrap();
|
||||
let record_batch_reader_builder = ParquetRecordBatchReaderBuilder::try_new(file).unwrap();
|
||||
let row_num = record_batch_reader_builder
|
||||
.metadata()
|
||||
.file_metadata()
|
||||
.num_rows();
|
||||
let record_batch_reader = record_batch_reader_builder
|
||||
.with_batch_size(batch_size)
|
||||
.build()
|
||||
.unwrap();
|
||||
let progress_bar = mpb.add(ProgressBar::new(row_num as _));
|
||||
progress_bar.set_style(pb_style);
|
||||
progress_bar.set_message(format!("{path:?}"));
|
||||
|
||||
let mut total_rpc_elapsed_ms = 0;
|
||||
|
||||
for record_batch in record_batch_reader {
|
||||
let record_batch = record_batch.unwrap();
|
||||
if !is_record_batch_full(&record_batch) {
|
||||
continue;
|
||||
}
|
||||
let (columns, row_count) = convert_record_batch(record_batch);
|
||||
let request = InsertRequest {
|
||||
table_name: table_name.to_string(),
|
||||
columns,
|
||||
row_count,
|
||||
};
|
||||
let requests = InsertRequests {
|
||||
inserts: vec![request],
|
||||
};
|
||||
|
||||
let now = Instant::now();
|
||||
db.insert(requests).await.unwrap();
|
||||
let elapsed = now.elapsed();
|
||||
total_rpc_elapsed_ms += elapsed.as_millis();
|
||||
progress_bar.inc(row_count as _);
|
||||
}
|
||||
|
||||
progress_bar.finish_with_message(format!("file {path:?} done in {total_rpc_elapsed_ms}ms",));
|
||||
total_rpc_elapsed_ms
|
||||
}
|
||||
|
||||
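// Converts an Arrow RecordBatch into the gRPC insert columns: "VendorID" becomes a tag,
// "tpep_pickup_datetime" becomes the timestamp column, and everything else is a field.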
fn convert_record_batch(record_batch: RecordBatch) -> (Vec<Column>, u32) {
|
||||
let schema = record_batch.schema();
|
||||
let fields = schema.fields();
|
||||
let row_count = record_batch.num_rows();
|
||||
let mut columns = vec![];
|
||||
|
||||
for (array, field) in record_batch.columns().iter().zip(fields.iter()) {
|
||||
let (values, datatype) = build_values(array);
|
||||
let semantic_type = match field.name().as_str() {
|
||||
"VendorID" => SemanticType::Tag,
|
||||
"tpep_pickup_datetime" => SemanticType::Timestamp,
|
||||
_ => SemanticType::Field,
|
||||
};
|
||||
|
||||
let column = Column {
|
||||
column_name: field.name().clone(),
|
||||
values: Some(values),
|
||||
null_mask: array
|
||||
.to_data()
|
||||
.nulls()
|
||||
.map(|bitmap| bitmap.buffer().as_slice().to_vec())
|
||||
.unwrap_or_default(),
|
||||
datatype: datatype.into(),
|
||||
semantic_type: semantic_type as i32,
|
||||
..Default::default()
|
||||
};
|
||||
columns.push(column);
|
||||
}
|
||||
|
||||
(columns, row_count as _)
|
||||
}
|
||||
|
||||
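// Maps a single Arrow array to its gRPC `Values` payload and column data type. Only the
// types that occur in the NYC taxi dataset are handled; all other types hit `todo!()`.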
fn build_values(column: &ArrayRef) -> (Values, ColumnDataType) {
|
||||
match column.data_type() {
|
||||
DataType::Int64 => {
|
||||
let array = column
|
||||
.as_any()
|
||||
.downcast_ref::<PrimitiveArray<Int64Type>>()
|
||||
.unwrap();
|
||||
let values = array.values();
|
||||
(
|
||||
Values {
|
||||
i64_values: values.to_vec(),
|
||||
..Default::default()
|
||||
},
|
||||
ColumnDataType::Int64,
|
||||
)
|
||||
}
|
||||
DataType::Float64 => {
|
||||
let array = column
|
||||
.as_any()
|
||||
.downcast_ref::<PrimitiveArray<Float64Type>>()
|
||||
.unwrap();
|
||||
let values = array.values();
|
||||
(
|
||||
Values {
|
||||
f64_values: values.to_vec(),
|
||||
..Default::default()
|
||||
},
|
||||
ColumnDataType::Float64,
|
||||
)
|
||||
}
|
||||
DataType::Timestamp(_, _) => {
|
||||
let array = column
|
||||
.as_any()
|
||||
.downcast_ref::<TimestampMicrosecondArray>()
|
||||
.unwrap();
|
||||
let values = array.values();
|
||||
(
|
||||
Values {
|
||||
timestamp_microsecond_values: values.to_vec(),
|
||||
..Default::default()
|
||||
},
|
||||
ColumnDataType::TimestampMicrosecond,
|
||||
)
|
||||
}
|
||||
DataType::Utf8 => {
|
||||
let array = column.as_any().downcast_ref::<StringArray>().unwrap();
|
||||
let values = array.iter().filter_map(|s| s.map(String::from)).collect();
|
||||
(
|
||||
Values {
|
||||
string_values: values,
|
||||
..Default::default()
|
||||
},
|
||||
ColumnDataType::String,
|
||||
)
|
||||
}
|
||||
DataType::Null
|
||||
| DataType::Boolean
|
||||
| DataType::Int8
|
||||
| DataType::Int16
|
||||
| DataType::Int32
|
||||
| DataType::UInt8
|
||||
| DataType::UInt16
|
||||
| DataType::UInt32
|
||||
| DataType::UInt64
|
||||
| DataType::Float16
|
||||
| DataType::Float32
|
||||
| DataType::Date32
|
||||
| DataType::Date64
|
||||
| DataType::Time32(_)
|
||||
| DataType::Time64(_)
|
||||
| DataType::Duration(_)
|
||||
| DataType::Interval(_)
|
||||
| DataType::Binary
|
||||
| DataType::FixedSizeBinary(_)
|
||||
| DataType::LargeBinary
|
||||
| DataType::LargeUtf8
|
||||
| DataType::List(_)
|
||||
| DataType::FixedSizeList(_, _)
|
||||
| DataType::LargeList(_)
|
||||
| DataType::Struct(_)
|
||||
| DataType::Union(_, _)
|
||||
| DataType::Dictionary(_, _)
|
||||
| DataType::Decimal128(_, _)
|
||||
| DataType::Decimal256(_, _)
|
||||
| DataType::RunEndEncoded(_, _)
|
||||
| DataType::Map(_, _) => todo!(),
|
||||
}
|
||||
}
|
||||
|
||||
fn is_record_batch_full(batch: &RecordBatch) -> bool {
|
||||
batch.columns().iter().all(|col| col.null_count() == 0)
|
||||
}
|
||||
|
||||
fn create_table_expr(table_name: &str) -> CreateTableExpr {
|
||||
CreateTableExpr {
|
||||
catalog_name: CATALOG_NAME.to_string(),
|
||||
schema_name: SCHEMA_NAME.to_string(),
|
||||
table_name: table_name.to_string(),
|
||||
desc: "".to_string(),
|
||||
column_defs: vec![
|
||||
ColumnDef {
|
||||
name: "VendorID".to_string(),
|
||||
data_type: ColumnDataType::Int64 as i32,
|
||||
is_nullable: true,
|
||||
default_constraint: vec![],
|
||||
semantic_type: SemanticType::Tag as i32,
|
||||
comment: String::new(),
|
||||
..Default::default()
|
||||
},
|
||||
ColumnDef {
|
||||
name: "tpep_pickup_datetime".to_string(),
|
||||
data_type: ColumnDataType::TimestampMicrosecond as i32,
|
||||
is_nullable: false,
|
||||
default_constraint: vec![],
|
||||
semantic_type: SemanticType::Timestamp as i32,
|
||||
comment: String::new(),
|
||||
..Default::default()
|
||||
},
|
||||
ColumnDef {
|
||||
name: "tpep_dropoff_datetime".to_string(),
|
||||
data_type: ColumnDataType::TimestampMicrosecond as i32,
|
||||
is_nullable: true,
|
||||
default_constraint: vec![],
|
||||
semantic_type: SemanticType::Field as i32,
|
||||
comment: String::new(),
|
||||
..Default::default()
|
||||
},
|
||||
ColumnDef {
|
||||
name: "passenger_count".to_string(),
|
||||
data_type: ColumnDataType::Float64 as i32,
|
||||
is_nullable: true,
|
||||
default_constraint: vec![],
|
||||
semantic_type: SemanticType::Field as i32,
|
||||
comment: String::new(),
|
||||
..Default::default()
|
||||
},
|
||||
ColumnDef {
|
||||
name: "trip_distance".to_string(),
|
||||
data_type: ColumnDataType::Float64 as i32,
|
||||
is_nullable: true,
|
||||
default_constraint: vec![],
|
||||
semantic_type: SemanticType::Field as i32,
|
||||
comment: String::new(),
|
||||
..Default::default()
|
||||
},
|
||||
ColumnDef {
|
||||
name: "RatecodeID".to_string(),
|
||||
data_type: ColumnDataType::Float64 as i32,
|
||||
is_nullable: true,
|
||||
default_constraint: vec![],
|
||||
semantic_type: SemanticType::Field as i32,
|
||||
comment: String::new(),
|
||||
..Default::default()
|
||||
},
|
||||
ColumnDef {
|
||||
name: "store_and_fwd_flag".to_string(),
|
||||
data_type: ColumnDataType::String as i32,
|
||||
is_nullable: true,
|
||||
default_constraint: vec![],
|
||||
semantic_type: SemanticType::Field as i32,
|
||||
comment: String::new(),
|
||||
..Default::default()
|
||||
},
|
||||
ColumnDef {
|
||||
name: "PULocationID".to_string(),
|
||||
data_type: ColumnDataType::Int64 as i32,
|
||||
is_nullable: true,
|
||||
default_constraint: vec![],
|
||||
semantic_type: SemanticType::Field as i32,
|
||||
comment: String::new(),
|
||||
..Default::default()
|
||||
},
|
||||
ColumnDef {
|
||||
name: "DOLocationID".to_string(),
|
||||
data_type: ColumnDataType::Int64 as i32,
|
||||
is_nullable: true,
|
||||
default_constraint: vec![],
|
||||
semantic_type: SemanticType::Field as i32,
|
||||
comment: String::new(),
|
||||
..Default::default()
|
||||
},
|
||||
ColumnDef {
|
||||
name: "payment_type".to_string(),
|
||||
data_type: ColumnDataType::Int64 as i32,
|
||||
is_nullable: true,
|
||||
default_constraint: vec![],
|
||||
semantic_type: SemanticType::Field as i32,
|
||||
comment: String::new(),
|
||||
..Default::default()
|
||||
},
|
||||
ColumnDef {
|
||||
name: "fare_amount".to_string(),
|
||||
data_type: ColumnDataType::Float64 as i32,
|
||||
is_nullable: true,
|
||||
default_constraint: vec![],
|
||||
semantic_type: SemanticType::Field as i32,
|
||||
comment: String::new(),
|
||||
..Default::default()
|
||||
},
|
||||
ColumnDef {
|
||||
name: "extra".to_string(),
|
||||
data_type: ColumnDataType::Float64 as i32,
|
||||
is_nullable: true,
|
||||
default_constraint: vec![],
|
||||
semantic_type: SemanticType::Field as i32,
|
||||
comment: String::new(),
|
||||
..Default::default()
|
||||
},
|
||||
ColumnDef {
|
||||
name: "mta_tax".to_string(),
|
||||
data_type: ColumnDataType::Float64 as i32,
|
||||
is_nullable: true,
|
||||
default_constraint: vec![],
|
||||
semantic_type: SemanticType::Field as i32,
|
||||
comment: String::new(),
|
||||
..Default::default()
|
||||
},
|
||||
ColumnDef {
|
||||
name: "tip_amount".to_string(),
|
||||
data_type: ColumnDataType::Float64 as i32,
|
||||
is_nullable: true,
|
||||
default_constraint: vec![],
|
||||
semantic_type: SemanticType::Field as i32,
|
||||
comment: String::new(),
|
||||
..Default::default()
|
||||
},
|
||||
ColumnDef {
|
||||
name: "tolls_amount".to_string(),
|
||||
data_type: ColumnDataType::Float64 as i32,
|
||||
is_nullable: true,
|
||||
default_constraint: vec![],
|
||||
semantic_type: SemanticType::Field as i32,
|
||||
comment: String::new(),
|
||||
..Default::default()
|
||||
},
|
||||
ColumnDef {
|
||||
name: "improvement_surcharge".to_string(),
|
||||
data_type: ColumnDataType::Float64 as i32,
|
||||
is_nullable: true,
|
||||
default_constraint: vec![],
|
||||
semantic_type: SemanticType::Field as i32,
|
||||
comment: String::new(),
|
||||
..Default::default()
|
||||
},
|
||||
ColumnDef {
|
||||
name: "total_amount".to_string(),
|
||||
data_type: ColumnDataType::Float64 as i32,
|
||||
is_nullable: true,
|
||||
default_constraint: vec![],
|
||||
semantic_type: SemanticType::Field as i32,
|
||||
comment: String::new(),
|
||||
..Default::default()
|
||||
},
|
||||
ColumnDef {
|
||||
name: "congestion_surcharge".to_string(),
|
||||
data_type: ColumnDataType::Float64 as i32,
|
||||
is_nullable: true,
|
||||
default_constraint: vec![],
|
||||
semantic_type: SemanticType::Field as i32,
|
||||
comment: String::new(),
|
||||
..Default::default()
|
||||
},
|
||||
ColumnDef {
|
||||
name: "airport_fee".to_string(),
|
||||
data_type: ColumnDataType::Float64 as i32,
|
||||
is_nullable: true,
|
||||
default_constraint: vec![],
|
||||
semantic_type: SemanticType::Field as i32,
|
||||
comment: String::new(),
|
||||
..Default::default()
|
||||
},
|
||||
],
|
||||
time_index: "tpep_pickup_datetime".to_string(),
|
||||
primary_keys: vec!["VendorID".to_string()],
|
||||
create_if_not_exists: true,
|
||||
table_options: Default::default(),
|
||||
table_id: None,
|
||||
engine: "mito".to_string(),
|
||||
}
|
||||
}
|
||||
|
||||
fn query_set(table_name: &str) -> HashMap<String, String> {
|
||||
HashMap::from([
|
||||
(
|
||||
"count_all".to_string(),
|
||||
format!("SELECT COUNT(*) FROM {table_name};"),
|
||||
),
|
||||
(
|
||||
"fare_amt_by_passenger".to_string(),
|
||||
format!("SELECT passenger_count, MIN(fare_amount), MAX(fare_amount), SUM(fare_amount) FROM {table_name} GROUP BY passenger_count"),
|
||||
)
|
||||
])
|
||||
}
|
||||
|
||||
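// Creates the target table, then writes the parquet files with at most `thread_num`
// concurrent writer tasks; as each task finishes, the next file in the list is scheduled.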
async fn do_write(args: &Args, db: &Database, table_name: &str) {
|
||||
let mut file_list = get_file_list(args.path.clone().expect("Specify data path in argument"));
|
||||
let mut write_jobs = JoinSet::new();
|
||||
|
||||
let create_table_result = db.create(create_table_expr(table_name)).await;
|
||||
println!("Create table result: {create_table_result:?}");
|
||||
|
||||
let progress_bar_style = ProgressStyle::with_template(
|
||||
"[{elapsed_precise}] {bar:60.cyan/blue} {pos:>7}/{len:7} {msg}",
|
||||
)
|
||||
.unwrap()
|
||||
.progress_chars("##-");
|
||||
let multi_progress_bar = MultiProgress::new();
|
||||
let file_progress = multi_progress_bar.add(ProgressBar::new(file_list.len() as _));
|
||||
file_progress.inc(0);
|
||||
|
||||
let batch_size = args.batch_size;
|
||||
for _ in 0..args.thread_num {
|
||||
if let Some(path) = file_list.pop() {
|
||||
let db = db.clone();
|
||||
let mpb = multi_progress_bar.clone();
|
||||
let pb_style = progress_bar_style.clone();
|
||||
let table_name = table_name.to_string();
|
||||
let _ = write_jobs.spawn(async move {
|
||||
write_data(&table_name, batch_size, &db, path, mpb, pb_style).await
|
||||
});
|
||||
}
|
||||
}
|
||||
while write_jobs.join_next().await.is_some() {
|
||||
file_progress.inc(1);
|
||||
if let Some(path) = file_list.pop() {
|
||||
let db = db.clone();
|
||||
let mpb = multi_progress_bar.clone();
|
||||
let pb_style = progress_bar_style.clone();
|
||||
let table_name = table_name.to_string();
|
||||
let _ = write_jobs.spawn(async move {
|
||||
write_data(&table_name, batch_size, &db, path, mpb, pb_style).await
|
||||
});
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
async fn do_query(num_iter: usize, db: &Database, table_name: &str) {
|
||||
for (query_name, query) in query_set(table_name) {
|
||||
println!("Running query: {query}");
|
||||
for i in 0..num_iter {
|
||||
let now = Instant::now();
|
||||
let res = db.sql(&query).await.unwrap();
|
||||
match res {
|
||||
Output::AffectedRows(_) | Output::RecordBatches(_) => (),
|
||||
Output::Stream(stream) => {
|
||||
stream.try_collect::<Vec<_>>().await.unwrap();
|
||||
}
|
||||
}
|
||||
let elapsed = now.elapsed();
|
||||
println!(
|
||||
"query {}, iteration {}: {}ms",
|
||||
query_name,
|
||||
i,
|
||||
elapsed.as_millis(),
|
||||
);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
fn main() {
|
||||
let args = Args::parse();
|
||||
|
||||
tokio::runtime::Builder::new_multi_thread()
|
||||
.worker_threads(args.thread_num)
|
||||
.enable_all()
|
||||
.build()
|
||||
.unwrap()
|
||||
.block_on(async {
|
||||
let client = Client::with_urls(vec![&args.endpoint]);
|
||||
let db = Database::new(DEFAULT_CATALOG_NAME, DEFAULT_SCHEMA_NAME, client);
|
||||
let table_name = new_table_name();
|
||||
|
||||
if !args.skip_write {
|
||||
do_write(&args, &db, &table_name).await;
|
||||
}
|
||||
|
||||
if !args.skip_read {
|
||||
do_query(args.iter_num, &db, &table_name).await;
|
||||
}
|
||||
})
|
||||
}
|
||||
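
For reference, this benchmark binary can be invoked against a local GreptimeDB instance using the CLI flags defined in `Args` above. The dataset directory below is a placeholder: point it at NYC taxi parquet files downloaded from the TLC page linked in the module docs.

```shell
# All flag values other than the defaults are illustrative.
cargo run --release -p benchmarks --bin nyc-taxi -- \
  --path ./data/nyc-taxi \
  --batch-size 4096 \
  --thread-num 4 \
  --iter-num 3 \
  --endpoint 127.0.0.1:4001
```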
cliff.toml (deleted file, 127 lines)
@@ -1,127 +0,0 @@
|
||||
# https://git-cliff.org/docs/configuration
|
||||
|
||||
[remote.github]
|
||||
owner = "GreptimeTeam"
|
||||
repo = "greptimedb"
|
||||
|
||||
[changelog]
|
||||
header = ""
|
||||
footer = ""
|
||||
# template for the changelog body
|
||||
# https://keats.github.io/tera/docs/#introduction
|
||||
body = """
|
||||
# {{ version }}
|
||||
|
||||
Release date: {{ timestamp | date(format="%B %d, %Y") }}
|
||||
|
||||
{%- set breakings = commits | filter(attribute="breaking", value=true) -%}
|
||||
{%- if breakings | length > 0 %}
|
||||
|
||||
## Breaking changes
|
||||
{% for commit in breakings %}
|
||||
* {{ commit.github.pr_title }}\
|
||||
{% if commit.github.username %} by \
|
||||
{% set author = commit.github.username -%}
|
||||
[@{{ author }}](https://github.com/{{ author }})
|
||||
{%- endif -%}
|
||||
{% if commit.github.pr_number %} in \
|
||||
{% set number = commit.github.pr_number -%}
|
||||
[#{{ number }}]({{ self::remote_url() }}/pull/{{ number }})
|
||||
{%- endif %}
|
||||
{%- endfor %}
|
||||
{%- endif -%}
|
||||
|
||||
{%- set grouped_commits = commits | filter(attribute="breaking", value=false) | group_by(attribute="group") -%}
|
||||
{% for group, commits in grouped_commits %}
|
||||
|
||||
### {{ group | striptags | trim | upper_first }}
|
||||
{% for commit in commits %}
|
||||
* {{ commit.github.pr_title }}\
|
||||
{% if commit.github.username %} by \
|
||||
{% set author = commit.github.username -%}
|
||||
[@{{ author }}](https://github.com/{{ author }})
|
||||
{%- endif -%}
|
||||
{% if commit.github.pr_number %} in \
|
||||
{% set number = commit.github.pr_number -%}
|
||||
[#{{ number }}]({{ self::remote_url() }}/pull/{{ number }})
|
||||
{%- endif %}
|
||||
{%- endfor -%}
|
||||
{% endfor %}
|
||||
|
||||
{%- if github.contributors | filter(attribute="is_first_time", value=true) | length != 0 %}
|
||||
{% raw %}\n{% endraw -%}
|
||||
## New Contributors
|
||||
{% endif -%}
|
||||
{% for contributor in github.contributors | filter(attribute="is_first_time", value=true) %}
|
||||
* [@{{ contributor.username }}](https://github.com/{{ contributor.username }}) made their first contribution
|
||||
{%- if contributor.pr_number %} in \
|
||||
[#{{ contributor.pr_number }}]({{ self::remote_url() }}/pull/{{ contributor.pr_number }}) \
|
||||
{%- endif %}
|
||||
{%- endfor -%}
|
||||
|
||||
{% if github.contributors | length != 0 %}
|
||||
{% raw %}\n{% endraw -%}
|
||||
## All Contributors
|
||||
|
||||
We would like to thank the following contributors from the GreptimeDB community:
|
||||
|
||||
{%- set contributors = github.contributors | sort(attribute="username") | map(attribute="username") -%}
|
||||
{%- set bots = ['dependabot[bot]'] %}
|
||||
|
||||
{% for contributor in contributors %}
|
||||
{%- if bots is containing(contributor) -%}{% continue %}{%- endif -%}
|
||||
{%- if loop.first -%}
|
||||
[@{{ contributor }}](https://github.com/{{ contributor }})
|
||||
{%- else -%}
|
||||
, [@{{ contributor }}](https://github.com/{{ contributor }})
|
||||
{%- endif -%}
|
||||
{%- endfor %}
|
||||
{%- endif %}
|
||||
{% raw %}\n{% endraw %}
|
||||
|
||||
{%- macro remote_url() -%}
|
||||
https://github.com/{{ remote.github.owner }}/{{ remote.github.repo }}
|
||||
{%- endmacro -%}
|
||||
"""
|
||||
trim = true
|
||||
|
||||
[git]
|
||||
# parse the commits based on https://www.conventionalcommits.org
|
||||
conventional_commits = true
|
||||
# filter out the commits that are not conventional
|
||||
filter_unconventional = true
|
||||
# process each line of a commit as an individual commit
|
||||
split_commits = false
|
||||
# regex for parsing and grouping commits
|
||||
commit_parsers = [
|
||||
{ message = "^feat", group = "<!-- 0 -->🚀 Features" },
|
||||
{ message = "^fix", group = "<!-- 1 -->🐛 Bug Fixes" },
|
||||
{ message = "^doc", group = "<!-- 3 -->📚 Documentation" },
|
||||
{ message = "^perf", group = "<!-- 4 -->⚡ Performance" },
|
||||
{ message = "^refactor", group = "<!-- 2 -->🚜 Refactor" },
|
||||
{ message = "^style", group = "<!-- 5 -->🎨 Styling" },
|
||||
{ message = "^test", group = "<!-- 6 -->🧪 Testing" },
|
||||
{ message = "^chore\\(release\\): prepare for", skip = true },
|
||||
{ message = "^chore\\(deps.*\\)", skip = true },
|
||||
{ message = "^chore\\(pr\\)", skip = true },
|
||||
{ message = "^chore\\(pull\\)", skip = true },
|
||||
{ message = "^chore|^ci", group = "<!-- 7 -->⚙️ Miscellaneous Tasks" },
|
||||
{ body = ".*security", group = "<!-- 8 -->🛡️ Security" },
|
||||
{ message = "^revert", group = "<!-- 9 -->◀️ Revert" },
|
||||
]
|
||||
# protect breaking changes from being skipped due to matching a skipping commit_parser
|
||||
protect_breaking_commits = false
|
||||
# filter out the commits that are not matched by commit parsers
|
||||
filter_commits = false
|
||||
# regex for matching git tags
|
||||
# tag_pattern = "v[0-9].*"
|
||||
# regex for skipping tags
|
||||
# skip_tags = ""
|
||||
# regex for ignoring tags
|
||||
ignore_tags = ".*-nightly-.*"
|
||||
# sort the tags topologically
|
||||
topo_order = false
|
||||
# sort the commits inside sections by oldest/newest order
|
||||
sort_commits = "oldest"
|
||||
# limit the number of commits included in the changelog.
|
||||
# limit_commits = 42
|
||||
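
For context, this configuration was consumed by [git-cliff](https://git-cliff.org/); a typical invocation to preview the generated changelog (standard git-cliff flags, not defined in this repository) would be:

```shell
# Render the unreleased changes using the repository's cliff.toml.
git cliff --config cliff.toml --unreleased
```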
@@ -8,6 +8,5 @@ coverage:
ignore:
  - "**/error*.rs" # ignore all error.rs files
  - "tests/runner/*.rs" # ignore integration test runner
  - "tests-integration/**/*.rs" # ignore integration tests
comment: # this is a top-level key
  layout: "diff"
@@ -1,31 +0,0 @@
# Configurations

- [Configurations](#configurations)
  - [Standalone Mode](#standalone-mode)
  - [Distributed Mode](#distributed-mode)
    - [Frontend](#frontend)
    - [Metasrv](#metasrv)
    - [Datanode](#datanode)
    - [Flownode](#flownode)

## Standalone Mode

{{ toml2docs "./standalone.example.toml" }}

## Distributed Mode

### Frontend

{{ toml2docs "./frontend.example.toml" }}

### Metasrv

{{ toml2docs "./metasrv.example.toml" }}

### Datanode

{{ toml2docs "./datanode.example.toml" }}

### Flownode

{{ toml2docs "./flownode.example.toml" }}
config/config.md (deleted file, 665 lines)
@@ -1,665 +0,0 @@
|
||||
# Configurations
|
||||
|
||||
- [Configurations](#configurations)
|
||||
- [Standalone Mode](#standalone-mode)
|
||||
- [Distributed Mode](#distributed-mode)
|
||||
- [Frontend](#frontend)
|
||||
- [Metasrv](#metasrv)
|
||||
- [Datanode](#datanode)
|
||||
- [Flownode](#flownode)
|
||||
|
||||
## Standalone Mode
|
||||
|
||||
| Key | Type | Default | Descriptions |
|
||||
| --- | -----| ------- | ----------- |
|
||||
| `default_timezone` | String | Unset | The default timezone of the server. |
|
||||
| `init_regions_in_background` | Bool | `false` | Initialize all regions in the background during the startup.<br/>By default, it provides services after all regions have been initialized. |
|
||||
| `init_regions_parallelism` | Integer | `16` | Parallelism of initializing regions. |
|
||||
| `max_concurrent_queries` | Integer | `0` | The maximum number of concurrent queries allowed to be executed. Zero means unlimited. |
|
||||
| `enable_telemetry` | Bool | `true` | Enable telemetry to collect anonymous usage data. Enabled by default. |
|
||||
| `max_in_flight_write_bytes` | String | Unset | The maximum in-flight write bytes. |
|
||||
| `runtime` | -- | -- | The runtime options. |
|
||||
| `runtime.global_rt_size` | Integer | `8` | The number of threads to execute the runtime for global read operations. |
|
||||
| `runtime.compact_rt_size` | Integer | `4` | The number of threads to execute the runtime for global write operations. |
|
||||
| `http` | -- | -- | The HTTP server options. |
|
||||
| `http.addr` | String | `127.0.0.1:4000` | The address to bind the HTTP server. |
|
||||
| `http.timeout` | String | `0s` | HTTP request timeout. Set to 0 to disable timeout. |
|
||||
| `http.body_limit` | String | `64MB` | HTTP request body limit.<br/>The following units are supported: `B`, `KB`, `KiB`, `MB`, `MiB`, `GB`, `GiB`, `TB`, `TiB`, `PB`, `PiB`.<br/>Set to 0 to disable limit. |
|
||||
| `http.enable_cors` | Bool | `true` | HTTP CORS support, it's turned on by default<br/>This allows browser to access http APIs without CORS restrictions |
|
||||
| `http.cors_allowed_origins` | Array | Unset | Customize allowed origins for HTTP CORS. |
|
||||
| `http.prom_validation_mode` | String | `strict` | The validation mode for Prometheus remote write requests.<br/>Available options:<br/>- strict: deny invalid UTF-8 strings (default).<br/>- lossy: allow invalid UTF-8 strings, replacing invalid characters with REPLACEMENT_CHARACTER (U+FFFD).<br/>- unchecked: do not validate strings. |
|
||||
| `grpc` | -- | -- | The gRPC server options. |
|
||||
| `grpc.bind_addr` | String | `127.0.0.1:4001` | The address to bind the gRPC server. |
|
||||
| `grpc.runtime_size` | Integer | `8` | The number of server worker threads. |
|
||||
| `grpc.tls` | -- | -- | gRPC server TLS options, see `mysql.tls` section. |
|
||||
| `grpc.tls.mode` | String | `disable` | TLS mode. |
|
||||
| `grpc.tls.cert_path` | String | Unset | Certificate file path. |
|
||||
| `grpc.tls.key_path` | String | Unset | Private key file path. |
|
||||
| `grpc.tls.watch` | Bool | `false` | Watch for Certificate and key file change and auto reload.<br/>For now, gRPC tls config does not support auto reload. |
|
||||
| `mysql` | -- | -- | MySQL server options. |
|
||||
| `mysql.enable` | Bool | `true` | Whether to enable. |
|
||||
| `mysql.addr` | String | `127.0.0.1:4002` | The addr to bind the MySQL server. |
|
||||
| `mysql.runtime_size` | Integer | `2` | The number of server worker threads. |
|
||||
| `mysql.keep_alive` | String | `0s` | Server-side keep-alive time.<br/>Set to 0 (default) to disable. |
|
||||
| `mysql.prepared_stmt_cache_size` | Integer | `10000` | Maximum entries in the MySQL prepared statement cache; default is 10,000. |
|
||||
| `mysql.tls` | -- | -- | -- |
|
||||
| `mysql.tls.mode` | String | `disable` | TLS mode, refer to https://www.postgresql.org/docs/current/libpq-ssl.html<br/>- `disable` (default value)<br/>- `prefer`<br/>- `require`<br/>- `verify-ca`<br/>- `verify-full` |
|
||||
| `mysql.tls.cert_path` | String | Unset | Certificate file path. |
|
||||
| `mysql.tls.key_path` | String | Unset | Private key file path. |
|
||||
| `mysql.tls.watch` | Bool | `false` | Watch for Certificate and key file change and auto reload |
|
||||
| `postgres` | -- | -- | PostgreSQL server options. |
|
||||
| `postgres.enable` | Bool | `true` | Whether to enable |
|
||||
| `postgres.addr` | String | `127.0.0.1:4003` | The addr to bind the PostgreSQL server. |
|
||||
| `postgres.runtime_size` | Integer | `2` | The number of server worker threads. |
|
||||
| `postgres.keep_alive` | String | `0s` | Server-side keep-alive time.<br/>Set to 0 (default) to disable. |
|
||||
| `postgres.tls` | -- | -- | PostgreSQL server TLS options, see `mysql.tls` section. |
|
||||
| `postgres.tls.mode` | String | `disable` | TLS mode. |
|
||||
| `postgres.tls.cert_path` | String | Unset | Certificate file path. |
|
||||
| `postgres.tls.key_path` | String | Unset | Private key file path. |
|
||||
| `postgres.tls.watch` | Bool | `false` | Watch for Certificate and key file change and auto reload |
|
||||
| `opentsdb` | -- | -- | OpenTSDB protocol options. |
|
||||
| `opentsdb.enable` | Bool | `true` | Whether to enable OpenTSDB put in HTTP API. |
|
||||
| `influxdb` | -- | -- | InfluxDB protocol options. |
|
||||
| `influxdb.enable` | Bool | `true` | Whether to enable InfluxDB protocol in HTTP API. |
|
||||
| `jaeger` | -- | -- | Jaeger protocol options. |
|
||||
| `jaeger.enable` | Bool | `true` | Whether to enable Jaeger protocol in HTTP API. |
|
||||
| `prom_store` | -- | -- | Prometheus remote storage options |
|
||||
| `prom_store.enable` | Bool | `true` | Whether to enable Prometheus remote write and read in HTTP API. |
|
||||
| `prom_store.with_metric_engine` | Bool | `true` | Whether to store the data from Prometheus remote write in metric engine. |
|
||||
| `wal` | -- | -- | The WAL options. |
|
||||
| `wal.provider` | String | `raft_engine` | The provider of the WAL.<br/>- `raft_engine`: the wal is stored in the local file system by raft-engine.<br/>- `kafka`: it's remote wal that data is stored in Kafka. |
|
||||
| `wal.dir` | String | Unset | The directory to store the WAL files.<br/>**It's only used when the provider is `raft_engine`**. |
|
||||
| `wal.file_size` | String | `128MB` | The size of the WAL segment file.<br/>**It's only used when the provider is `raft_engine`**. |
|
||||
| `wal.purge_threshold` | String | `1GB` | The threshold of the WAL size to trigger a purge.<br/>**It's only used when the provider is `raft_engine`**. |
|
||||
| `wal.purge_interval` | String | `1m` | The interval to trigger a purge.<br/>**It's only used when the provider is `raft_engine`**. |
|
||||
| `wal.read_batch_size` | Integer | `128` | The read batch size.<br/>**It's only used when the provider is `raft_engine`**. |
|
||||
| `wal.sync_write` | Bool | `false` | Whether to use sync write.<br/>**It's only used when the provider is `raft_engine`**. |
|
||||
| `wal.enable_log_recycle` | Bool | `true` | Whether to reuse logically truncated log files.<br/>**It's only used when the provider is `raft_engine`**. |
|
||||
| `wal.prefill_log_files` | Bool | `false` | Whether to pre-create log files on start up.<br/>**It's only used when the provider is `raft_engine`**. |
|
||||
| `wal.sync_period` | String | `10s` | Duration for fsyncing log files.<br/>**It's only used when the provider is `raft_engine`**. |
|
||||
| `wal.recovery_parallelism` | Integer | `2` | Parallelism during WAL recovery. |
|
||||
| `wal.broker_endpoints` | Array | -- | The Kafka broker endpoints.<br/>**It's only used when the provider is `kafka`**. |
|
||||
| `wal.auto_create_topics` | Bool | `true` | Automatically create topics for WAL.<br/>Set to `true` to automatically create topics for WAL.<br/>Otherwise, use topics named `topic_name_prefix_[0..num_topics)` |
|
||||
| `wal.num_topics` | Integer | `64` | Number of topics.<br/>**It's only used when the provider is `kafka`**. |
|
||||
| `wal.selector_type` | String | `round_robin` | Topic selector type.<br/>Available selector types:<br/>- `round_robin` (default)<br/>**It's only used when the provider is `kafka`**. |
|
||||
| `wal.topic_name_prefix` | String | `greptimedb_wal_topic` | A Kafka topic is constructed by concatenating `topic_name_prefix` and `topic_id`,<br/>e.g., greptimedb_wal_topic_0, greptimedb_wal_topic_1.<br/>**It's only used when the provider is `kafka`**. |
|
||||
| `wal.replication_factor` | Integer | `1` | Expected number of replicas of each partition.<br/>**It's only used when the provider is `kafka`**. |
|
||||
| `wal.create_topic_timeout` | String | `30s` | The timeout above which a topic creation operation will be cancelled.<br/>**It's only used when the provider is `kafka`**. |
|
||||
| `wal.max_batch_bytes` | String | `1MB` | The max size of a single producer batch.<br/>Warning: Kafka has a default limit of 1MB per message in a topic.<br/>**It's only used when the provider is `kafka`**. |
|
||||
| `wal.consumer_wait_timeout` | String | `100ms` | The consumer wait timeout.<br/>**It's only used when the provider is `kafka`**. |
|
||||
| `wal.overwrite_entry_start_id` | Bool | `false` | Ignore missing entries during read WAL.<br/>**It's only used when the provider is `kafka`**.<br/><br/>This option ensures that when Kafka messages are deleted, the system<br/>can still successfully replay memtable data without throwing an<br/>out-of-range error.<br/>However, enabling this option might lead to unexpected data loss,<br/>as the system will skip over missing entries instead of treating<br/>them as critical errors. |
|
||||
| `metadata_store` | -- | -- | Metadata storage options. |
|
||||
| `metadata_store.file_size` | String | `64MB` | The size of the metadata store log file. |
|
||||
| `metadata_store.purge_threshold` | String | `256MB` | The threshold of the metadata store size to trigger a purge. |
|
||||
| `metadata_store.purge_interval` | String | `1m` | The interval of the metadata store to trigger a purge. |
|
||||
| `procedure` | -- | -- | Procedure storage options. |
|
||||
| `procedure.max_retry_times` | Integer | `3` | Procedure max retry time. |
|
||||
| `procedure.retry_delay` | String | `500ms` | Initial retry delay of procedures, increases exponentially |
|
||||
| `procedure.max_running_procedures` | Integer | `128` | Max running procedures.<br/>The maximum number of procedures that can be running at the same time.<br/>If the number of running procedures exceeds this limit, the procedure will be rejected. |
|
||||
| `flow` | -- | -- | flow engine options. |
|
||||
| `flow.num_workers` | Integer | `0` | The number of flow worker in flownode.<br/>Not setting(or set to 0) this value will use the number of CPU cores divided by 2. |
|
||||
| `query` | -- | -- | The query engine options. |
|
||||
| `query.parallelism` | Integer | `0` | Parallelism of the query engine.<br/>Default to 0, which means the number of CPU cores. |
|
||||
| `storage` | -- | -- | The data storage options. |
|
||||
| `storage.data_home` | String | `./greptimedb_data` | The working home directory. |
|
||||
| `storage.type` | String | `File` | The storage type used to store the data.<br/>- `File`: the data is stored in the local file system.<br/>- `S3`: the data is stored in the S3 object storage.<br/>- `Gcs`: the data is stored in the Google Cloud Storage.<br/>- `Azblob`: the data is stored in the Azure Blob Storage.<br/>- `Oss`: the data is stored in the Aliyun OSS. |
|
||||
| `storage.cache_path` | String | Unset | Read cache configuration for object storage such as 'S3' etc, it's configured by default when using object storage. It is recommended to configure it when using object storage for better performance.<br/>A local file directory, defaults to `{data_home}`. An empty string means disabling. |
|
||||
| `storage.cache_capacity` | String | Unset | The local file cache capacity in bytes. If your disk space is sufficient, it is recommended to set it larger. |
|
||||
| `storage.bucket` | String | Unset | The S3 bucket name.<br/>**It's only used when the storage type is `S3`, `Oss` and `Gcs`**. |
|
||||
| `storage.root` | String | Unset | The S3 data will be stored in the specified prefix, for example, `s3://${bucket}/${root}`.<br/>**It's only used when the storage type is `S3`, `Oss` and `Azblob`**. |
|
||||
| `storage.access_key_id` | String | Unset | The access key id of the aws account.<br/>It's **highly recommended** to use AWS IAM roles instead of hardcoding the access key id and secret key.<br/>**It's only used when the storage type is `S3` and `Oss`**. |
|
||||
| `storage.secret_access_key` | String | Unset | The secret access key of the aws account.<br/>It's **highly recommended** to use AWS IAM roles instead of hardcoding the access key id and secret key.<br/>**It's only used when the storage type is `S3`**. |
|
||||
| `storage.access_key_secret` | String | Unset | The secret access key of the aliyun account.<br/>**It's only used when the storage type is `Oss`**. |
|
||||
| `storage.account_name` | String | Unset | The account key of the azure account.<br/>**It's only used when the storage type is `Azblob`**. |
|
||||
| `storage.account_key` | String | Unset | The account key of the azure account.<br/>**It's only used when the storage type is `Azblob`**. |
|
||||
| `storage.scope` | String | Unset | The scope of the google cloud storage.<br/>**It's only used when the storage type is `Gcs`**. |
|
||||
| `storage.credential_path` | String | Unset | The credential path of the google cloud storage.<br/>**It's only used when the storage type is `Gcs`**. |
|
||||
| `storage.credential` | String | Unset | The credential of the google cloud storage.<br/>**It's only used when the storage type is `Gcs`**. |
|
||||
| `storage.container` | String | Unset | The container of the azure account.<br/>**It's only used when the storage type is `Azblob`**. |
|
||||
| `storage.sas_token` | String | Unset | The sas token of the azure account.<br/>**It's only used when the storage type is `Azblob`**. |
|
||||
| `storage.endpoint` | String | Unset | The endpoint of the S3 service.<br/>**It's only used when the storage type is `S3`, `Oss`, `Gcs` and `Azblob`**. |
|
||||
| `storage.region` | String | Unset | The region of the S3 service.<br/>**It's only used when the storage type is `S3`, `Oss`, `Gcs` and `Azblob`**. |
|
||||
| `storage.http_client` | -- | -- | The http client options to the storage.<br/>**It's only used when the storage type is `S3`, `Oss`, `Gcs` and `Azblob`**. |
|
||||
| `storage.http_client.pool_max_idle_per_host` | Integer | `1024` | The maximum idle connection per host allowed in the pool. |
|
||||
| `storage.http_client.connect_timeout` | String | `30s` | The timeout for only the connect phase of a http client. |
|
||||
| `storage.http_client.timeout` | String | `30s` | The total request timeout, applied from when the request starts connecting until the response body has finished.<br/>Also considered a total deadline. |
|
||||
| `storage.http_client.pool_idle_timeout` | String | `90s` | The timeout for idle sockets being kept-alive. |
|
||||
| `storage.http_client.skip_ssl_validation` | Bool | `false` | To skip the ssl verification<br/>**Security Notice**: Setting `skip_ssl_validation = true` disables certificate verification, making connections vulnerable to man-in-the-middle attacks. Only use this in development or trusted private networks. |
|
||||
| `[[region_engine]]` | -- | -- | The region engine options. You can configure multiple region engines. |
|
||||
| `region_engine.mito` | -- | -- | The Mito engine options. |
|
||||
| `region_engine.mito.num_workers` | Integer | `8` | Number of region workers. |
|
||||
| `region_engine.mito.worker_channel_size` | Integer | `128` | Request channel size of each worker. |
|
||||
| `region_engine.mito.worker_request_batch_size` | Integer | `64` | Max batch size for a worker to handle requests. |
|
||||
| `region_engine.mito.manifest_checkpoint_distance` | Integer | `10` | Number of meta action updated to trigger a new checkpoint for the manifest. |
|
||||
| `region_engine.mito.compress_manifest` | Bool | `false` | Whether to compress manifest and checkpoint file by gzip (default false). |
|
||||
| `region_engine.mito.max_background_flushes` | Integer | Auto | Max number of running background flush jobs (default: 1/2 of cpu cores). |
|
||||
| `region_engine.mito.max_background_compactions` | Integer | Auto | Max number of running background compaction jobs (default: 1/4 of cpu cores). |
|
||||
| `region_engine.mito.max_background_purges` | Integer | Auto | Max number of running background purge jobs (default: number of cpu cores). |
|
||||
| `region_engine.mito.auto_flush_interval` | String | `1h` | Interval to auto flush a region if it has not flushed yet. |
|
||||
| `region_engine.mito.global_write_buffer_size` | String | Auto | Global write buffer size for all regions. If not set, it's default to 1/8 of OS memory with a max limitation of 1GB. |
|
||||
| `region_engine.mito.global_write_buffer_reject_size` | String | Auto | Global write buffer size threshold to reject write requests. If not set, it's default to 2 times of `global_write_buffer_size`. |
|
||||
| `region_engine.mito.sst_meta_cache_size` | String | Auto | Cache size for SST metadata. Set it to 0 to disable the cache.<br/>If not set, it defaults to 1/32 of OS memory, with a maximum of 128MB. |
|
||||
| `region_engine.mito.vector_cache_size` | String | Auto | Cache size for vectors and arrow arrays. Set it to 0 to disable the cache.<br/>If not set, it defaults to 1/16 of OS memory, with a maximum of 512MB. |
|
||||
| `region_engine.mito.page_cache_size` | String | Auto | Cache size for pages of SST row groups. Set it to 0 to disable the cache.<br/>If not set, it defaults to 1/8 of OS memory. |
|
||||
| `region_engine.mito.selector_result_cache_size` | String | Auto | Cache size for time series selector (e.g. `last_value()`). Set it to 0 to disable the cache.<br/>If not set, it defaults to 1/16 of OS memory, with a maximum of 512MB. |
|
||||
| `region_engine.mito.enable_write_cache` | Bool | `false` | Whether to enable the write cache. It's enabled by default when using object storage, and enabling it is recommended for better performance with object storage. |
|
||||
| `region_engine.mito.write_cache_path` | String | `""` | File system path for write cache, defaults to `{data_home}`. |
|
||||
| `region_engine.mito.write_cache_size` | String | `5GiB` | Capacity for write cache. If your disk space is sufficient, it is recommended to set it larger. |
|
||||
| `region_engine.mito.write_cache_ttl` | String | Unset | TTL for write cache. |
|
||||
| `region_engine.mito.sst_write_buffer_size` | String | `8MB` | Buffer size for SST writing. |
|
||||
| `region_engine.mito.parallel_scan_channel_size` | Integer | `32` | Capacity of the channel to send data from parallel scan tasks to the main task. |
|
||||
| `region_engine.mito.max_concurrent_scan_files` | Integer | `384` | Maximum number of SST files to scan concurrently. |
|
||||
| `region_engine.mito.allow_stale_entries` | Bool | `false` | Whether to allow stale WAL entries read during replay. |
|
||||
| `region_engine.mito.min_compaction_interval` | String | `0m` | Minimum time interval between two compactions.<br/>To align with the old behavior, the default value is 0 (no restrictions). |
|
||||
| `region_engine.mito.index` | -- | -- | The options for index in Mito engine. |
|
||||
| `region_engine.mito.index.aux_path` | String | `""` | Auxiliary directory path for the index in filesystem, used to store intermediate files for<br/>creating the index and staging files for searching the index, defaults to `{data_home}/index_intermediate`.<br/>The default name for this directory is `index_intermediate` for backward compatibility.<br/><br/>This path contains two subdirectories:<br/>- `__intm`: for storing intermediate files used during creating index.<br/>- `staging`: for storing staging files used during searching index. |
|
||||
| `region_engine.mito.index.staging_size` | String | `2GB` | The max capacity of the staging directory. |
|
||||
| `region_engine.mito.index.staging_ttl` | String | `7d` | The TTL of the staging directory.<br/>Defaults to 7 days.<br/>Setting it to "0s" to disable TTL. |
|
||||
| `region_engine.mito.index.metadata_cache_size` | String | `64MiB` | Cache size for inverted index metadata. |
|
||||
| `region_engine.mito.index.content_cache_size` | String | `128MiB` | Cache size for inverted index content. |
|
||||
| `region_engine.mito.index.content_cache_page_size` | String | `64KiB` | Page size for inverted index content cache. |
|
||||
| `region_engine.mito.index.result_cache_size` | String | `128MiB` | Cache size for index result. |
|
||||
| `region_engine.mito.inverted_index` | -- | -- | The options for inverted index in Mito engine. |
|
||||
| `region_engine.mito.inverted_index.create_on_flush` | String | `auto` | Whether to create the index on flush.<br/>- `auto`: automatically (default)<br/>- `disable`: never |
|
||||
| `region_engine.mito.inverted_index.create_on_compaction` | String | `auto` | Whether to create the index on compaction.<br/>- `auto`: automatically (default)<br/>- `disable`: never |
|
||||
| `region_engine.mito.inverted_index.apply_on_query` | String | `auto` | Whether to apply the index on query<br/>- `auto`: automatically (default)<br/>- `disable`: never |
|
||||
| `region_engine.mito.inverted_index.mem_threshold_on_create` | String | `auto` | Memory threshold for performing an external sort during index creation.<br/>- `auto`: automatically determine the threshold based on the system memory size (default)<br/>- `unlimited`: no memory limit<br/>- `[size]` e.g. `64MB`: fixed memory threshold |
|
||||
| `region_engine.mito.inverted_index.intermediate_path` | String | `""` | Deprecated, use `region_engine.mito.index.aux_path` instead. |
|
||||
| `region_engine.mito.fulltext_index` | -- | -- | The options for full-text index in Mito engine. |
|
||||
| `region_engine.mito.fulltext_index.create_on_flush` | String | `auto` | Whether to create the index on flush.<br/>- `auto`: automatically (default)<br/>- `disable`: never |
|
||||
| `region_engine.mito.fulltext_index.create_on_compaction` | String | `auto` | Whether to create the index on compaction.<br/>- `auto`: automatically (default)<br/>- `disable`: never |
|
||||
| `region_engine.mito.fulltext_index.apply_on_query` | String | `auto` | Whether to apply the index on query<br/>- `auto`: automatically (default)<br/>- `disable`: never |
|
||||
| `region_engine.mito.fulltext_index.mem_threshold_on_create` | String | `auto` | Memory threshold for index creation.<br/>- `auto`: automatically determine the threshold based on the system memory size (default)<br/>- `unlimited`: no memory limit<br/>- `[size]` e.g. `64MB`: fixed memory threshold |
|
||||
| `region_engine.mito.bloom_filter_index` | -- | -- | The options for bloom filter in Mito engine. |
|
||||
| `region_engine.mito.bloom_filter_index.create_on_flush` | String | `auto` | Whether to create the bloom filter on flush.<br/>- `auto`: automatically (default)<br/>- `disable`: never |
|
||||
| `region_engine.mito.bloom_filter_index.create_on_compaction` | String | `auto` | Whether to create the bloom filter on compaction.<br/>- `auto`: automatically (default)<br/>- `disable`: never |
|
||||
| `region_engine.mito.bloom_filter_index.apply_on_query` | String | `auto` | Whether to apply the bloom filter on query<br/>- `auto`: automatically (default)<br/>- `disable`: never |
|
||||
| `region_engine.mito.bloom_filter_index.mem_threshold_on_create` | String | `auto` | Memory threshold for bloom filter creation.<br/>- `auto`: automatically determine the threshold based on the system memory size (default)<br/>- `unlimited`: no memory limit<br/>- `[size]` e.g. `64MB`: fixed memory threshold |
|
||||
| `region_engine.mito.memtable` | -- | -- | -- |
|
||||
| `region_engine.mito.memtable.type` | String | `time_series` | Memtable type.<br/>- `time_series`: time-series memtable<br/>- `partition_tree`: partition tree memtable (experimental) |
|
||||
| `region_engine.mito.memtable.index_max_keys_per_shard` | Integer | `8192` | The max number of keys in one shard.<br/>Only available for `partition_tree` memtable. |
|
||||
| `region_engine.mito.memtable.data_freeze_threshold` | Integer | `32768` | The max rows of data inside the actively writing buffer in one shard.<br/>Only available for `partition_tree` memtable. |
|
||||
| `region_engine.mito.memtable.fork_dictionary_bytes` | String | `1GiB` | Max dictionary bytes.<br/>Only available for `partition_tree` memtable. |
|
||||
| `region_engine.file` | -- | -- | Enable the file engine. |
|
||||
| `region_engine.metric` | -- | -- | Metric engine options. |
|
||||
| `region_engine.metric.experimental_sparse_primary_key_encoding` | Bool | `false` | Whether to enable the experimental sparse primary key encoding. |
|
||||
| `logging` | -- | -- | The logging options. |
|
||||
| `logging.dir` | String | `./greptimedb_data/logs` | The directory to store the log files. If set to empty, logs will not be written to files. |
|
||||
| `logging.level` | String | Unset | The log level. Can be `info`/`debug`/`warn`/`error`. |
|
||||
| `logging.enable_otlp_tracing` | Bool | `false` | Enable OTLP tracing. |
|
||||
| `logging.otlp_endpoint` | String | `http://localhost:4318/v1/traces` | The OTLP tracing endpoint. |
|
||||
| `logging.append_stdout` | Bool | `true` | Whether to append logs to stdout. |
|
||||
| `logging.log_format` | String | `text` | The log format. Can be `text`/`json`. |
|
||||
| `logging.max_log_files` | Integer | `720` | The maximum amount of log files. |
|
||||
| `logging.otlp_export_protocol` | String | `http` | The OTLP tracing export protocol. Can be `grpc`/`http`. |
|
||||
| `logging.otlp_headers` | -- | -- | Additional OTLP headers, only valid when using OTLP http |
|
||||
| `logging.tracing_sample_ratio` | -- | Unset | The percentage of traces that will be sampled and exported.<br/>Valid range `[0, 1]`: 1 means all traces are sampled, 0 means no traces are sampled; the default value is 1.<br/>Ratios > 1 are treated as 1; ratios < 0 are treated as 0. |
|
||||
| `logging.tracing_sample_ratio.default_ratio` | Float | `1.0` | -- |
|
||||
| `slow_query` | -- | -- | The slow query log options. |
|
||||
| `slow_query.enable` | Bool | `false` | Whether to enable slow query log. |
|
||||
| `slow_query.record_type` | String | Unset | The record type of slow queries. It can be `system_table` or `log`. |
|
||||
| `slow_query.threshold` | String | Unset | The threshold of slow query. |
|
||||
| `slow_query.sample_ratio` | Float | Unset | The sampling ratio of slow query log. The value should be in the range of (0, 1]. |
|
||||
| `export_metrics` | -- | -- | The standalone instance can export its metrics and send them to a Prometheus-compatible service (e.g. `greptimedb`) via the remote-write API.<br/>This is only used for `greptimedb` to export its own metrics internally; it's different from Prometheus scraping. |
|
||||
| `export_metrics.enable` | Bool | `false` | Whether to enable exporting metrics. |
|
||||
| `export_metrics.write_interval` | String | `30s` | The interval of export metrics. |
|
||||
| `export_metrics.self_import` | -- | -- | For `standalone` mode, `self_import` is recommended to collect metrics generated by itself<br/>You must create the database before enabling it. |
|
||||
| `export_metrics.self_import.db` | String | Unset | -- |
|
||||
| `export_metrics.remote_write` | -- | -- | -- |
|
||||
| `export_metrics.remote_write.url` | String | `""` | The prometheus remote write endpoint that the metrics send to. The url example can be: `http://127.0.0.1:4000/v1/prometheus/write?db=greptime_metrics`. |
|
||||
| `export_metrics.remote_write.headers` | InlineTable | -- | HTTP headers to carry in Prometheus remote-write requests. |
|
||||
| `tracing` | -- | -- | The tracing options. Only effective when compiled with the `tokio-console` feature. |
|
||||
| `tracing.tokio_console_addr` | String | Unset | The tokio console address. |
|
||||
| `memory` | -- | -- | The memory options. |
|
||||
| `memory.enable_heap_profiling` | Bool | `true` | Whether to enable heap profiling activation during startup.<br/>When enabled, heap profiling will be activated if the `MALLOC_CONF` environment variable<br/>is set to "prof:true,prof_active:false". The official image adds this env variable.<br/>Default is true. |
|
||||
|
||||
|
||||
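
To make the dotted option paths above concrete, the fragment below is a minimal sketch of how a few of the standalone options could be written in a TOML configuration file, assuming each dotted path maps directly to a TOML table as shown. The values are illustrative examples, not tuning recommendations.

```toml
# Illustrative fragment of a standalone configuration file; values are examples only.
[logging]
dir = "./greptimedb_data/logs"
level = "info"
log_format = "text"

[slow_query]
enable = true
record_type = "log"
threshold = "10s"
sample_ratio = 1.0

[[region_engine]]
[region_engine.mito]
auto_flush_interval = "1h"
enable_write_cache = true
write_cache_size = "5GiB"

[region_engine.mito.memtable]
type = "time_series"
```
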
## Distributed Mode
|
||||
|
||||
### Frontend
|
||||
|
||||
| Key | Type | Default | Descriptions |
|
||||
| --- | -----| ------- | ----------- |
|
||||
| `default_timezone` | String | Unset | The default timezone of the server. |
|
||||
| `max_in_flight_write_bytes` | String | Unset | The maximum in-flight write bytes. |
|
||||
| `runtime` | -- | -- | The runtime options. |
|
||||
| `runtime.global_rt_size` | Integer | `8` | The number of threads to execute the runtime for global read operations. |
|
||||
| `runtime.compact_rt_size` | Integer | `4` | The number of threads to execute the runtime for global write operations. |
|
||||
| `heartbeat` | -- | -- | The heartbeat options. |
|
||||
| `heartbeat.interval` | String | `18s` | Interval for sending heartbeat messages to the metasrv. |
|
||||
| `heartbeat.retry_interval` | String | `3s` | Interval for retrying to send heartbeat messages to the metasrv. |
|
||||
| `http` | -- | -- | The HTTP server options. |
|
||||
| `http.addr` | String | `127.0.0.1:4000` | The address to bind the HTTP server. |
|
||||
| `http.timeout` | String | `0s` | HTTP request timeout. Set to 0 to disable timeout. |
|
||||
| `http.body_limit` | String | `64MB` | HTTP request body limit.<br/>The following units are supported: `B`, `KB`, `KiB`, `MB`, `MiB`, `GB`, `GiB`, `TB`, `TiB`, `PB`, `PiB`.<br/>Set to 0 to disable limit. |
|
||||
| `http.enable_cors` | Bool | `true` | HTTP CORS support; it's turned on by default.<br/>This allows browsers to access HTTP APIs without CORS restrictions. |
|
||||
| `http.cors_allowed_origins` | Array | Unset | Customize allowed origins for HTTP CORS. |
|
||||
| `http.prom_validation_mode` | String | `strict` | Whether to enable validation for Prometheus remote write requests.<br/>Available options:<br/>- strict: deny invalid UTF-8 strings (default).<br/>- lossy: allow invalid UTF-8 strings, replace invalid characters with REPLACEMENT_CHARACTER(U+FFFD).<br/>- unchecked: do not validate strings. |
|
||||
| `grpc` | -- | -- | The gRPC server options. |
|
||||
| `grpc.bind_addr` | String | `127.0.0.1:4001` | The address to bind the gRPC server. |
|
||||
| `grpc.server_addr` | String | `127.0.0.1:4001` | The address advertised to the metasrv, and used for connections from outside the host.<br/>If left empty or unset, the server will automatically use the IP address of the first network interface<br/>on the host, with the same port number as the one specified in `grpc.bind_addr`. |
|
||||
| `grpc.runtime_size` | Integer | `8` | The number of server worker threads. |
|
||||
| `grpc.flight_compression` | String | `arrow_ipc` | Compression mode for frontend side Arrow IPC service. Available options:<br/>- `none`: disable all compression<br/>- `transport`: only enable gRPC transport compression (zstd)<br/>- `arrow_ipc`: only enable Arrow IPC compression (lz4)<br/>- `all`: enable all compression.<br/>Default to `none` |
|
||||
| `grpc.tls` | -- | -- | gRPC server TLS options, see `mysql.tls` section. |
|
||||
| `grpc.tls.mode` | String | `disable` | TLS mode. |
|
||||
| `grpc.tls.cert_path` | String | Unset | Certificate file path. |
|
||||
| `grpc.tls.key_path` | String | Unset | Private key file path. |
|
||||
| `grpc.tls.watch` | Bool | `false` | Watch for Certificate and key file change and auto reload.<br/>For now, gRPC tls config does not support auto reload. |
|
||||
| `internal_grpc` | -- | -- | The internal gRPC server options. Internal gRPC port for nodes inside cluster to access frontend. |
|
||||
| `internal_grpc.bind_addr` | String | `127.0.0.1:4010` | The address to bind the gRPC server. |
|
||||
| `internal_grpc.server_addr` | String | `127.0.0.1:4010` | The address advertised to the metasrv, and used for connections from outside the host.<br/>If left empty or unset, the server will automatically use the IP address of the first network interface<br/>on the host, with the same port number as the one specified in `grpc.bind_addr`. |
|
||||
| `internal_grpc.runtime_size` | Integer | `8` | The number of server worker threads. |
|
||||
| `internal_grpc.flight_compression` | String | `arrow_ipc` | Compression mode for frontend side Arrow IPC service. Available options:<br/>- `none`: disable all compression<br/>- `transport`: only enable gRPC transport compression (zstd)<br/>- `arrow_ipc`: only enable Arrow IPC compression (lz4)<br/>- `all`: enable all compression.<br/>Default to `none` |
|
||||
| `internal_grpc.tls` | -- | -- | internal gRPC server TLS options, see `mysql.tls` section. |
|
||||
| `internal_grpc.tls.mode` | String | `disable` | TLS mode. |
|
||||
| `internal_grpc.tls.cert_path` | String | Unset | Certificate file path. |
|
||||
| `internal_grpc.tls.key_path` | String | Unset | Private key file path. |
|
||||
| `internal_grpc.tls.watch` | Bool | `false` | Watch for Certificate and key file change and auto reload.<br/>For now, gRPC tls config does not support auto reload. |
|
||||
| `mysql` | -- | -- | MySQL server options. |
|
||||
| `mysql.enable` | Bool | `true` | Whether to enable. |
|
||||
| `mysql.addr` | String | `127.0.0.1:4002` | The addr to bind the MySQL server. |
|
||||
| `mysql.runtime_size` | Integer | `2` | The number of server worker threads. |
|
||||
| `mysql.keep_alive` | String | `0s` | Server-side keep-alive time.<br/>Set to 0 (default) to disable. |
|
||||
| `mysql.prepared_stmt_cache_size` | Integer | `10000` | Maximum entries in the MySQL prepared statement cache; default is 10,000. |
|
||||
| `mysql.tls` | -- | -- | -- |
|
||||
| `mysql.tls.mode` | String | `disable` | TLS mode, refer to https://www.postgresql.org/docs/current/libpq-ssl.html<br/>- `disable` (default value)<br/>- `prefer`<br/>- `require`<br/>- `verify-ca`<br/>- `verify-full` |
|
||||
| `mysql.tls.cert_path` | String | Unset | Certificate file path. |
|
||||
| `mysql.tls.key_path` | String | Unset | Private key file path. |
|
||||
| `mysql.tls.watch` | Bool | `false` | Watch for Certificate and key file change and auto reload |
|
||||
| `postgres` | -- | -- | PostgreSQL server options. |
|
||||
| `postgres.enable` | Bool | `true` | Whether to enable |
|
||||
| `postgres.addr` | String | `127.0.0.1:4003` | The addr to bind the PostgreSQL server. |
|
||||
| `postgres.runtime_size` | Integer | `2` | The number of server worker threads. |
|
||||
| `postgres.keep_alive` | String | `0s` | Server-side keep-alive time.<br/>Set to 0 (default) to disable. |
|
||||
| `postgres.tls` | -- | -- | PostgreSQL server TLS options, see `mysql.tls` section. |
|
||||
| `postgres.tls.mode` | String | `disable` | TLS mode. |
|
||||
| `postgres.tls.cert_path` | String | Unset | Certificate file path. |
|
||||
| `postgres.tls.key_path` | String | Unset | Private key file path. |
|
||||
| `postgres.tls.watch` | Bool | `false` | Watch for Certificate and key file change and auto reload |
|
||||
| `opentsdb` | -- | -- | OpenTSDB protocol options. |
|
||||
| `opentsdb.enable` | Bool | `true` | Whether to enable OpenTSDB put in HTTP API. |
|
||||
| `influxdb` | -- | -- | InfluxDB protocol options. |
|
||||
| `influxdb.enable` | Bool | `true` | Whether to enable InfluxDB protocol in HTTP API. |
|
||||
| `jaeger` | -- | -- | Jaeger protocol options. |
|
||||
| `jaeger.enable` | Bool | `true` | Whether to enable Jaeger protocol in HTTP API. |
|
||||
| `prom_store` | -- | -- | Prometheus remote storage options |
|
||||
| `prom_store.enable` | Bool | `true` | Whether to enable Prometheus remote write and read in HTTP API. |
|
||||
| `prom_store.with_metric_engine` | Bool | `true` | Whether to store the data from Prometheus remote write in metric engine. |
|
||||
| `meta_client` | -- | -- | The metasrv client options. |
|
||||
| `meta_client.metasrv_addrs` | Array | -- | The addresses of the metasrv. |
|
||||
| `meta_client.timeout` | String | `3s` | Operation timeout. |
|
||||
| `meta_client.heartbeat_timeout` | String | `500ms` | Heartbeat timeout. |
|
||||
| `meta_client.ddl_timeout` | String | `10s` | DDL timeout. |
|
||||
| `meta_client.connect_timeout` | String | `1s` | Connect server timeout. |
|
||||
| `meta_client.tcp_nodelay` | Bool | `true` | `TCP_NODELAY` option for accepted connections. |
|
||||
| `meta_client.metadata_cache_max_capacity` | Integer | `100000` | The configuration about the cache of the metadata. |
|
||||
| `meta_client.metadata_cache_ttl` | String | `10m` | TTL of the metadata cache. |
|
||||
| `meta_client.metadata_cache_tti` | String | `5m` | -- |
|
||||
| `query` | -- | -- | The query engine options. |
|
||||
| `query.parallelism` | Integer | `0` | Parallelism of the query engine.<br/>Default to 0, which means the number of CPU cores. |
|
||||
| `query.allow_query_fallback` | Bool | `false` | Whether to allow query fallback when push down optimize fails.<br/>Default to false, meaning when push down optimize failed, return error msg |
|
||||
| `datanode` | -- | -- | Datanode options. |
|
||||
| `datanode.client` | -- | -- | Datanode client options. |
|
||||
| `datanode.client.connect_timeout` | String | `10s` | -- |
|
||||
| `datanode.client.tcp_nodelay` | Bool | `true` | -- |
|
||||
| `logging` | -- | -- | The logging options. |
|
||||
| `logging.dir` | String | `./greptimedb_data/logs` | The directory to store the log files. If set to empty, logs will not be written to files. |
|
||||
| `logging.level` | String | Unset | The log level. Can be `info`/`debug`/`warn`/`error`. |
|
||||
| `logging.enable_otlp_tracing` | Bool | `false` | Enable OTLP tracing. |
|
||||
| `logging.otlp_endpoint` | String | `http://localhost:4318/v1/traces` | The OTLP tracing endpoint. |
|
||||
| `logging.append_stdout` | Bool | `true` | Whether to append logs to stdout. |
|
||||
| `logging.log_format` | String | `text` | The log format. Can be `text`/`json`. |
|
||||
| `logging.max_log_files` | Integer | `720` | The maximum amount of log files. |
|
||||
| `logging.otlp_export_protocol` | String | `http` | The OTLP tracing export protocol. Can be `grpc`/`http`. |
|
||||
| `logging.otlp_headers` | -- | -- | Additional OTLP headers, only valid when using OTLP http |
|
||||
| `logging.tracing_sample_ratio` | -- | Unset | The percentage of traces that will be sampled and exported.<br/>Valid range `[0, 1]`: 1 means all traces are sampled, 0 means no traces are sampled; the default value is 1.<br/>Ratios > 1 are treated as 1; ratios < 0 are treated as 0. |
|
||||
| `logging.tracing_sample_ratio.default_ratio` | Float | `1.0` | -- |
|
||||
| `slow_query` | -- | -- | The slow query log options. |
|
||||
| `slow_query.enable` | Bool | `true` | Whether to enable slow query log. |
|
||||
| `slow_query.record_type` | String | `system_table` | The record type of slow queries. It can be `system_table` or `log`.<br/>If `system_table` is selected, the slow queries will be recorded in a system table `greptime_private.slow_queries`.<br/>If `log` is selected, the slow queries will be logged in a log file `greptimedb-slow-queries.*`. |
|
||||
| `slow_query.threshold` | String | `30s` | The threshold of slow query. It can be human readable time string, for example: `10s`, `100ms`, `1s`. |
|
||||
| `slow_query.sample_ratio` | Float | `1.0` | The sampling ratio of slow query log. The value should be in the range of (0, 1]. For example, `0.1` means 10% of the slow queries will be logged and `1.0` means all slow queries will be logged. |
|
||||
| `slow_query.ttl` | String | `90d` | The TTL of the `slow_queries` system table. Default is `90d` when `record_type` is `system_table`. |
|
||||
| `export_metrics` | -- | -- | The frontend can export its metrics and send them to a Prometheus-compatible service (e.g. `greptimedb` itself) via the remote-write API.<br/>This is only used for `greptimedb` to export its own metrics internally; it's different from Prometheus scraping. |
|
||||
| `export_metrics.enable` | Bool | `false` | Whether to enable exporting metrics. |
|
||||
| `export_metrics.write_interval` | String | `30s` | The interval of export metrics. |
|
||||
| `export_metrics.remote_write` | -- | -- | -- |
|
||||
| `export_metrics.remote_write.url` | String | `""` | The prometheus remote write endpoint that the metrics send to. The url example can be: `http://127.0.0.1:4000/v1/prometheus/write?db=greptime_metrics`. |
|
||||
| `export_metrics.remote_write.headers` | InlineTable | -- | HTTP headers to carry in Prometheus remote-write requests. |
|
||||
| `tracing` | -- | -- | The tracing options. Only effective when compiled with the `tokio-console` feature. |
|
||||
| `tracing.tokio_console_addr` | String | Unset | The tokio console address. |
|
||||
| `memory` | -- | -- | The memory options. |
|
||||
| `memory.enable_heap_profiling` | Bool | `true` | Whether to enable heap profiling activation during startup.<br/>When enabled, heap profiling will be activated if the `MALLOC_CONF` environment variable<br/>is set to "prof:true,prof_active:false". The official image adds this env variable.<br/>Default is true. |
|
||||
| `event_recorder` | -- | -- | Configuration options for the event recorder. |
|
||||
| `event_recorder.ttl` | String | `90d` | TTL for the events table that will be used to store the events. Default is `90d`. |
|
||||
|
||||
|
||||
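
As a companion to the frontend table above, here is a minimal sketch of a frontend TOML fragment that wires a few of these options together, assuming each dotted path maps directly to a TOML table. The metasrv address and other values are illustrative placeholders.

```toml
# Illustrative fragment of a frontend configuration file; values are examples only.
default_timezone = "UTC"

[heartbeat]
interval = "18s"
retry_interval = "3s"

[meta_client]
metasrv_addrs = ["127.0.0.1:3002"]
timeout = "3s"
ddl_timeout = "10s"

[mysql]
enable = true
addr = "127.0.0.1:4002"

[mysql.tls]
mode = "disable"
```
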
### Metasrv
|
||||
|
||||
| Key | Type | Default | Descriptions |
|
||||
| --- | -----| ------- | ----------- |
|
||||
| `data_home` | String | `./greptimedb_data` | The working home directory. |
|
||||
| `store_addrs` | Array | -- | Store server address default to etcd store.<br/>For postgres store, the format is:<br/>"password=password dbname=postgres user=postgres host=localhost port=5432"<br/>For etcd store, the format is:<br/>"127.0.0.1:2379" |
|
||||
| `store_key_prefix` | String | `""` | If it's not empty, the metasrv will store all data with this key prefix. |
|
||||
| `backend` | String | `etcd_store` | The datastore for meta server.<br/>Available values:<br/>- `etcd_store` (default value)<br/>- `memory_store`<br/>- `postgres_store`<br/>- `mysql_store` |
|
||||
| `meta_table_name` | String | `greptime_metakv` | Table name in RDS to store metadata. Effective when using an RDS kv backend.<br/>**Only used when backend is `postgres_store`.** |
|
||||
| `meta_schema_name` | String | `greptime_schema` | Optional PostgreSQL schema for metadata table and election table name qualification.<br/>When PostgreSQL public schema is not writable (e.g., PostgreSQL 15+ with restricted public),<br/>set this to a writable schema. GreptimeDB will use `meta_schema_name`.`meta_table_name`.<br/>GreptimeDB will NOT create the schema automatically; please ensure it exists or the user has permission.<br/>**Only used when backend is `postgres_store`.** |
|
||||
| `meta_election_lock_id` | Integer | `1` | Advisory lock id in PostgreSQL for election. Effective when using PostgreSQL as the kv backend.<br/>Only used when backend is `postgres_store`. |
|
||||
| `selector` | String | `round_robin` | Datanode selector type.<br/>- `round_robin` (default value)<br/>- `lease_based`<br/>- `load_based`<br/>For details, please see "https://docs.greptime.com/developer-guide/metasrv/selector". |
|
||||
| `use_memory_store` | Bool | `false` | Store data in memory. |
|
||||
| `enable_region_failover` | Bool | `false` | Whether to enable region failover.<br/>This feature is only available on GreptimeDB running in cluster mode and<br/>- Using remote WAL<br/>- Using shared storage (e.g., S3). |
|
||||
| `region_failure_detector_initialization_delay` | String | `10m` | The delay before starting region failure detection.<br/>This delay helps prevent Metasrv from triggering unnecessary region failovers before all Datanodes are fully started.<br/>Especially useful when the cluster is not deployed with GreptimeDB Operator and maintenance mode is not enabled. |
|
||||
| `allow_region_failover_on_local_wal` | Bool | `false` | Whether to allow region failover on local WAL.<br/>**This option is not recommended to be set to true, because it may lead to data loss during failover.** |
|
||||
| `node_max_idle_time` | String | `24hours` | Max allowed idle time before removing node info from metasrv memory. |
|
||||
| `enable_telemetry` | Bool | `true` | Whether to enable greptimedb telemetry. Enabled by default. |
|
||||
| `runtime` | -- | -- | The runtime options. |
|
||||
| `runtime.global_rt_size` | Integer | `8` | The number of threads to execute the runtime for global read operations. |
|
||||
| `runtime.compact_rt_size` | Integer | `4` | The number of threads to execute the runtime for global write operations. |
|
||||
| `backend_tls` | -- | -- | TLS configuration for kv store backend (applicable for etcd, PostgreSQL, and MySQL backends)<br/>When using etcd, PostgreSQL, or MySQL as metadata store, you can configure TLS here |
|
||||
| `backend_tls.mode` | String | `prefer` | TLS mode, refer to https://www.postgresql.org/docs/current/libpq-ssl.html<br/>- "disable" - No TLS<br/>- "prefer" (default) - Try TLS, fallback to plain<br/>- "require" - Require TLS<br/>- "verify_ca" - Require TLS and verify CA<br/>- "verify_full" - Require TLS and verify hostname |
|
||||
| `backend_tls.cert_path` | String | `""` | Path to client certificate file (for client authentication)<br/>Like "/path/to/client.crt" |
|
||||
| `backend_tls.key_path` | String | `""` | Path to client private key file (for client authentication)<br/>Like "/path/to/client.key" |
|
||||
| `backend_tls.ca_cert_path` | String | `""` | Path to CA certificate file (for server certificate verification)<br/>Required when using custom CAs or self-signed certificates<br/>Leave empty to use system root certificates only<br/>Like "/path/to/ca.crt" |
|
||||
| `backend_tls.watch` | Bool | `false` | Watch for certificate file changes and auto reload |
|
||||
| `grpc` | -- | -- | The gRPC server options. |
|
||||
| `grpc.bind_addr` | String | `127.0.0.1:3002` | The address to bind the gRPC server. |
|
||||
| `grpc.server_addr` | String | `127.0.0.1:3002` | The communication server address for the frontend and datanode to connect to metasrv.<br/>If left empty or unset, the server will automatically use the IP address of the first network interface<br/>on the host, with the same port number as the one specified in `bind_addr`. |
|
||||
| `grpc.runtime_size` | Integer | `8` | The number of server worker threads. |
|
||||
| `grpc.max_recv_message_size` | String | `512MB` | The maximum receive message size for gRPC server. |
|
||||
| `grpc.max_send_message_size` | String | `512MB` | The maximum send message size for gRPC server. |
|
||||
| `http` | -- | -- | The HTTP server options. |
|
||||
| `http.addr` | String | `127.0.0.1:4000` | The address to bind the HTTP server. |
|
||||
| `http.timeout` | String | `0s` | HTTP request timeout. Set to 0 to disable timeout. |
|
||||
| `http.body_limit` | String | `64MB` | HTTP request body limit.<br/>The following units are supported: `B`, `KB`, `KiB`, `MB`, `MiB`, `GB`, `GiB`, `TB`, `TiB`, `PB`, `PiB`.<br/>Set to 0 to disable limit. |
|
||||
| `procedure` | -- | -- | Procedure storage options. |
|
||||
| `procedure.max_retry_times` | Integer | `12` | Procedure max retry time. |
|
||||
| `procedure.retry_delay` | String | `500ms` | Initial retry delay of procedures, increases exponentially |
|
||||
| `procedure.max_metadata_value_size` | String | `1500KiB` | Automatically split large values.<br/>GreptimeDB procedures use etcd as the default metadata storage backend.<br/>The maximum size of any etcd request is 1.5 MiB.<br/>1500KiB = 1536KiB (1.5MiB) - 36KiB (reserved size of key).<br/>Comment out `max_metadata_value_size` to avoid splitting large values (no limit). |
|
||||
| `procedure.max_running_procedures` | Integer | `128` | Max running procedures.<br/>The maximum number of procedures that can be running at the same time.<br/>If the number of running procedures exceeds this limit, the procedure will be rejected. |
|
||||
| `failure_detector` | -- | -- | -- |
|
||||
| `failure_detector.threshold` | Float | `8.0` | The threshold value used by the failure detector to determine failure conditions. |
|
||||
| `failure_detector.min_std_deviation` | String | `100ms` | The minimum standard deviation of the heartbeat intervals, used to calculate acceptable variations. |
|
||||
| `failure_detector.acceptable_heartbeat_pause` | String | `10000ms` | The acceptable pause duration between heartbeats, used to determine if a heartbeat interval is acceptable. |
|
||||
| `failure_detector.first_heartbeat_estimate` | String | `1000ms` | The initial estimate of the heartbeat interval used by the failure detector. |
|
||||
| `datanode` | -- | -- | Datanode options. |
|
||||
| `datanode.client` | -- | -- | Datanode client options. |
|
||||
| `datanode.client.timeout` | String | `10s` | Operation timeout. |
|
||||
| `datanode.client.connect_timeout` | String | `10s` | Connect server timeout. |
|
||||
| `datanode.client.tcp_nodelay` | Bool | `true` | `TCP_NODELAY` option for accepted connections. |
|
||||
| `wal` | -- | -- | -- |
|
||||
| `wal.provider` | String | `raft_engine` | -- |
|
||||
| `wal.broker_endpoints` | Array | -- | The broker endpoints of the Kafka cluster.<br/><br/>**It's only used when the provider is `kafka`**. |
|
||||
| `wal.auto_create_topics` | Bool | `true` | Automatically create topics for WAL.<br/>Set to `true` to automatically create topics for WAL.<br/>Otherwise, use topics named `topic_name_prefix_[0..num_topics)`<br/>**It's only used when the provider is `kafka`**. |
|
||||
| `wal.auto_prune_interval` | String | `30m` | Interval of automatic WAL pruning.<br/>Set to `0s` to disable automatic WAL pruning, which periodically deletes unused remote WAL entries.<br/>**It's only used when the provider is `kafka`**. |
|
||||
| `wal.flush_trigger_size` | String | `512MB` | Estimated size threshold to trigger a flush when using Kafka remote WAL.<br/>Since multiple regions may share a Kafka topic, the estimated size is calculated as:<br/> (latest_entry_id - flushed_entry_id) * avg_record_size<br/>MetaSrv triggers a flush for a region when this estimated size exceeds `flush_trigger_size`.<br/>- `latest_entry_id`: The latest entry ID in the topic.<br/>- `flushed_entry_id`: The last flushed entry ID for the region.<br/>Set to "0" to let the system decide the flush trigger size.<br/>**It's only used when the provider is `kafka`**. |
|
||||
| `wal.checkpoint_trigger_size` | String | `128MB` | Estimated size threshold to trigger a checkpoint when using Kafka remote WAL.<br/>The estimated size is calculated as:<br/> (latest_entry_id - last_checkpoint_entry_id) * avg_record_size<br/>MetaSrv triggers a checkpoint for a region when this estimated size exceeds `checkpoint_trigger_size`.<br/>Set to "0" to let the system decide the checkpoint trigger size.<br/>**It's only used when the provider is `kafka`**. |
|
||||
| `wal.auto_prune_parallelism` | Integer | `10` | Concurrent task limit for automatic WAL pruning.<br/>**It's only used when the provider is `kafka`**. |
|
||||
| `wal.num_topics` | Integer | `64` | Number of topics used for remote WAL.<br/>**It's only used when the provider is `kafka`**. |
|
||||
| `wal.selector_type` | String | `round_robin` | Topic selector type.<br/>Available selector types:<br/>- `round_robin` (default)<br/>**It's only used when the provider is `kafka`**. |
|
||||
| `wal.topic_name_prefix` | String | `greptimedb_wal_topic` | A Kafka topic is constructed by concatenating `topic_name_prefix` and `topic_id`.<br/>Only accepts strings that match the following regular expression pattern:<br/>[a-zA-Z_:-][a-zA-Z0-9_:\-\.@#]*<br/>e.g., greptimedb_wal_topic_0, greptimedb_wal_topic_1.<br/>**It's only used when the provider is `kafka`**. |
|
||||
| `wal.replication_factor` | Integer | `1` | Expected number of replicas of each partition.<br/>**It's only used when the provider is `kafka`**. |
|
||||
| `wal.create_topic_timeout` | String | `30s` | The timeout for creating a Kafka topic.<br/>**It's only used when the provider is `kafka`**. |
|
||||
| `event_recorder` | -- | -- | Configuration options for the event recorder. |
|
||||
| `event_recorder.ttl` | String | `90d` | TTL for the events table that will be used to store the events. Default is `90d`. |
|
||||
| `stats_persistence` | -- | -- | Configuration options for the stats persistence. |
|
||||
| `stats_persistence.ttl` | String | `0s` | TTL for the stats table that will be used to store the stats.<br/>Set to `0s` to disable stats persistence.<br/>Default is `0s`.<br/>If you want to enable stats persistence, set the TTL to a value greater than 0.<br/>It is recommended to set a small value, e.g., `3h`. |
|
||||
| `stats_persistence.interval` | String | `10m` | The interval to persist the stats. Default is `10m`.<br/>The minimum value is `10m`, if the value is less than `10m`, it will be overridden to `10m`. |
|
||||
| `logging` | -- | -- | The logging options. |
|
||||
| `logging.dir` | String | `./greptimedb_data/logs` | The directory to store the log files. If set to empty, logs will not be written to files. |
|
||||
| `logging.level` | String | Unset | The log level. Can be `info`/`debug`/`warn`/`error`. |
|
||||
| `logging.enable_otlp_tracing` | Bool | `false` | Enable OTLP tracing. |
|
||||
| `logging.otlp_endpoint` | String | `http://localhost:4318/v1/traces` | The OTLP tracing endpoint. |
|
||||
| `logging.append_stdout` | Bool | `true` | Whether to append logs to stdout. |
|
||||
| `logging.log_format` | String | `text` | The log format. Can be `text`/`json`. |
|
||||
| `logging.max_log_files` | Integer | `720` | The maximum amount of log files. |
|
||||
| `logging.otlp_export_protocol` | String | `http` | The OTLP tracing export protocol. Can be `grpc`/`http`. |
|
||||
| `logging.otlp_headers` | -- | -- | Additional OTLP headers, only valid when using OTLP http |
|
||||
| `logging.tracing_sample_ratio` | -- | Unset | The percentage of traces that will be sampled and exported.<br/>Valid range `[0, 1]`: 1 means all traces are sampled, 0 means no traces are sampled; the default value is 1.<br/>Ratios > 1 are treated as 1; ratios < 0 are treated as 0. |
|
||||
| `logging.tracing_sample_ratio.default_ratio` | Float | `1.0` | -- |
|
||||
| `export_metrics` | -- | -- | The metasrv can export its metrics and send them to a Prometheus-compatible service (e.g. `greptimedb` itself) via the remote-write API.<br/>This is only used for `greptimedb` to export its own metrics internally; it's different from Prometheus scraping. |
|
||||
| `export_metrics.enable` | Bool | `false` | Whether to enable exporting metrics. |
|
||||
| `export_metrics.write_interval` | String | `30s` | The interval of export metrics. |
|
||||
| `export_metrics.remote_write` | -- | -- | -- |
|
||||
| `export_metrics.remote_write.url` | String | `""` | The prometheus remote write endpoint that the metrics send to. The url example can be: `http://127.0.0.1:4000/v1/prometheus/write?db=greptime_metrics`. |
|
||||
| `export_metrics.remote_write.headers` | InlineTable | -- | HTTP headers to carry in Prometheus remote-write requests. |
|
||||
| `tracing` | -- | -- | The tracing options. Only effective when compiled with the `tokio-console` feature. |
|
||||
| `tracing.tokio_console_addr` | String | Unset | The tokio console address. |
|
||||
| `memory` | -- | -- | The memory options. |
|
||||
| `memory.enable_heap_profiling` | Bool | `true` | Whether to enable heap profiling activation during startup.<br/>When enabled, heap profiling will be activated if the `MALLOC_CONF` environment variable<br/>is set to "prof:true,prof_active:false". The official image adds this env variable.<br/>Default is true. |
|
||||
|
||||
|
||||
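
The fragment below sketches how the metasrv options above could be combined to use a PostgreSQL metadata backend and Kafka remote WAL, again assuming each dotted path maps directly to a TOML table. The connection string and broker endpoint are placeholders.

```toml
# Illustrative fragment of a metasrv configuration file; values are placeholders.
data_home = "./greptimedb_data"
backend = "postgres_store"
store_addrs = ["password=password dbname=postgres user=postgres host=localhost port=5432"]
meta_table_name = "greptime_metakv"
meta_schema_name = "greptime_schema"

[backend_tls]
mode = "prefer"

[wal]
provider = "kafka"
broker_endpoints = ["127.0.0.1:9092"]
num_topics = 64
auto_prune_interval = "30m"
```
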
### Datanode
|
||||
|
||||
| Key | Type | Default | Descriptions |
|
||||
| --- | -----| ------- | ----------- |
|
||||
| `node_id` | Integer | Unset | The datanode identifier and should be unique in the cluster. |
|
||||
| `require_lease_before_startup` | Bool | `false` | Start services after regions have obtained leases.<br/>It will block the datanode start if it can't receive leases in the heartbeat from metasrv. |
|
||||
| `init_regions_in_background` | Bool | `false` | Initialize all regions in the background during the startup.<br/>By default, it provides services after all regions have been initialized. |
|
||||
| `init_regions_parallelism` | Integer | `16` | Parallelism of initializing regions. |
|
||||
| `max_concurrent_queries` | Integer | `0` | The maximum number of concurrent queries allowed to be executed. Zero means unlimited. |
|
||||
| `enable_telemetry` | Bool | `true` | Enable telemetry to collect anonymous usage data. Enabled by default. |
|
||||
| `http` | -- | -- | The HTTP server options. |
|
||||
| `http.addr` | String | `127.0.0.1:4000` | The address to bind the HTTP server. |
|
||||
| `http.timeout` | String | `0s` | HTTP request timeout. Set to 0 to disable timeout. |
|
||||
| `http.body_limit` | String | `64MB` | HTTP request body limit.<br/>The following units are supported: `B`, `KB`, `KiB`, `MB`, `MiB`, `GB`, `GiB`, `TB`, `TiB`, `PB`, `PiB`.<br/>Set to 0 to disable limit. |
|
||||
| `grpc` | -- | -- | The gRPC server options. |
|
||||
| `grpc.bind_addr` | String | `127.0.0.1:3001` | The address to bind the gRPC server. |
|
||||
| `grpc.server_addr` | String | `127.0.0.1:3001` | The address advertised to the metasrv, and used for connections from outside the host.<br/>If left empty or unset, the server will automatically use the IP address of the first network interface<br/>on the host, with the same port number as the one specified in `grpc.bind_addr`. |
|
||||
| `grpc.runtime_size` | Integer | `8` | The number of server worker threads. |
|
||||
| `grpc.max_recv_message_size` | String | `512MB` | The maximum receive message size for gRPC server. |
|
||||
| `grpc.max_send_message_size` | String | `512MB` | The maximum send message size for gRPC server. |
|
||||
| `grpc.flight_compression` | String | `arrow_ipc` | Compression mode for datanode side Arrow IPC service. Available options:<br/>- `none`: disable all compression<br/>- `transport`: only enable gRPC transport compression (zstd)<br/>- `arrow_ipc`: only enable Arrow IPC compression (lz4)<br/>- `all`: enable all compression.<br/>Default to `none` |
|
||||
| `grpc.tls` | -- | -- | gRPC server TLS options, see `mysql.tls` section. |
|
||||
| `grpc.tls.mode` | String | `disable` | TLS mode. |
|
||||
| `grpc.tls.cert_path` | String | Unset | Certificate file path. |
|
||||
| `grpc.tls.key_path` | String | Unset | Private key file path. |
|
||||
| `grpc.tls.watch` | Bool | `false` | Watch for Certificate and key file change and auto reload.<br/>For now, gRPC tls config does not support auto reload. |
|
||||
| `runtime` | -- | -- | The runtime options. |
|
||||
| `runtime.global_rt_size` | Integer | `8` | The number of threads to execute the runtime for global read operations. |
|
||||
| `runtime.compact_rt_size` | Integer | `4` | The number of threads to execute the runtime for global write operations. |
|
||||
| `heartbeat` | -- | -- | The heartbeat options. |
|
||||
| `heartbeat.interval` | String | `3s` | Interval for sending heartbeat messages to the metasrv. |
|
||||
| `heartbeat.retry_interval` | String | `3s` | Interval for retrying to send heartbeat messages to the metasrv. |
|
||||
| `meta_client` | -- | -- | The metasrv client options. |
|
||||
| `meta_client.metasrv_addrs` | Array | -- | The addresses of the metasrv. |
|
||||
| `meta_client.timeout` | String | `3s` | Operation timeout. |
|
||||
| `meta_client.heartbeat_timeout` | String | `500ms` | Heartbeat timeout. |
|
||||
| `meta_client.ddl_timeout` | String | `10s` | DDL timeout. |
|
||||
| `meta_client.connect_timeout` | String | `1s` | Connect server timeout. |
|
||||
| `meta_client.tcp_nodelay` | Bool | `true` | `TCP_NODELAY` option for accepted connections. |
|
||||
| `meta_client.metadata_cache_max_capacity` | Integer | `100000` | The configuration about the cache of the metadata. |
|
||||
| `meta_client.metadata_cache_ttl` | String | `10m` | TTL of the metadata cache. |
|
||||
| `meta_client.metadata_cache_tti` | String | `5m` | -- |
|
||||
| `wal` | -- | -- | The WAL options. |
|
||||
| `wal.provider` | String | `raft_engine` | The provider of the WAL.<br/>- `raft_engine`: the wal is stored in the local file system by raft-engine.<br/>- `kafka`: it's remote wal that data is stored in Kafka. |
|
||||
| `wal.dir` | String | Unset | The directory to store the WAL files.<br/>**It's only used when the provider is `raft_engine`**. |
|
||||
| `wal.file_size` | String | `128MB` | The size of the WAL segment file.<br/>**It's only used when the provider is `raft_engine`**. |
|
||||
| `wal.purge_threshold` | String | `1GB` | The threshold of the WAL size to trigger a purge.<br/>**It's only used when the provider is `raft_engine`**. |
|
||||
| `wal.purge_interval` | String | `1m` | The interval to trigger a purge.<br/>**It's only used when the provider is `raft_engine`**. |
|
||||
| `wal.read_batch_size` | Integer | `128` | The read batch size.<br/>**It's only used when the provider is `raft_engine`**. |
|
||||
| `wal.sync_write` | Bool | `false` | Whether to use sync write.<br/>**It's only used when the provider is `raft_engine`**. |
|
||||
| `wal.enable_log_recycle` | Bool | `true` | Whether to reuse logically truncated log files.<br/>**It's only used when the provider is `raft_engine`**. |
|
||||
| `wal.prefill_log_files` | Bool | `false` | Whether to pre-create log files on start up.<br/>**It's only used when the provider is `raft_engine`**. |
|
||||
| `wal.sync_period` | String | `10s` | Duration for fsyncing log files.<br/>**It's only used when the provider is `raft_engine`**. |
|
||||
| `wal.recovery_parallelism` | Integer | `2` | Parallelism during WAL recovery. |
|
||||
| `wal.broker_endpoints` | Array | -- | The Kafka broker endpoints.<br/>**It's only used when the provider is `kafka`**. |
|
||||
| `wal.max_batch_bytes` | String | `1MB` | The max size of a single producer batch.<br/>Warning: Kafka has a default limit of 1MB per message in a topic.<br/>**It's only used when the provider is `kafka`**. |
|
||||
| `wal.consumer_wait_timeout` | String | `100ms` | The consumer wait timeout.<br/>**It's only used when the provider is `kafka`**. |
|
||||
| `wal.create_index` | Bool | `true` | Whether to enable WAL index creation.<br/>**It's only used when the provider is `kafka`**. |
|
||||
| `wal.dump_index_interval` | String | `60s` | The interval for dumping WAL indexes.<br/>**It's only used when the provider is `kafka`**. |
|
||||
| `wal.overwrite_entry_start_id` | Bool | `false` | Ignore missing entries during read WAL.<br/>**It's only used when the provider is `kafka`**.<br/><br/>This option ensures that when Kafka messages are deleted, the system<br/>can still successfully replay memtable data without throwing an<br/>out-of-range error.<br/>However, enabling this option might lead to unexpected data loss,<br/>as the system will skip over missing entries instead of treating<br/>them as critical errors. |
|
||||
| `query` | -- | -- | The query engine options. |
|
||||
| `query.parallelism` | Integer | `0` | Parallelism of the query engine.<br/>Default to 0, which means the number of CPU cores. |
|
||||
| `storage` | -- | -- | The data storage options. |
|
||||
| `storage.data_home` | String | `./greptimedb_data` | The working home directory. |
|
||||
| `storage.type` | String | `File` | The storage type used to store the data.<br/>- `File`: the data is stored in the local file system.<br/>- `S3`: the data is stored in the S3 object storage.<br/>- `Gcs`: the data is stored in the Google Cloud Storage.<br/>- `Azblob`: the data is stored in the Azure Blob Storage.<br/>- `Oss`: the data is stored in the Aliyun OSS. |
|
||||
| `storage.cache_path` | String | Unset | Read cache configuration for object storage such as `S3`. It's configured by default when using object storage, and configuring it is recommended for better performance.<br/>A local file directory; defaults to `{data_home}`. An empty string disables the read cache. |
|
||||
| `storage.cache_capacity` | String | Unset | The local file cache capacity in bytes. If your disk space is sufficient, it is recommended to set it larger. |
|
||||
| `storage.bucket` | String | Unset | The S3 bucket name.<br/>**It's only used when the storage type is `S3`, `Oss` and `Gcs`**. |
|
||||
| `storage.root` | String | Unset | The S3 data will be stored in the specified prefix, for example, `s3://${bucket}/${root}`.<br/>**It's only used when the storage type is `S3`, `Oss` and `Azblob`**. |
|
||||
| `storage.access_key_id` | String | Unset | The access key id of the aws account.<br/>It's **highly recommended** to use AWS IAM roles instead of hardcoding the access key id and secret key.<br/>**It's only used when the storage type is `S3` and `Oss`**. |
|
||||
| `storage.secret_access_key` | String | Unset | The secret access key of the aws account.<br/>It's **highly recommended** to use AWS IAM roles instead of hardcoding the access key id and secret key.<br/>**It's only used when the storage type is `S3`**. |
|
||||
| `storage.access_key_secret` | String | Unset | The secret access key of the aliyun account.<br/>**It's only used when the storage type is `Oss`**. |
|
||||
| `storage.account_name` | String | Unset | The account name of the azure account.<br/>**It's only used when the storage type is `Azblob`**. |
|
||||
| `storage.account_key` | String | Unset | The account key of the azure account.<br/>**It's only used when the storage type is `Azblob`**. |
|
||||
| `storage.scope` | String | Unset | The scope of the google cloud storage.<br/>**It's only used when the storage type is `Gcs`**. |
|
||||
| `storage.credential_path` | String | Unset | The credential path of the google cloud storage.<br/>**It's only used when the storage type is `Gcs`**. |
|
||||
| `storage.credential` | String | Unset | The credential of the google cloud storage.<br/>**It's only used when the storage type is `Gcs`**. |
|
||||
| `storage.container` | String | Unset | The container of the azure account.<br/>**It's only used when the storage type is `Azblob`**. |
|
||||
| `storage.sas_token` | String | Unset | The sas token of the azure account.<br/>**It's only used when the storage type is `Azblob`**. |
|
||||
| `storage.endpoint` | String | Unset | The endpoint of the S3 service.<br/>**It's only used when the storage type is `S3`, `Oss`, `Gcs` and `Azblob`**. |
|
||||
| `storage.region` | String | Unset | The region of the S3 service.<br/>**It's only used when the storage type is `S3`, `Oss`, `Gcs` and `Azblob`**. |
|
||||
| `storage.http_client` | -- | -- | The http client options to the storage.<br/>**It's only used when the storage type is `S3`, `Oss`, `Gcs` and `Azblob`**. |
|
||||
| `storage.http_client.pool_max_idle_per_host` | Integer | `1024` | The maximum idle connection per host allowed in the pool. |
|
||||
| `storage.http_client.connect_timeout` | String | `30s` | The timeout for only the connect phase of a http client. |
|
||||
| `storage.http_client.timeout` | String | `30s` | The total request timeout, applied from when the request starts connecting until the response body has finished.<br/>Also considered a total deadline. |
|
||||
| `storage.http_client.pool_idle_timeout` | String | `90s` | The timeout for idle sockets being kept-alive. |
|
||||
| `storage.http_client.skip_ssl_validation` | Bool | `false` | To skip the ssl verification<br/>**Security Notice**: Setting `skip_ssl_validation = true` disables certificate verification, making connections vulnerable to man-in-the-middle attacks. Only use this in development or trusted private networks. |
|
||||
| `[[region_engine]]` | -- | -- | The region engine options. You can configure multiple region engines. |
|
||||
| `region_engine.mito` | -- | -- | The Mito engine options. |
|
||||
| `region_engine.mito.num_workers` | Integer | `8` | Number of region workers. |
|
||||
| `region_engine.mito.worker_channel_size` | Integer | `128` | Request channel size of each worker. |
|
||||
| `region_engine.mito.worker_request_batch_size` | Integer | `64` | Max batch size for a worker to handle requests. |
|
||||
| `region_engine.mito.manifest_checkpoint_distance` | Integer | `10` | Number of meta action updated to trigger a new checkpoint for the manifest. |
|
||||
| `region_engine.mito.experimental_manifest_keep_removed_file_count` | Integer | `256` | Number of removed files to keep in the manifest's `removed_files` field before also<br/>removing them from `removed_files`. Mostly for debugging purposes.<br/>If set to 0, only `keep_removed_file_ttl` is used to decide when to remove files<br/>from the `removed_files` field. |
|
||||
| `region_engine.mito.experimental_manifest_keep_removed_file_ttl` | String | `1h` | How long to keep removed files in the `removed_files` field of the manifest<br/>after they are removed from the manifest.<br/>Files will only be removed from the `removed_files` field<br/>if both `keep_removed_file_count` and `keep_removed_file_ttl` are reached. |
|
||||
| `region_engine.mito.compress_manifest` | Bool | `false` | Whether to compress manifest and checkpoint file by gzip (default false). |
|
||||
| `region_engine.mito.max_background_flushes` | Integer | Auto | Max number of running background flush jobs (default: 1/2 of cpu cores). |
|
||||
| `region_engine.mito.max_background_compactions` | Integer | Auto | Max number of running background compaction jobs (default: 1/4 of cpu cores). |
|
||||
| `region_engine.mito.max_background_purges` | Integer | Auto | Max number of running background purge jobs (default: number of cpu cores). |
|
||||
| `region_engine.mito.auto_flush_interval` | String | `1h` | Interval to auto flush a region if it has not flushed yet. |
|
||||
| `region_engine.mito.global_write_buffer_size` | String | Auto | Global write buffer size for all regions. If not set, it defaults to 1/8 of OS memory, with a maximum of 1GB. |
|
||||
| `region_engine.mito.global_write_buffer_reject_size` | String | Auto | Global write buffer size threshold to reject write requests. If not set, it defaults to 2 times `global_write_buffer_size`. |
|
||||
| `region_engine.mito.sst_meta_cache_size` | String | Auto | Cache size for SST metadata. Set it to 0 to disable the cache.<br/>If not set, it defaults to 1/32 of OS memory, with a maximum of 128MB. |
|
||||
| `region_engine.mito.vector_cache_size` | String | Auto | Cache size for vectors and arrow arrays. Set it to 0 to disable the cache.<br/>If not set, it defaults to 1/16 of OS memory, with a maximum of 512MB. |
|
||||
| `region_engine.mito.page_cache_size` | String | Auto | Cache size for pages of SST row groups. Set it to 0 to disable the cache.<br/>If not set, it defaults to 1/8 of OS memory. |
|
||||
| `region_engine.mito.selector_result_cache_size` | String | Auto | Cache size for time series selector (e.g. `last_value()`). Set it to 0 to disable the cache.<br/>If not set, it defaults to 1/16 of OS memory, with a maximum of 512MB. |
|
||||
| `region_engine.mito.enable_write_cache` | Bool | `false` | Whether to enable the write cache. It's enabled by default when using object storage, and enabling it is recommended for better performance with object storage. |
|
||||
| `region_engine.mito.write_cache_path` | String | `""` | File system path for write cache, defaults to `{data_home}`. |
|
||||
| `region_engine.mito.write_cache_size` | String | `5GiB` | Capacity for write cache. If your disk space is sufficient, it is recommended to set it larger. |
|
||||
| `region_engine.mito.write_cache_ttl` | String | Unset | TTL for write cache. |
|
||||
| `region_engine.mito.sst_write_buffer_size` | String | `8MB` | Buffer size for SST writing. |
|
||||
| `region_engine.mito.parallel_scan_channel_size` | Integer | `32` | Capacity of the channel to send data from parallel scan tasks to the main task. |
|
||||
| `region_engine.mito.max_concurrent_scan_files` | Integer | `384` | Maximum number of SST files to scan concurrently. |
|
||||
| `region_engine.mito.allow_stale_entries` | Bool | `false` | Whether to allow stale WAL entries read during replay. |
|
||||
| `region_engine.mito.min_compaction_interval` | String | `0m` | Minimum time interval between two compactions.<br/>To align with the old behavior, the default value is 0 (no restrictions). |
|
||||
| `region_engine.mito.index` | -- | -- | The options for index in Mito engine. |
|
||||
| `region_engine.mito.index.aux_path` | String | `""` | Auxiliary directory path for the index in filesystem, used to store intermediate files for<br/>creating the index and staging files for searching the index, defaults to `{data_home}/index_intermediate`.<br/>The default name for this directory is `index_intermediate` for backward compatibility.<br/><br/>This path contains two subdirectories:<br/>- `__intm`: for storing intermediate files used during creating index.<br/>- `staging`: for storing staging files used during searching index. |
|
||||
| `region_engine.mito.index.staging_size` | String | `2GB` | The max capacity of the staging directory. |
|
||||
| `region_engine.mito.index.staging_ttl` | String | `7d` | The TTL of the staging directory.<br/>Defaults to 7 days.<br/>Setting it to "0s" to disable TTL. |
|
||||
| `region_engine.mito.index.metadata_cache_size` | String | `64MiB` | Cache size for inverted index metadata. |
|
||||
| `region_engine.mito.index.content_cache_size` | String | `128MiB` | Cache size for inverted index content. |
|
||||
| `region_engine.mito.index.content_cache_page_size` | String | `64KiB` | Page size for inverted index content cache. |
|
||||
| `region_engine.mito.index.result_cache_size` | String | `128MiB` | Cache size for index result. |
|
||||
| `region_engine.mito.inverted_index` | -- | -- | The options for inverted index in Mito engine. |
|
||||
| `region_engine.mito.inverted_index.create_on_flush` | String | `auto` | Whether to create the index on flush.<br/>- `auto`: automatically (default)<br/>- `disable`: never |
|
||||
| `region_engine.mito.inverted_index.create_on_compaction` | String | `auto` | Whether to create the index on compaction.<br/>- `auto`: automatically (default)<br/>- `disable`: never |
|
||||
| `region_engine.mito.inverted_index.apply_on_query` | String | `auto` | Whether to apply the index on query<br/>- `auto`: automatically (default)<br/>- `disable`: never |
|
||||
| `region_engine.mito.inverted_index.mem_threshold_on_create` | String | `auto` | Memory threshold for performing an external sort during index creation.<br/>- `auto`: automatically determine the threshold based on the system memory size (default)<br/>- `unlimited`: no memory limit<br/>- `[size]` e.g. `64MB`: fixed memory threshold |
|
||||
| `region_engine.mito.inverted_index.intermediate_path` | String | `""` | Deprecated, use `region_engine.mito.index.aux_path` instead. |
|
||||
| `region_engine.mito.fulltext_index` | -- | -- | The options for full-text index in Mito engine. |
|
||||
| `region_engine.mito.fulltext_index.create_on_flush` | String | `auto` | Whether to create the index on flush.<br/>- `auto`: automatically (default)<br/>- `disable`: never |
|
||||
| `region_engine.mito.fulltext_index.create_on_compaction` | String | `auto` | Whether to create the index on compaction.<br/>- `auto`: automatically (default)<br/>- `disable`: never |
|
||||
| `region_engine.mito.fulltext_index.apply_on_query` | String | `auto` | Whether to apply the index on query<br/>- `auto`: automatically (default)<br/>- `disable`: never |
|
||||
| `region_engine.mito.fulltext_index.mem_threshold_on_create` | String | `auto` | Memory threshold for index creation.<br/>- `auto`: automatically determine the threshold based on the system memory size (default)<br/>- `unlimited`: no memory limit<br/>- `[size]` e.g. `64MB`: fixed memory threshold |
|
||||
| `region_engine.mito.bloom_filter_index` | -- | -- | The options for bloom filter index in Mito engine. |
|
||||
| `region_engine.mito.bloom_filter_index.create_on_flush` | String | `auto` | Whether to create the index on flush.<br/>- `auto`: automatically (default)<br/>- `disable`: never |
|
||||
| `region_engine.mito.bloom_filter_index.create_on_compaction` | String | `auto` | Whether to create the index on compaction.<br/>- `auto`: automatically (default)<br/>- `disable`: never |
|
||||
| `region_engine.mito.bloom_filter_index.apply_on_query` | String | `auto` | Whether to apply the index on query<br/>- `auto`: automatically (default)<br/>- `disable`: never |
|
||||
| `region_engine.mito.bloom_filter_index.mem_threshold_on_create` | String | `auto` | Memory threshold for the index creation.<br/>- `auto`: automatically determine the threshold based on the system memory size (default)<br/>- `unlimited`: no memory limit<br/>- `[size]` e.g. `64MB`: fixed memory threshold |
|
||||
| `region_engine.mito.memtable` | -- | -- | -- |
|
||||
| `region_engine.mito.memtable.type` | String | `time_series` | Memtable type.<br/>- `time_series`: time-series memtable<br/>- `partition_tree`: partition tree memtable (experimental) |
|
||||
| `region_engine.mito.memtable.index_max_keys_per_shard` | Integer | `8192` | The max number of keys in one shard.<br/>Only available for `partition_tree` memtable. |
|
||||
| `region_engine.mito.memtable.data_freeze_threshold` | Integer | `32768` | The max rows of data inside the actively writing buffer in one shard.<br/>Only available for `partition_tree` memtable. |
|
||||
| `region_engine.mito.memtable.fork_dictionary_bytes` | String | `1GiB` | Max dictionary bytes.<br/>Only available for `partition_tree` memtable. |
|
||||
| `region_engine.file` | -- | -- | Enable the file engine. |
|
||||
| `region_engine.metric` | -- | -- | Metric engine options. |
|
||||
| `region_engine.metric.experimental_sparse_primary_key_encoding` | Bool | `false` | Whether to enable the experimental sparse primary key encoding. |
|
||||
| `logging` | -- | -- | The logging options. |
|
||||
| `logging.dir` | String | `./greptimedb_data/logs` | The directory to store the log files. If set to empty, logs will not be written to files. |
|
||||
| `logging.level` | String | Unset | The log level. Can be `info`/`debug`/`warn`/`error`. |
|
||||
| `logging.enable_otlp_tracing` | Bool | `false` | Enable OTLP tracing. |
|
||||
| `logging.otlp_endpoint` | String | `http://localhost:4318/v1/traces` | The OTLP tracing endpoint. |
|
||||
| `logging.append_stdout` | Bool | `true` | Whether to append logs to stdout. |
|
||||
| `logging.log_format` | String | `text` | The log format. Can be `text`/`json`. |
|
||||
| `logging.max_log_files` | Integer | `720` | The maximum amount of log files. |
|
||||
| `logging.otlp_export_protocol` | String | `http` | The OTLP tracing export protocol. Can be `grpc`/`http`. |
|
||||
| `logging.otlp_headers` | -- | -- | Additional OTLP headers, only valid when using OTLP http |
|
||||
| `logging.tracing_sample_ratio` | -- | Unset | The percentage of tracing that will be sampled and exported.<br/>Valid range `[0, 1]`: 1 means all traces are sampled, 0 means none are sampled; the default value is 1.<br/>Ratios > 1 are treated as 1; ratios < 0 are treated as 0. |
|
||||
| `logging.tracing_sample_ratio.default_ratio` | Float | `1.0` | -- |
|
||||
| `export_metrics` | -- | -- | The datanode can export its metrics and send them to a Prometheus-compatible service (e.g. `greptimedb` itself) via the remote-write API.<br/>This is only used for `greptimedb` to export its own metrics internally. It's different from Prometheus scrape. |
|
||||
| `export_metrics.enable` | Bool | `false` | Whether to enable exporting metrics. |
|
||||
| `export_metrics.write_interval` | String | `30s` | The interval of export metrics. |
|
||||
| `export_metrics.remote_write` | -- | -- | -- |
|
||||
| `export_metrics.remote_write.url` | String | `""` | The Prometheus remote-write endpoint that the metrics are sent to, for example: `http://127.0.0.1:4000/v1/prometheus/write?db=greptime_metrics`. |
|
||||
| `export_metrics.remote_write.headers` | InlineTable | -- | HTTP headers carried by the Prometheus remote-write requests. |
|
||||
| `tracing` | -- | -- | The tracing options. Only effective when compiled with the `tokio-console` feature. |
|
||||
| `tracing.tokio_console_addr` | String | Unset | The tokio console address. |
|
||||
| `memory` | -- | -- | The memory options. |
|
||||
| `memory.enable_heap_profiling` | Bool | `true` | Whether to enable heap profiling activation during startup.<br/>When enabled, heap profiling will be activated if the `MALLOC_CONF` environment variable<br/>is set to "prof:true,prof_active:false". The official image adds this env variable.<br/>Default is true. |
|
||||
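For quick reference, the cache and write-cache options above map onto the `[region_engine.mito]` section of the datanode configuration file. A minimal sketch, assuming data is stored on object storage; all keys come from the reference above and the values are illustrative, not tuning recommendations:

```toml
[[region_engine]]
[region_engine.mito]
# Enable the local write cache for object storage deployments.
enable_write_cache = true
# Spill the write cache to a local path; defaults to `{data_home}` when empty.
write_cache_path = ""
write_cache_size = "5GiB"
# Explicit cache sizes instead of the memory-derived "Auto" defaults.
sst_meta_cache_size = "128MB"
vector_cache_size = "512MB"
page_cache_size = "512MB"
selector_result_cache_size = "512MB"
```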

### Flownode

| Key | Type | Default | Descriptions |
| --- | ---- | ------- | ------------ |
| `node_id` | Integer | Unset | The flownode identifier; it should be unique in the cluster. |
|
||||
| `flow` | -- | -- | flow engine options. |
|
||||
| `flow.num_workers` | Integer | `0` | The number of flow workers in the flownode.<br/>If not set (or set to 0), the number of CPU cores divided by 2 is used. |
|
||||
| `flow.batching_mode` | -- | -- | -- |
|
||||
| `flow.batching_mode.query_timeout` | String | `600s` | The default batching engine query timeout is 10 minutes. |
|
||||
| `flow.batching_mode.slow_query_threshold` | String | `60s` | Output a warning log for any query that runs longer than this threshold. |
|
||||
| `flow.batching_mode.experimental_min_refresh_duration` | String | `5s` | The minimum duration between two query executions of a batching mode task. |
|
||||
| `flow.batching_mode.grpc_conn_timeout` | String | `5s` | The gRPC connection timeout |
|
||||
| `flow.batching_mode.experimental_grpc_max_retries` | Integer | `3` | The gRPC max retry number |
|
||||
| `flow.batching_mode.experimental_frontend_scan_timeout` | String | `30s` | Timeout for the flow to wait for an available frontend.<br/>If no available frontend is found after `frontend_scan_timeout` elapses, an error is returned,<br/>which prevents the flownode from starting. |
|
||||
| `flow.batching_mode.experimental_frontend_activity_timeout` | String | `60s` | Frontend activity timeout.<br/>If a frontend is down (not sending heartbeats) for more than `frontend_activity_timeout`,<br/>it will be removed from the list that the flownode uses to connect. |
|
||||
| `flow.batching_mode.experimental_max_filter_num_per_query` | Integer | `20` | Maximum number of filters allowed in a single query |
|
||||
| `flow.batching_mode.experimental_time_window_merge_threshold` | Integer | `3` | Time window merge distance |
|
||||
| `flow.batching_mode.read_preference` | String | `Leader` | Read preference of the Frontend client. |
|
||||
| `flow.batching_mode.frontend_tls` | -- | -- | -- |
|
||||
| `flow.batching_mode.frontend_tls.enabled` | Bool | `false` | Whether to enable TLS for client. |
|
||||
| `flow.batching_mode.frontend_tls.server_ca_cert_path` | String | Unset | Server Certificate file path. |
|
||||
| `flow.batching_mode.frontend_tls.client_cert_path` | String | Unset | Client Certificate file path. |
|
||||
| `flow.batching_mode.frontend_tls.client_key_path` | String | Unset | Client Private key file path. |
|
||||
| `grpc` | -- | -- | The gRPC server options. |
|
||||
| `grpc.bind_addr` | String | `127.0.0.1:6800` | The address to bind the gRPC server. |
|
||||
| `grpc.server_addr` | String | `127.0.0.1:6800` | The address advertised to the metasrv,<br/>and used for connections from outside the host |
|
||||
| `grpc.runtime_size` | Integer | `2` | The number of server worker threads. |
|
||||
| `grpc.max_recv_message_size` | String | `512MB` | The maximum receive message size for gRPC server. |
|
||||
| `grpc.max_send_message_size` | String | `512MB` | The maximum send message size for gRPC server. |
|
||||
| `http` | -- | -- | The HTTP server options. |
|
||||
| `http.addr` | String | `127.0.0.1:4000` | The address to bind the HTTP server. |
|
||||
| `http.timeout` | String | `0s` | HTTP request timeout. Set to 0 to disable timeout. |
|
||||
| `http.body_limit` | String | `64MB` | HTTP request body limit.<br/>The following units are supported: `B`, `KB`, `KiB`, `MB`, `MiB`, `GB`, `GiB`, `TB`, `TiB`, `PB`, `PiB`.<br/>Set to 0 to disable limit. |
|
||||
| `meta_client` | -- | -- | The metasrv client options. |
|
||||
| `meta_client.metasrv_addrs` | Array | -- | The addresses of the metasrv. |
|
||||
| `meta_client.timeout` | String | `3s` | Operation timeout. |
|
||||
| `meta_client.heartbeat_timeout` | String | `500ms` | Heartbeat timeout. |
|
||||
| `meta_client.ddl_timeout` | String | `10s` | DDL timeout. |
|
||||
| `meta_client.connect_timeout` | String | `1s` | Connect server timeout. |
|
||||
| `meta_client.tcp_nodelay` | Bool | `true` | `TCP_NODELAY` option for accepted connections. |
|
||||
| `meta_client.metadata_cache_max_capacity` | Integer | `100000` | The configuration about the cache of the metadata. |
|
||||
| `meta_client.metadata_cache_ttl` | String | `10m` | TTL of the metadata cache. |
|
||||
| `meta_client.metadata_cache_tti` | String | `5m` | -- |
|
||||
| `heartbeat` | -- | -- | The heartbeat options. |
|
||||
| `heartbeat.interval` | String | `3s` | Interval for sending heartbeat messages to the metasrv. |
|
||||
| `heartbeat.retry_interval` | String | `3s` | Interval for retrying to send heartbeat messages to the metasrv. |
|
||||
| `logging` | -- | -- | The logging options. |
|
||||
| `logging.dir` | String | `./greptimedb_data/logs` | The directory to store the log files. If set to empty, logs will not be written to files. |
|
||||
| `logging.level` | String | Unset | The log level. Can be `info`/`debug`/`warn`/`error`. |
|
||||
| `logging.enable_otlp_tracing` | Bool | `false` | Enable OTLP tracing. |
|
||||
| `logging.otlp_endpoint` | String | `http://localhost:4318/v1/traces` | The OTLP tracing endpoint. |
|
||||
| `logging.append_stdout` | Bool | `true` | Whether to append logs to stdout. |
|
||||
| `logging.log_format` | String | `text` | The log format. Can be `text`/`json`. |
|
||||
| `logging.max_log_files` | Integer | `720` | The maximum amount of log files. |
|
||||
| `logging.otlp_export_protocol` | String | `http` | The OTLP tracing export protocol. Can be `grpc`/`http`. |
|
||||
| `logging.otlp_headers` | -- | -- | Additional OTLP headers, only valid when using OTLP http |
|
||||
| `logging.tracing_sample_ratio` | -- | Unset | The percentage of tracing that will be sampled and exported.<br/>Valid range `[0, 1]`: 1 means all traces are sampled, 0 means none are sampled; the default value is 1.<br/>Ratios > 1 are treated as 1; ratios < 0 are treated as 0. |
|
||||
| `logging.tracing_sample_ratio.default_ratio` | Float | `1.0` | -- |
|
||||
| `tracing` | -- | -- | The tracing options. Only effective when compiled with the `tokio-console` feature. |
|
||||
| `tracing.tokio_console_addr` | String | Unset | The tokio console address. |
|
||||
| `query` | -- | -- | -- |
|
||||
| `query.parallelism` | Integer | `1` | Parallelism of the query engine for queries sent by the flownode.<br/>Defaults to 1, so it won't use too much CPU or memory. |
|
||||
| `memory` | -- | -- | The memory options. |
|
||||
| `memory.enable_heap_profiling` | Bool | `true` | Whether to enable heap profiling activation during startup.<br/>When enabled, heap profiling will be activated if the `MALLOC_CONF` environment variable<br/>is set to "prof:true,prof_active:false". The official image adds this env variable.<br/>Default is true. |
|
||||
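The flownode options above translate into a configuration file in the same way. A minimal sketch using only keys documented in this section; the node id and addresses are illustrative and everything else is left at its default:

```toml
node_id = 14

[flow]
# 0 means "use the number of CPU cores divided by 2".
num_workers = 0

[flow.batching_mode]
query_timeout = "600s"
slow_query_threshold = "60s"

[grpc]
bind_addr = "127.0.0.1:6800"
server_addr = "127.0.0.1:6800"

[meta_client]
metasrv_addrs = ["127.0.0.1:3002"]
timeout = "3s"
```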
@@ -1,702 +1,139 @@
|
||||
## The datanode identifier; it should be unique in the cluster.
|
||||
## @toml2docs:none-default
|
||||
# Node running mode, see `standalone.example.toml`.
|
||||
mode = "distributed"
|
||||
# The datanode identifier, should be unique.
|
||||
node_id = 42
|
||||
|
||||
## Start services after regions have obtained leases.
|
||||
## It will block the datanode start if it can't receive leases in the heartbeat from metasrv.
|
||||
# gRPC server address, "127.0.0.1:3001" by default.
|
||||
rpc_addr = "127.0.0.1:3001"
|
||||
# Hostname of this node.
|
||||
rpc_hostname = "127.0.0.1"
|
||||
# The number of gRPC server worker threads, 8 by default.
|
||||
rpc_runtime_size = 8
|
||||
# Start services after regions have obtained leases.
|
||||
# It will block the datanode start if it can't receive leases in the heartbeat from metasrv.
|
||||
require_lease_before_startup = false
|
||||
|
||||
## Initialize all regions in the background during the startup.
|
||||
## By default, it provides services after all regions have been initialized.
|
||||
init_regions_in_background = false
|
||||
# Initialize all regions in the background during the startup.
|
||||
# By default, it provides services after all regions have been initialized.
|
||||
initialize_region_in_background = false
|
||||
|
||||
## Parallelism of initializing regions.
|
||||
init_regions_parallelism = 16
|
||||
|
||||
## The maximum number of concurrent queries allowed to be executed. Zero means unlimited.
|
||||
max_concurrent_queries = 0
|
||||
|
||||
## Enable telemetry to collect anonymous usage data. Enabled by default.
|
||||
#+ enable_telemetry = true
|
||||
|
||||
## The HTTP server options.
|
||||
[http]
|
||||
## The address to bind the HTTP server.
|
||||
addr = "127.0.0.1:4000"
|
||||
## HTTP request timeout. Set to 0 to disable timeout.
|
||||
timeout = "0s"
|
||||
## HTTP request body limit.
|
||||
## The following units are supported: `B`, `KB`, `KiB`, `MB`, `MiB`, `GB`, `GiB`, `TB`, `TiB`, `PB`, `PiB`.
|
||||
## Set to 0 to disable limit.
|
||||
body_limit = "64MB"
|
||||
|
||||
## The gRPC server options.
|
||||
[grpc]
|
||||
## The address to bind the gRPC server.
|
||||
bind_addr = "127.0.0.1:3001"
|
||||
## The address advertised to the metasrv, and used for connections from outside the host.
|
||||
## If left empty or unset, the server will automatically use the IP address of the first network interface
|
||||
## on the host, with the same port number as the one specified in `grpc.bind_addr`.
|
||||
server_addr = "127.0.0.1:3001"
|
||||
## The number of server worker threads.
|
||||
runtime_size = 8
|
||||
## The maximum receive message size for gRPC server.
|
||||
max_recv_message_size = "512MB"
|
||||
## The maximum send message size for gRPC server.
|
||||
max_send_message_size = "512MB"
|
||||
## Compression mode for datanode side Arrow IPC service. Available options:
|
||||
## - `none`: disable all compression
|
||||
## - `transport`: only enable gRPC transport compression (zstd)
|
||||
## - `arrow_ipc`: only enable Arrow IPC compression (lz4)
|
||||
## - `all`: enable all compression.
|
||||
## Default to `none`
|
||||
flight_compression = "arrow_ipc"
|
||||
|
||||
## gRPC server TLS options, see `mysql.tls` section.
|
||||
[grpc.tls]
|
||||
## TLS mode.
|
||||
mode = "disable"
|
||||
|
||||
## Certificate file path.
|
||||
## @toml2docs:none-default
|
||||
cert_path = ""
|
||||
|
||||
## Private key file path.
|
||||
## @toml2docs:none-default
|
||||
key_path = ""
|
||||
|
||||
## Watch for Certificate and key file change and auto reload.
|
||||
## For now, gRPC tls config does not support auto reload.
|
||||
watch = false
|
||||
|
||||
## The runtime options.
|
||||
#+ [runtime]
|
||||
## The number of threads to execute the runtime for global read operations.
|
||||
#+ global_rt_size = 8
|
||||
## The number of threads to execute the runtime for global write operations.
|
||||
#+ compact_rt_size = 4
|
||||
|
||||
## The heartbeat options.
|
||||
[heartbeat]
|
||||
## Interval for sending heartbeat messages to the metasrv.
|
||||
# Interval for sending heartbeat messages to the Metasrv, 3 seconds by default.
|
||||
interval = "3s"
|
||||
|
||||
## Interval for retrying to send heartbeat messages to the metasrv.
|
||||
retry_interval = "3s"
|
||||
|
||||
## The metasrv client options.
|
||||
# Metasrv client options.
|
||||
[meta_client]
|
||||
## The addresses of the metasrv.
|
||||
# Metasrv address list.
|
||||
metasrv_addrs = ["127.0.0.1:3002"]
|
||||
|
||||
## Operation timeout.
|
||||
timeout = "3s"
|
||||
|
||||
## Heartbeat timeout.
|
||||
# Heartbeat timeout, 500 milliseconds by default.
|
||||
heartbeat_timeout = "500ms"
|
||||
|
||||
## DDL timeout.
|
||||
ddl_timeout = "10s"
|
||||
|
||||
## Connect server timeout.
|
||||
# Operation timeout, 3 seconds by default.
|
||||
timeout = "3s"
|
||||
# Connect server timeout, 1 second by default.
|
||||
connect_timeout = "1s"
|
||||
|
||||
## `TCP_NODELAY` option for accepted connections.
|
||||
# `TCP_NODELAY` option for accepted connections, true by default.
|
||||
tcp_nodelay = true
|
||||
|
||||
## The configuration about the cache of the metadata.
|
||||
metadata_cache_max_capacity = 100000
|
||||
|
||||
## TTL of the metadata cache.
|
||||
metadata_cache_ttl = "10m"
|
||||
|
||||
# TTI of the metadata cache.
|
||||
metadata_cache_tti = "5m"
|
||||
|
||||
## The WAL options.
|
||||
# WAL options.
|
||||
# Currently, users are expected to choose the wal through the provider field.
|
||||
# When a wal provider is chosen, the user should comment out all other wal config
|
||||
# except those corresponding to the chosen one.
|
||||
[wal]
|
||||
## The provider of the WAL.
|
||||
## - `raft_engine`: the wal is stored in the local file system by raft-engine.
|
||||
## - `kafka`: it's remote wal that data is stored in Kafka.
|
||||
# WAL data directory
|
||||
provider = "raft_engine"
|
||||
|
||||
## The directory to store the WAL files.
|
||||
## **It's only used when the provider is `raft_engine`**.
|
||||
## @toml2docs:none-default
|
||||
dir = "./greptimedb_data/wal"
|
||||
|
||||
## The size of the WAL segment file.
|
||||
## **It's only used when the provider is `raft_engine`**.
|
||||
file_size = "128MB"
|
||||
|
||||
## The threshold of the WAL size to trigger a purge.
|
||||
## **It's only used when the provider is `raft_engine`**.
|
||||
purge_threshold = "1GB"
|
||||
|
||||
## The interval to trigger a purge.
|
||||
## **It's only used when the provider is `raft_engine`**.
|
||||
purge_interval = "1m"
|
||||
|
||||
## The read batch size.
|
||||
## **It's only used when the provider is `raft_engine`**.
|
||||
# Raft-engine wal options, see `standalone.example.toml`.
|
||||
# dir = "/tmp/greptimedb/wal"
|
||||
file_size = "256MB"
|
||||
purge_threshold = "4GB"
|
||||
purge_interval = "10m"
|
||||
read_batch_size = 128
|
||||
|
||||
## Whether to use sync write.
|
||||
## **It's only used when the provider is `raft_engine`**.
|
||||
sync_write = false
|
||||
|
||||
## Whether to reuse logically truncated log files.
|
||||
## **It's only used when the provider is `raft_engine`**.
|
||||
enable_log_recycle = true
|
||||
# Kafka wal options, see `standalone.example.toml`.
|
||||
# broker_endpoints = ["127.0.0.1:9092"]
|
||||
# max_batch_size = "4MB"
|
||||
# linger = "200ms"
|
||||
# produce_record_timeout = "100ms"
|
||||
# backoff_init = "500ms"
|
||||
# backoff_max = "10s"
|
||||
# backoff_base = 2
|
||||
# backoff_deadline = "5mins"
|
||||
|
||||
## Whether to pre-create log files on start up.
|
||||
## **It's only used when the provider is `raft_engine`**.
|
||||
prefill_log_files = false
|
||||
|
||||
## Duration for fsyncing log files.
|
||||
## **It's only used when the provider is `raft_engine`**.
|
||||
sync_period = "10s"
|
||||
|
||||
## Parallelism during WAL recovery.
|
||||
recovery_parallelism = 2
|
||||
|
||||
## The Kafka broker endpoints.
|
||||
## **It's only used when the provider is `kafka`**.
|
||||
broker_endpoints = ["127.0.0.1:9092"]
|
||||
|
||||
## The max size of a single producer batch.
|
||||
## Warning: Kafka has a default limit of 1MB per message in a topic.
|
||||
## **It's only used when the provider is `kafka`**.
|
||||
max_batch_bytes = "1MB"
|
||||
|
||||
## The consumer wait timeout.
|
||||
## **It's only used when the provider is `kafka`**.
|
||||
consumer_wait_timeout = "100ms"
|
||||
|
||||
## Whether to enable WAL index creation.
|
||||
## **It's only used when the provider is `kafka`**.
|
||||
create_index = true
|
||||
|
||||
## The interval for dumping WAL indexes.
|
||||
## **It's only used when the provider is `kafka`**.
|
||||
dump_index_interval = "60s"
|
||||
|
||||
## Ignore missing entries when reading the WAL.
|
||||
## **It's only used when the provider is `kafka`**.
|
||||
##
|
||||
## This option ensures that when Kafka messages are deleted, the system
|
||||
## can still successfully replay memtable data without throwing an
|
||||
## out-of-range error.
|
||||
## However, enabling this option might lead to unexpected data loss,
|
||||
## as the system will skip over missing entries instead of treating
|
||||
## them as critical errors.
|
||||
overwrite_entry_start_id = false
|
||||
|
||||
# The Kafka SASL configuration.
|
||||
# **It's only used when the provider is `kafka`**.
|
||||
# Available SASL mechanisms:
|
||||
# - `PLAIN`
|
||||
# - `SCRAM-SHA-256`
|
||||
# - `SCRAM-SHA-512`
|
||||
# [wal.sasl]
|
||||
# type = "SCRAM-SHA-512"
|
||||
# username = "user_kafka"
|
||||
# password = "secret"
|
||||
|
||||
# The Kafka TLS configuration.
|
||||
# **It's only used when the provider is `kafka`**.
|
||||
# [wal.tls]
|
||||
# server_ca_cert_path = "/path/to/server_cert"
|
||||
# client_cert_path = "/path/to/client_cert"
|
||||
# client_key_path = "/path/to/key"
|
||||
|
||||
# Example of using S3 as the storage.
|
||||
# [storage]
|
||||
# type = "S3"
|
||||
# bucket = "greptimedb"
|
||||
# root = "data"
|
||||
# access_key_id = "test"
|
||||
# secret_access_key = "123456"
|
||||
# endpoint = "https://s3.amazonaws.com"
|
||||
# region = "us-west-2"
|
||||
# enable_virtual_host_style = false
|
||||
|
||||
# Example of using Oss as the storage.
|
||||
# [storage]
|
||||
# type = "Oss"
|
||||
# bucket = "greptimedb"
|
||||
# root = "data"
|
||||
# access_key_id = "test"
|
||||
# access_key_secret = "123456"
|
||||
# endpoint = "https://oss-cn-hangzhou.aliyuncs.com"
|
||||
|
||||
# Example of using Azblob as the storage.
|
||||
# [storage]
|
||||
# type = "Azblob"
|
||||
# container = "greptimedb"
|
||||
# root = "data"
|
||||
# account_name = "test"
|
||||
# account_key = "123456"
|
||||
# endpoint = "https://greptimedb.blob.core.windows.net"
|
||||
# sas_token = ""
|
||||
|
||||
# Example of using Gcs as the storage.
|
||||
# [storage]
|
||||
# type = "Gcs"
|
||||
# bucket = "greptimedb"
|
||||
# root = "data"
|
||||
# scope = "test"
|
||||
# credential_path = "123456"
|
||||
# credential = "base64-credential"
|
||||
# endpoint = "https://storage.googleapis.com"
|
||||
|
||||
## The query engine options.
|
||||
[query]
|
||||
## Parallelism of the query engine.
|
||||
## Default to 0, which means the number of CPU cores.
|
||||
parallelism = 0
|
||||
|
||||
## The data storage options.
|
||||
# Storage options, see `standalone.example.toml`.
|
||||
[storage]
|
||||
## The working home directory.
|
||||
data_home = "./greptimedb_data"
|
||||
|
||||
## The storage type used to store the data.
|
||||
## - `File`: the data is stored in the local file system.
|
||||
## - `S3`: the data is stored in the S3 object storage.
|
||||
## - `Gcs`: the data is stored in the Google Cloud Storage.
|
||||
## - `Azblob`: the data is stored in the Azure Blob Storage.
|
||||
## - `Oss`: the data is stored in the Aliyun OSS.
|
||||
# The working home directory.
|
||||
data_home = "/tmp/greptimedb/"
|
||||
# Storage type.
|
||||
type = "File"
|
||||
# TTL for all tables. Disabled by default.
|
||||
# global_ttl = "7d"
|
||||
|
||||
## Read cache configuration for object storage such as 'S3' etc. It's configured by default when using object storage, and configuring it is recommended for better performance.
|
||||
## A local file directory, defaults to `{data_home}`. An empty string means disabling.
|
||||
## @toml2docs:none-default
|
||||
#+ cache_path = ""
|
||||
|
||||
## The local file cache capacity in bytes. If your disk space is sufficient, it is recommended to set it larger.
|
||||
## @toml2docs:none-default
|
||||
cache_capacity = "5GiB"
|
||||
|
||||
## The S3 bucket name.
|
||||
## **It's only used when the storage type is `S3`, `Oss` and `Gcs`**.
|
||||
## @toml2docs:none-default
|
||||
bucket = "greptimedb"
|
||||
|
||||
## The S3 data will be stored in the specified prefix, for example, `s3://${bucket}/${root}`.
|
||||
## **It's only used when the storage type is `S3`, `Oss` and `Azblob`**.
|
||||
## @toml2docs:none-default
|
||||
root = "greptimedb"
|
||||
|
||||
## The access key id of the aws account.
|
||||
## It's **highly recommended** to use AWS IAM roles instead of hardcoding the access key id and secret key.
|
||||
## **It's only used when the storage type is `S3` and `Oss`**.
|
||||
## @toml2docs:none-default
|
||||
access_key_id = "test"
|
||||
|
||||
## The secret access key of the aws account.
|
||||
## It's **highly recommended** to use AWS IAM roles instead of hardcoding the access key id and secret key.
|
||||
## **It's only used when the storage type is `S3`**.
|
||||
## @toml2docs:none-default
|
||||
secret_access_key = "test"
|
||||
|
||||
## The secret access key of the aliyun account.
|
||||
## **It's only used when the storage type is `Oss`**.
|
||||
## @toml2docs:none-default
|
||||
access_key_secret = "test"
|
||||
|
||||
## The account name of the azure account.
|
||||
## **It's only used when the storage type is `Azblob`**.
|
||||
## @toml2docs:none-default
|
||||
account_name = "test"
|
||||
|
||||
## The account key of the azure account.
|
||||
## **It's only used when the storage type is `Azblob`**.
|
||||
## @toml2docs:none-default
|
||||
account_key = "test"
|
||||
|
||||
## The scope of the google cloud storage.
|
||||
## **It's only used when the storage type is `Gcs`**.
|
||||
## @toml2docs:none-default
|
||||
scope = "test"
|
||||
|
||||
## The credential path of the google cloud storage.
|
||||
## **It's only used when the storage type is `Gcs`**.
|
||||
## @toml2docs:none-default
|
||||
credential_path = "test"
|
||||
|
||||
## The credential of the google cloud storage.
|
||||
## **It's only used when the storage type is `Gcs`**.
|
||||
## @toml2docs:none-default
|
||||
credential = "base64-credential"
|
||||
|
||||
## The container of the azure account.
|
||||
## **It's only used when the storage type is `Azblob`**.
|
||||
## @toml2docs:none-default
|
||||
container = "greptimedb"
|
||||
|
||||
## The sas token of the azure account.
|
||||
## **It's only used when the storage type is `Azblob`**.
|
||||
## @toml2docs:none-default
|
||||
sas_token = ""
|
||||
|
||||
## The endpoint of the S3 service.
|
||||
## **It's only used when the storage type is `S3`, `Oss`, `Gcs` and `Azblob`**.
|
||||
## @toml2docs:none-default
|
||||
endpoint = "https://s3.amazonaws.com"
|
||||
|
||||
## The region of the S3 service.
|
||||
## **It's only used when the storage type is `S3`, `Oss`, `Gcs` and `Azblob`**.
|
||||
## @toml2docs:none-default
|
||||
region = "us-west-2"
|
||||
|
||||
## The http client options to the storage.
|
||||
## **It's only used when the storage type is `S3`, `Oss`, `Gcs` and `Azblob`**.
|
||||
[storage.http_client]
|
||||
|
||||
## The maximum idle connection per host allowed in the pool.
|
||||
pool_max_idle_per_host = 1024
|
||||
|
||||
## The timeout for only the connect phase of a http client.
|
||||
connect_timeout = "30s"
|
||||
|
||||
## The total request timeout, applied from when the request starts connecting until the response body has finished.
|
||||
## Also considered a total deadline.
|
||||
timeout = "30s"
|
||||
|
||||
## The timeout for idle sockets being kept-alive.
|
||||
pool_idle_timeout = "90s"
|
||||
|
||||
## Whether to skip SSL verification.
|
||||
## **Security Notice**: Setting `skip_ssl_validation = true` disables certificate verification, making connections vulnerable to man-in-the-middle attacks. Only use this in development or trusted private networks.
|
||||
skip_ssl_validation = false
|
||||
# Cache configuration for object storage such as 'S3' etc.
|
||||
# The local file cache directory
|
||||
# cache_path = "/path/local_cache"
|
||||
# The local file cache capacity in bytes.
|
||||
# cache_capacity = "256MB"
|
||||
|
||||
# Custom storage options
|
||||
# [[storage.providers]]
|
||||
# name = "S3"
|
||||
# type = "S3"
|
||||
# bucket = "greptimedb"
|
||||
# root = "data"
|
||||
# access_key_id = "test"
|
||||
# secret_access_key = "123456"
|
||||
# endpoint = "https://s3.amazonaws.com"
|
||||
# region = "us-west-2"
|
||||
# [[storage.providers]]
|
||||
# name = "Gcs"
|
||||
# type = "Gcs"
|
||||
# bucket = "greptimedb"
|
||||
# root = "data"
|
||||
# scope = "test"
|
||||
# credential_path = "123456"
|
||||
# credential = "base64-credential"
|
||||
# endpoint = "https://storage.googleapis.com"
|
||||
#[[storage.providers]]
|
||||
#type = "S3"
|
||||
#[[storage.providers]]
|
||||
#type = "Gcs"
|
||||
|
||||
## The region engine options. You can configure multiple region engines.
|
||||
# Mito engine options
|
||||
[[region_engine]]
|
||||
|
||||
## The Mito engine options.
|
||||
[region_engine.mito]
|
||||
|
||||
## Number of region workers.
|
||||
#+ num_workers = 8
|
||||
|
||||
## Request channel size of each worker.
|
||||
# Number of region workers
|
||||
num_workers = 8
|
||||
# Request channel size of each worker
|
||||
worker_channel_size = 128
|
||||
|
||||
## Max batch size for a worker to handle requests.
|
||||
# Max batch size for a worker to handle requests
|
||||
worker_request_batch_size = 64
|
||||
|
||||
## Number of meta action updated to trigger a new checkpoint for the manifest.
|
||||
# Number of meta action updated to trigger a new checkpoint for the manifest
|
||||
manifest_checkpoint_distance = 10
|
||||
|
||||
|
||||
## Number of removed files to keep in manifest's `removed_files` field before also
|
||||
## removing them from `removed_files`. Mostly for debugging purposes.
|
||||
## If set to 0, it will only use `keep_removed_file_ttl` to decide when to remove files
|
||||
## from `removed_files` field.
|
||||
experimental_manifest_keep_removed_file_count = 256
|
||||
|
||||
## How long to keep removed files in the `removed_files` field of manifest
|
||||
## after they are removed from manifest.
|
||||
## Files will only be removed from the `removed_files` field
|
||||
## if both `keep_removed_file_count` and `keep_removed_file_ttl` are reached.
|
||||
experimental_manifest_keep_removed_file_ttl = "1h"
|
||||
|
||||
## Whether to compress manifest and checkpoint file by gzip (default false).
|
||||
# Whether to compress manifest and checkpoint file by gzip (default false).
|
||||
compress_manifest = false
|
||||
|
||||
## Max number of running background flush jobs (default: 1/2 of cpu cores).
|
||||
## @toml2docs:none-default="Auto"
|
||||
#+ max_background_flushes = 4
|
||||
|
||||
## Max number of running background compaction jobs (default: 1/4 of cpu cores).
|
||||
## @toml2docs:none-default="Auto"
|
||||
#+ max_background_compactions = 2
|
||||
|
||||
## Max number of running background purge jobs (default: number of cpu cores).
|
||||
## @toml2docs:none-default="Auto"
|
||||
#+ max_background_purges = 8
|
||||
|
||||
## Interval to auto flush a region if it has not flushed yet.
|
||||
# Max number of running background jobs
|
||||
max_background_jobs = 4
|
||||
# Interval to auto flush a region if it has not flushed yet.
|
||||
auto_flush_interval = "1h"
|
||||
|
||||
## Global write buffer size for all regions. If not set, it defaults to 1/8 of OS memory, with a maximum of 1GB.
|
||||
## @toml2docs:none-default="Auto"
|
||||
#+ global_write_buffer_size = "1GB"
|
||||
|
||||
## Global write buffer size threshold to reject write requests. If not set, it defaults to 2 times `global_write_buffer_size`.
|
||||
## @toml2docs:none-default="Auto"
|
||||
#+ global_write_buffer_reject_size = "2GB"
|
||||
|
||||
## Cache size for SST metadata. Setting it to 0 to disable the cache.
|
||||
## If not set, it defaults to 1/32 of OS memory, with a maximum of 128MB.
|
||||
## @toml2docs:none-default="Auto"
|
||||
#+ sst_meta_cache_size = "128MB"
|
||||
|
||||
## Cache size for vectors and arrow arrays. Setting it to 0 to disable the cache.
|
||||
## If not set, it defaults to 1/16 of OS memory, with a maximum of 512MB.
|
||||
## @toml2docs:none-default="Auto"
|
||||
#+ vector_cache_size = "512MB"
|
||||
|
||||
## Cache size for pages of SST row groups. Setting it to 0 to disable the cache.
|
||||
## If not set, it defaults to 1/8 of OS memory.
|
||||
## @toml2docs:none-default="Auto"
|
||||
#+ page_cache_size = "512MB"
|
||||
|
||||
## Cache size for time series selector (e.g. `last_value()`). Setting it to 0 to disable the cache.
|
||||
## If not set, it defaults to 1/16 of OS memory, with a maximum of 512MB.
|
||||
## @toml2docs:none-default="Auto"
|
||||
#+ selector_result_cache_size = "512MB"
|
||||
|
||||
## Whether to enable the write cache. It's enabled by default when using object storage, and enabling it is recommended for better performance in that case.
|
||||
enable_write_cache = false
|
||||
|
||||
## File system path for write cache, defaults to `{data_home}`.
|
||||
write_cache_path = ""
|
||||
|
||||
## Capacity for write cache. If your disk space is sufficient, it is recommended to set it larger.
|
||||
write_cache_size = "5GiB"
|
||||
|
||||
## TTL for write cache.
|
||||
## @toml2docs:none-default
|
||||
write_cache_ttl = "8h"
|
||||
|
||||
## Buffer size for SST writing.
|
||||
# Global write buffer size for all regions.
|
||||
global_write_buffer_size = "1GB"
|
||||
# Global write buffer size threshold to reject write requests (default 2G).
|
||||
global_write_buffer_reject_size = "2GB"
|
||||
# Cache size for SST metadata (default 128MB). Setting it to 0 to disable the cache.
|
||||
sst_meta_cache_size = "128MB"
|
||||
# Cache size for vectors and arrow arrays (default 512MB). Setting it to 0 to disable the cache.
|
||||
vector_cache_size = "512MB"
|
||||
# Cache size for pages of SST row groups (default 512MB). Setting it to 0 to disable the cache.
|
||||
page_cache_size = "512MB"
|
||||
# Buffer size for SST writing.
|
||||
sst_write_buffer_size = "8MB"
|
||||
|
||||
## Capacity of the channel to send data from parallel scan tasks to the main task.
|
||||
# Parallelism to scan a region (default: 1/4 of cpu cores).
|
||||
# - 0: using the default value (1/4 of cpu cores).
|
||||
# - 1: scan in current thread.
|
||||
# - n: scan in parallelism n.
|
||||
scan_parallelism = 0
|
||||
# Capacity of the channel to send data from parallel scan tasks to the main task (default 32).
|
||||
parallel_scan_channel_size = 32
|
||||
|
||||
## Maximum number of SST files to scan concurrently.
|
||||
max_concurrent_scan_files = 384
|
||||
# Log options, see `standalone.example.toml`
|
||||
# [logging]
|
||||
# dir = "/tmp/greptimedb/logs"
|
||||
# level = "info"
|
||||
|
||||
## Whether to allow stale WAL entries read during replay.
|
||||
allow_stale_entries = false
|
||||
|
||||
## Minimum time interval between two compactions.
|
||||
## To align with the old behavior, the default value is 0 (no restrictions).
|
||||
min_compaction_interval = "0m"
|
||||
|
||||
## The options for index in Mito engine.
|
||||
[region_engine.mito.index]
|
||||
|
||||
## Auxiliary directory path for the index in filesystem, used to store intermediate files for
|
||||
## creating the index and staging files for searching the index, defaults to `{data_home}/index_intermediate`.
|
||||
## The default name for this directory is `index_intermediate` for backward compatibility.
|
||||
##
|
||||
## This path contains two subdirectories:
|
||||
## - `__intm`: for storing intermediate files used during creating index.
|
||||
## - `staging`: for storing staging files used during searching index.
|
||||
aux_path = ""
|
||||
|
||||
## The max capacity of the staging directory.
|
||||
staging_size = "2GB"
|
||||
|
||||
## The TTL of the staging directory.
|
||||
## Defaults to 7 days.
|
||||
## Setting it to "0s" to disable TTL.
|
||||
staging_ttl = "7d"
|
||||
|
||||
## Cache size for inverted index metadata.
|
||||
metadata_cache_size = "64MiB"
|
||||
|
||||
## Cache size for inverted index content.
|
||||
content_cache_size = "128MiB"
|
||||
|
||||
## Page size for inverted index content cache.
|
||||
content_cache_page_size = "64KiB"
|
||||
|
||||
## Cache size for index result.
|
||||
result_cache_size = "128MiB"
|
||||
|
||||
## The options for inverted index in Mito engine.
|
||||
[region_engine.mito.inverted_index]
|
||||
|
||||
## Whether to create the index on flush.
|
||||
## - `auto`: automatically (default)
|
||||
## - `disable`: never
|
||||
create_on_flush = "auto"
|
||||
|
||||
## Whether to create the index on compaction.
|
||||
## - `auto`: automatically (default)
|
||||
## - `disable`: never
|
||||
create_on_compaction = "auto"
|
||||
|
||||
## Whether to apply the index on query
|
||||
## - `auto`: automatically (default)
|
||||
## - `disable`: never
|
||||
apply_on_query = "auto"
|
||||
|
||||
## Memory threshold for performing an external sort during index creation.
|
||||
## - `auto`: automatically determine the threshold based on the system memory size (default)
|
||||
## - `unlimited`: no memory limit
|
||||
## - `[size]` e.g. `64MB`: fixed memory threshold
|
||||
mem_threshold_on_create = "auto"
|
||||
|
||||
## Deprecated, use `region_engine.mito.index.aux_path` instead.
|
||||
intermediate_path = ""
|
||||
|
||||
## The options for full-text index in Mito engine.
|
||||
[region_engine.mito.fulltext_index]
|
||||
|
||||
## Whether to create the index on flush.
|
||||
## - `auto`: automatically (default)
|
||||
## - `disable`: never
|
||||
create_on_flush = "auto"
|
||||
|
||||
## Whether to create the index on compaction.
|
||||
## - `auto`: automatically (default)
|
||||
## - `disable`: never
|
||||
create_on_compaction = "auto"
|
||||
|
||||
## Whether to apply the index on query
|
||||
## - `auto`: automatically (default)
|
||||
## - `disable`: never
|
||||
apply_on_query = "auto"
|
||||
|
||||
## Memory threshold for index creation.
|
||||
## - `auto`: automatically determine the threshold based on the system memory size (default)
|
||||
## - `unlimited`: no memory limit
|
||||
## - `[size]` e.g. `64MB`: fixed memory threshold
|
||||
mem_threshold_on_create = "auto"
|
||||
|
||||
## The options for bloom filter index in Mito engine.
|
||||
[region_engine.mito.bloom_filter_index]
|
||||
|
||||
## Whether to create the index on flush.
|
||||
## - `auto`: automatically (default)
|
||||
## - `disable`: never
|
||||
create_on_flush = "auto"
|
||||
|
||||
## Whether to create the index on compaction.
|
||||
## - `auto`: automatically (default)
|
||||
## - `disable`: never
|
||||
create_on_compaction = "auto"
|
||||
|
||||
## Whether to apply the index on query
|
||||
## - `auto`: automatically (default)
|
||||
## - `disable`: never
|
||||
apply_on_query = "auto"
|
||||
|
||||
## Memory threshold for the index creation.
|
||||
## - `auto`: automatically determine the threshold based on the system memory size (default)
|
||||
## - `unlimited`: no memory limit
|
||||
## - `[size]` e.g. `64MB`: fixed memory threshold
|
||||
mem_threshold_on_create = "auto"
|
||||
|
||||
[region_engine.mito.memtable]
|
||||
## Memtable type.
|
||||
## - `time_series`: time-series memtable
|
||||
## - `partition_tree`: partition tree memtable (experimental)
|
||||
type = "time_series"
|
||||
|
||||
## The max number of keys in one shard.
|
||||
## Only available for `partition_tree` memtable.
|
||||
index_max_keys_per_shard = 8192
|
||||
|
||||
## The max rows of data inside the actively writing buffer in one shard.
|
||||
## Only available for `partition_tree` memtable.
|
||||
data_freeze_threshold = 32768
|
||||
|
||||
## Max dictionary bytes.
|
||||
## Only available for `partition_tree` memtable.
|
||||
fork_dictionary_bytes = "1GiB"
|
||||
|
||||
[[region_engine]]
|
||||
## Enable the file engine.
|
||||
[region_engine.file]
|
||||
|
||||
[[region_engine]]
|
||||
## Metric engine options.
|
||||
[region_engine.metric]
|
||||
## Whether to enable the experimental sparse primary key encoding.
|
||||
experimental_sparse_primary_key_encoding = false
|
||||
|
||||
## The logging options.
|
||||
[logging]
|
||||
## The directory to store the log files. If set to empty, logs will not be written to files.
|
||||
dir = "./greptimedb_data/logs"
|
||||
|
||||
## The log level. Can be `info`/`debug`/`warn`/`error`.
|
||||
## @toml2docs:none-default
|
||||
level = "info"
|
||||
|
||||
## Enable OTLP tracing.
|
||||
enable_otlp_tracing = false
|
||||
|
||||
## The OTLP tracing endpoint.
|
||||
otlp_endpoint = "http://localhost:4318/v1/traces"
|
||||
|
||||
## Whether to append logs to stdout.
|
||||
append_stdout = true
|
||||
|
||||
## The log format. Can be `text`/`json`.
|
||||
log_format = "text"
|
||||
|
||||
## The maximum amount of log files.
|
||||
max_log_files = 720
|
||||
|
||||
## The OTLP tracing export protocol. Can be `grpc`/`http`.
|
||||
otlp_export_protocol = "http"
|
||||
|
||||
## Additional OTLP headers, only valid when using OTLP http
|
||||
[logging.otlp_headers]
|
||||
## @toml2docs:none-default
|
||||
#Authorization = "Bearer my-token"
|
||||
## @toml2docs:none-default
|
||||
#Database = "My database"
|
||||
|
||||
## The percentage of tracing that will be sampled and exported.
|
||||
## Valid range `[0, 1]`: 1 means all traces are sampled, 0 means none are sampled; the default value is 1.
|
||||
## Ratios > 1 are treated as 1; ratios < 0 are treated as 0.
|
||||
[logging.tracing_sample_ratio]
|
||||
default_ratio = 1.0
|
||||
|
||||
## The datanode can export its metrics and send them to a Prometheus-compatible service (e.g. `greptimedb` itself) via the remote-write API.
|
||||
## This is only used for `greptimedb` to export its own metrics internally. It's different from prometheus scrape.
|
||||
[export_metrics]
|
||||
## Whether to enable exporting metrics.
|
||||
enable = false
|
||||
## The interval of export metrics.
|
||||
write_interval = "30s"
|
||||
|
||||
[export_metrics.remote_write]
|
||||
## The Prometheus remote-write endpoint that the metrics are sent to, for example: `http://127.0.0.1:4000/v1/prometheus/write?db=greptime_metrics`.
|
||||
url = ""
|
||||
|
||||
## HTTP headers carried by the Prometheus remote-write requests.
|
||||
headers = { }
|
||||
|
||||
## The tracing options. Only effective when compiled with the `tokio-console` feature.
|
||||
#+ [tracing]
|
||||
## The tokio console address.
|
||||
## @toml2docs:none-default
|
||||
#+ tokio_console_addr = "127.0.0.1"
|
||||
|
||||
## The memory options.
|
||||
[memory]
|
||||
## Whether to enable heap profiling activation during startup.
|
||||
## When enabled, heap profiling will be activated if the `MALLOC_CONF` environment variable
|
||||
## is set to "prof:true,prof_active:false". The official image adds this env variable.
|
||||
## Default is true.
|
||||
enable_heap_profiling = true
|
||||
# Datanode export the metrics generated by itself
|
||||
# encoded to Prometheus remote-write format
|
||||
# and send to Prometheus remote-write compatible receiver (e.g. send to `greptimedb` itself)
|
||||
# This is only used for `greptimedb` to export its own metrics internally. It's different from prometheus scrape.
|
||||
# [export_metrics]
|
||||
# whether enable export metrics, default is false
|
||||
# enable = false
|
||||
# The url of metrics export endpoint, default is `frontend` default HTTP endpoint.
|
||||
# endpoint = "127.0.0.1:4000"
|
||||
# The database name of exported metrics stores, user needs to specify a valid database
|
||||
# db = ""
|
||||
# The interval of export metrics
|
||||
# write_interval = "30s"
|
||||
# HTTP headers of Prometheus remote-write carry
|
||||
# headers = {}
|
||||
|
||||
@@ -1,167 +0,0 @@
|
||||
## The flownode identifier; it should be unique in the cluster.
|
||||
## @toml2docs:none-default
|
||||
node_id = 14
|
||||
|
||||
## flow engine options.
|
||||
[flow]
|
||||
## The number of flow workers in the flownode.
|
||||
## If not set (or set to 0), the number of CPU cores divided by 2 is used.
|
||||
#+num_workers=0
|
||||
[flow.batching_mode]
|
||||
## The default batching engine query timeout is 10 minutes.
|
||||
#+query_timeout="600s"
|
||||
## Output a warning log for any query that runs longer than this threshold.
|
||||
#+slow_query_threshold="60s"
|
||||
## The minimum duration between two query executions of a batching mode task.
|
||||
#+experimental_min_refresh_duration="5s"
|
||||
## The gRPC connection timeout
|
||||
#+grpc_conn_timeout="5s"
|
||||
## The gRPC max retry number
|
||||
#+experimental_grpc_max_retries=3
|
||||
## Timeout for the flow to wait for an available frontend;
|
||||
## if no available frontend is found after frontend_scan_timeout elapses, an error is returned,
|
||||
## which prevents the flownode from starting.
|
||||
#+experimental_frontend_scan_timeout="30s"
|
||||
## Frontend activity timeout.
|
||||
## If a frontend is down (not sending heartbeats) for more than frontend_activity_timeout,
|
||||
## it will be removed from the list that the flownode uses to connect.
|
||||
#+experimental_frontend_activity_timeout="60s"
|
||||
## Maximum number of filters allowed in a single query
|
||||
#+experimental_max_filter_num_per_query=20
|
||||
## Time window merge distance
|
||||
#+experimental_time_window_merge_threshold=3
|
||||
## Read preference of the Frontend client.
|
||||
#+read_preference="Leader"
|
||||
[flow.batching_mode.frontend_tls]
|
||||
## Whether to enable TLS for client.
|
||||
#+enabled=false
|
||||
## Server Certificate file path.
|
||||
## @toml2docs:none-default
|
||||
#+server_ca_cert_path=""
|
||||
## Client Certificate file path.
|
||||
## @toml2docs:none-default
|
||||
#+client_cert_path=""
|
||||
## Client Private key file path.
|
||||
## @toml2docs:none-default
|
||||
#+client_key_path=""
|
||||
|
||||
## The gRPC server options.
|
||||
[grpc]
|
||||
## The address to bind the gRPC server.
|
||||
bind_addr = "127.0.0.1:6800"
|
||||
## The address advertised to the metasrv,
|
||||
## and used for connections from outside the host
|
||||
server_addr = "127.0.0.1:6800"
|
||||
## The number of server worker threads.
|
||||
runtime_size = 2
|
||||
## The maximum receive message size for gRPC server.
|
||||
max_recv_message_size = "512MB"
|
||||
## The maximum send message size for gRPC server.
|
||||
max_send_message_size = "512MB"
|
||||
|
||||
## The HTTP server options.
|
||||
[http]
|
||||
## The address to bind the HTTP server.
|
||||
addr = "127.0.0.1:4000"
|
||||
## HTTP request timeout. Set to 0 to disable timeout.
|
||||
timeout = "0s"
|
||||
## HTTP request body limit.
|
||||
## The following units are supported: `B`, `KB`, `KiB`, `MB`, `MiB`, `GB`, `GiB`, `TB`, `TiB`, `PB`, `PiB`.
|
||||
## Set to 0 to disable limit.
|
||||
body_limit = "64MB"
|
||||
|
||||
## The metasrv client options.
|
||||
[meta_client]
|
||||
## The addresses of the metasrv.
|
||||
metasrv_addrs = ["127.0.0.1:3002"]
|
||||
|
||||
## Operation timeout.
|
||||
timeout = "3s"
|
||||
|
||||
## Heartbeat timeout.
|
||||
heartbeat_timeout = "500ms"
|
||||
|
||||
## DDL timeout.
|
||||
ddl_timeout = "10s"
|
||||
|
||||
## Connect server timeout.
|
||||
connect_timeout = "1s"
|
||||
|
||||
## `TCP_NODELAY` option for accepted connections.
|
||||
tcp_nodelay = true
|
||||
|
||||
## The configuration about the cache of the metadata.
|
||||
metadata_cache_max_capacity = 100000
|
||||
|
||||
## TTL of the metadata cache.
|
||||
metadata_cache_ttl = "10m"
|
||||
|
||||
# TTI of the metadata cache.
|
||||
metadata_cache_tti = "5m"
|
||||
|
||||
## The heartbeat options.
|
||||
[heartbeat]
|
||||
## Interval for sending heartbeat messages to the metasrv.
|
||||
interval = "3s"
|
||||
|
||||
## Interval for retrying to send heartbeat messages to the metasrv.
|
||||
retry_interval = "3s"
|
||||
|
||||
## The logging options.
|
||||
[logging]
|
||||
## The directory to store the log files. If set to empty, logs will not be written to files.
|
||||
dir = "./greptimedb_data/logs"
|
||||
|
||||
## The log level. Can be `info`/`debug`/`warn`/`error`.
|
||||
## @toml2docs:none-default
|
||||
level = "info"
|
||||
|
||||
## Enable OTLP tracing.
|
||||
enable_otlp_tracing = false
|
||||
|
||||
## The OTLP tracing endpoint.
|
||||
otlp_endpoint = "http://localhost:4318/v1/traces"
|
||||
|
||||
## Whether to append logs to stdout.
|
||||
append_stdout = true
|
||||
|
||||
## The log format. Can be `text`/`json`.
|
||||
log_format = "text"
|
||||
|
||||
## The maximum amount of log files.
|
||||
max_log_files = 720
|
||||
|
||||
## The OTLP tracing export protocol. Can be `grpc`/`http`.
|
||||
otlp_export_protocol = "http"
|
||||
|
||||
## Additional OTLP headers, only valid when using OTLP http
|
||||
[logging.otlp_headers]
|
||||
## @toml2docs:none-default
|
||||
#Authorization = "Bearer my-token"
|
||||
## @toml2docs:none-default
|
||||
#Database = "My database"
|
||||
|
||||
## The percentage of tracing that will be sampled and exported.
|
||||
## Valid range `[0, 1]`: 1 means all traces are sampled, 0 means none are sampled; the default value is 1.
|
||||
## Ratios > 1 are treated as 1; ratios < 0 are treated as 0.
|
||||
[logging.tracing_sample_ratio]
|
||||
default_ratio = 1.0
|
||||
|
||||
## The tracing options. Only effective when compiled with the `tokio-console` feature.
|
||||
#+ [tracing]
|
||||
## The tokio console address.
|
||||
## @toml2docs:none-default
|
||||
#+ tokio_console_addr = "127.0.0.1"
|
||||
|
||||
[query]
|
||||
## Parallelism of the query engine for queries sent by the flownode.
|
||||
## Defaults to 1, so it won't use too much CPU or memory.
|
||||
parallelism = 1
|
||||
|
||||
## The memory options.
|
||||
[memory]
|
||||
## Whether to enable heap profiling activation during startup.
|
||||
## When enabled, heap profiling will be activated if the `MALLOC_CONF` environment variable
|
||||
## is set to "prof:true,prof_active:false". The official image adds this env variable.
|
||||
## Default is true.
|
||||
enable_heap_profiling = true
|
||||
@@ -1,340 +1,97 @@
|
||||
## The default timezone of the server.
|
||||
## @toml2docs:none-default
|
||||
default_timezone = "UTC"
|
||||
# Node running mode, see `standalone.example.toml`.
|
||||
mode = "distributed"
|
||||
# The default timezone of the server
|
||||
# default_timezone = "UTC"
|
||||
|
||||
## The maximum in-flight write bytes.
|
||||
## @toml2docs:none-default
|
||||
#+ max_in_flight_write_bytes = "500MB"
|
||||
|
||||
## The runtime options.
|
||||
#+ [runtime]
|
||||
## The number of threads to execute the runtime for global read operations.
|
||||
#+ global_rt_size = 8
|
||||
## The number of threads to execute the runtime for global write operations.
|
||||
#+ compact_rt_size = 4
|
||||
|
||||
## The heartbeat options.
|
||||
[heartbeat]
|
||||
## Interval for sending heartbeat messages to the metasrv.
|
||||
interval = "18s"
|
||||
# Interval for sending heartbeat task to the Metasrv, 5 seconds by default.
|
||||
interval = "5s"
|
||||
# Interval for retry sending heartbeat task, 5 seconds by default.
|
||||
retry_interval = "5s"
|
||||
|
||||
## Interval for retrying to send heartbeat messages to the metasrv.
|
||||
retry_interval = "3s"
|
||||
|
||||
## The HTTP server options.
|
||||
# HTTP server options, see `standalone.example.toml`.
|
||||
[http]
|
||||
## The address to bind the HTTP server.
|
||||
addr = "127.0.0.1:4000"
|
||||
## HTTP request timeout. Set to 0 to disable timeout.
|
||||
timeout = "0s"
|
||||
## HTTP request body limit.
|
||||
## The following units are supported: `B`, `KB`, `KiB`, `MB`, `MiB`, `GB`, `GiB`, `TB`, `TiB`, `PB`, `PiB`.
|
||||
## Set to 0 to disable limit.
|
||||
timeout = "30s"
|
||||
body_limit = "64MB"
|
||||
## HTTP CORS support; it's turned on by default.
|
||||
## This allows browsers to access HTTP APIs without CORS restrictions.
|
||||
enable_cors = true
|
||||
## Customize allowed origins for HTTP CORS.
|
||||
## @toml2docs:none-default
|
||||
cors_allowed_origins = ["https://example.com"]
|
||||
## Whether to enable validation for Prometheus remote write requests.
|
||||
## Available options:
|
||||
## - strict: deny invalid UTF-8 strings (default).
|
||||
## - lossy: allow invalid UTF-8 strings, replace invalid characters with REPLACEMENT_CHARACTER(U+FFFD).
|
||||
## - unchecked: do not validate strings.
|
||||
prom_validation_mode = "strict"
|
||||
|
||||
## The gRPC server options.
|
||||
# gRPC server options, see `standalone.example.toml`.
|
||||
[grpc]
|
||||
## The address to bind the gRPC server.
|
||||
bind_addr = "127.0.0.1:4001"
|
||||
## The address advertised to the metasrv, and used for connections from outside the host.
|
||||
## If left empty or unset, the server will automatically use the IP address of the first network interface
|
||||
## on the host, with the same port number as the one specified in `grpc.bind_addr`.
|
||||
server_addr = "127.0.0.1:4001"
|
||||
## The number of server worker threads.
|
||||
addr = "127.0.0.1:4001"
|
||||
runtime_size = 8
|
||||
## Compression mode for frontend side Arrow IPC service. Available options:
|
||||
## - `none`: disable all compression
|
||||
## - `transport`: only enable gRPC transport compression (zstd)
|
||||
## - `arrow_ipc`: only enable Arrow IPC compression (lz4)
|
||||
## - `all`: enable all compression.
|
||||
## Default to `none`
|
||||
flight_compression = "arrow_ipc"
|
||||
|
||||
## gRPC server TLS options, see `mysql.tls` section.
|
||||
[grpc.tls]
|
||||
## TLS mode.
|
||||
mode = "disable"
|
||||
|
||||
## Certificate file path.
|
||||
## @toml2docs:none-default
|
||||
cert_path = ""
|
||||
|
||||
## Private key file path.
|
||||
## @toml2docs:none-default
|
||||
key_path = ""
|
||||
|
||||
## Watch for Certificate and key file change and auto reload.
|
||||
## For now, gRPC tls config does not support auto reload.
|
||||
watch = false
|
||||
|
||||
## The internal gRPC server options. Internal gRPC port for nodes inside cluster to access frontend.
|
||||
[internal_grpc]
|
||||
## The address to bind the gRPC server.
|
||||
bind_addr = "127.0.0.1:4010"
|
||||
## The address advertised to the metasrv, and used for connections from outside the host.
|
||||
## If left empty or unset, the server will automatically use the IP address of the first network interface
|
||||
## on the host, with the same port number as the one specified in `grpc.bind_addr`.
|
||||
server_addr = "127.0.0.1:4010"
|
||||
## The number of server worker threads.
|
||||
runtime_size = 8
|
||||
## Compression mode for frontend side Arrow IPC service. Available options:
|
||||
## - `none`: disable all compression
|
||||
## - `transport`: only enable gRPC transport compression (zstd)
|
||||
## - `arrow_ipc`: only enable Arrow IPC compression (lz4)
|
||||
## - `all`: enable all compression.
|
||||
## Default to `none`
|
||||
flight_compression = "arrow_ipc"
|
||||
|
||||
## internal gRPC server TLS options, see `mysql.tls` section.
|
||||
[internal_grpc.tls]
|
||||
## TLS mode.
|
||||
mode = "disable"
|
||||
|
||||
## Certificate file path.
|
||||
## @toml2docs:none-default
|
||||
cert_path = ""
|
||||
|
||||
## Private key file path.
|
||||
## @toml2docs:none-default
|
||||
key_path = ""
|
||||
|
||||
## Watch for Certificate and key file change and auto reload.
|
||||
## For now, gRPC tls config does not support auto reload.
|
||||
watch = false
|
||||
|
||||
|
||||
## MySQL server options.
|
||||
# MySQL server options, see `standalone.example.toml`.
|
||||
[mysql]
|
||||
## Whether to enable.
|
||||
enable = true
|
||||
## The address to bind the MySQL server.
|
||||
addr = "127.0.0.1:4002"
|
||||
## The number of server worker threads.
|
||||
runtime_size = 2
|
||||
## Server-side keep-alive time.
|
||||
## Set to 0 (default) to disable.
|
||||
keep_alive = "0s"
|
||||
## Maximum entries in the MySQL prepared statement cache; default is 10,000.
|
||||
prepared_stmt_cache_size = 10000
|
||||
|
||||
# MySQL server TLS options.
|
||||
# MySQL server TLS options, see `standalone.example.toml`.
|
||||
[mysql.tls]
|
||||
|
||||
## TLS mode, refer to https://www.postgresql.org/docs/current/libpq-ssl.html
|
||||
## - `disable` (default value)
|
||||
## - `prefer`
|
||||
## - `require`
|
||||
## - `verify-ca`
|
||||
## - `verify-full`
|
||||
mode = "disable"
|
||||
|
||||
## Certificate file path.
|
||||
## @toml2docs:none-default
|
||||
cert_path = ""
|
||||
|
||||
## Private key file path.
|
||||
## @toml2docs:none-default
|
||||
key_path = ""
|
||||
|
||||
## Watch for Certificate and key file change and auto reload
|
||||
watch = false
|
||||
|
||||
## PostgreSQL server options.
[postgres]
|
||||
## Whether to enable
|
||||
enable = true
|
||||
## The address to bind the PostgreSQL server.
|
||||
addr = "127.0.0.1:4003"
|
||||
## The number of server worker threads.
|
||||
runtime_size = 2
|
||||
## Server-side keep-alive time.
|
||||
## Set to 0 (default) to disable.
|
||||
keep_alive = "0s"
|
||||
|
||||
## PostgreSQL server TLS options, see `mysql.tls` section.
[postgres.tls]
|
||||
## TLS mode.
|
||||
mode = "disable"
|
||||
|
||||
## Certificate file path.
|
||||
## @toml2docs:none-default
|
||||
cert_path = ""
|
||||
|
||||
## Private key file path.
|
||||
## @toml2docs:none-default
|
||||
key_path = ""
|
||||
|
||||
## Watch for Certificate and key file change and auto reload
|
||||
watch = false
|
||||
|
||||
## OpenTSDB protocol options.
|
||||
# OpenTSDB protocol options, see `standalone.example.toml`.
|
||||
[opentsdb]
|
||||
## Whether to enable OpenTSDB put in HTTP API.
|
||||
enable = true
|
||||
addr = "127.0.0.1:4242"
|
||||
runtime_size = 2
|
||||
|
||||
## InfluxDB protocol options.
|
||||
# InfluxDB protocol options, see `standalone.example.toml`.
|
||||
[influxdb]
|
||||
## Whether to enable InfluxDB protocol in HTTP API.
|
||||
enable = true
|
||||
|
||||
## Jaeger protocol options.
|
||||
[jaeger]
|
||||
## Whether to enable Jaeger protocol in HTTP API.
|
||||
enable = true
|
||||
|
||||
## Prometheus remote storage options
|
||||
# Prometheus remote storage options, see `standalone.example.toml`.
|
||||
[prom_store]
|
||||
## Whether to enable Prometheus remote write and read in HTTP API.
|
||||
enable = true
|
||||
## Whether to store the data from Prometheus remote write in metric engine.
|
||||
with_metric_engine = true
|
||||
|
||||
## The metasrv client options.
|
||||
# Metasrv client options, see `datanode.example.toml`.
|
||||
[meta_client]
|
||||
## The addresses of the metasrv.
|
||||
metasrv_addrs = ["127.0.0.1:3002"]
|
||||
|
||||
## Operation timeout.
|
||||
timeout = "3s"
|
||||
|
||||
## Heartbeat timeout.
|
||||
heartbeat_timeout = "500ms"
|
||||
|
||||
## DDL timeout.
|
||||
# DDL timeouts options.
|
||||
ddl_timeout = "10s"
|
||||
|
||||
## Connect server timeout.
|
||||
connect_timeout = "1s"
|
||||
|
||||
## `TCP_NODELAY` option for accepted connections.
|
||||
tcp_nodelay = true
|
||||
|
||||
## The configuration about the cache of the metadata.
metadata_cache_max_capacity = 100000

## TTL of the metadata cache.
metadata_cache_ttl = "10m"

## TTI of the metadata cache.
metadata_cache_tti = "5m"
|
||||
|
||||
## The query engine options.
|
||||
[query]
|
||||
## Parallelism of the query engine.
|
||||
## Default to 0, which means the number of CPU cores.
|
||||
parallelism = 0
|
||||
## Whether to allow query fallback when push-down optimization fails.
## Default to false, meaning that an error is returned when push-down optimization fails.
allow_query_fallback = false
|
||||
|
||||
## Datanode options.
|
||||
# Datanode options.
|
||||
[datanode]
|
||||
## Datanode client options.
|
||||
# Datanode client options.
|
||||
[datanode.client]
|
||||
timeout = "10s"
|
||||
connect_timeout = "10s"
|
||||
tcp_nodelay = true
|
||||
|
||||
## The logging options.
|
||||
[logging]
|
||||
## The directory to store the log files. If set to empty, logs will not be written to files.
|
||||
dir = "./greptimedb_data/logs"
|
||||
|
||||
## The log level. Can be `info`/`debug`/`warn`/`error`.
|
||||
## @toml2docs:none-default
|
||||
level = "info"
|
||||
|
||||
## Enable OTLP tracing.
|
||||
enable_otlp_tracing = false
|
||||
|
||||
## The OTLP tracing endpoint.
|
||||
otlp_endpoint = "http://localhost:4318/v1/traces"
|
||||
|
||||
## Whether to append logs to stdout.
|
||||
append_stdout = true
|
||||
|
||||
## The log format. Can be `text`/`json`.
|
||||
log_format = "text"
|
||||
|
||||
## The maximum amount of log files.
|
||||
max_log_files = 720
|
||||
|
||||
## The OTLP tracing export protocol. Can be `grpc`/`http`.
|
||||
otlp_export_protocol = "http"
|
||||
|
||||
## Additional OTLP headers, only valid when using OTLP http
|
||||
[logging.otlp_headers]
|
||||
## @toml2docs:none-default
|
||||
#Authorization = "Bearer my-token"
|
||||
## @toml2docs:none-default
|
||||
#Database = "My database"
|
||||
|
||||
## The percentage of tracing that will be sampled and exported.
## Valid range `[0, 1]`: 1 means all traces are sampled, 0 means no traces are sampled. The default value is 1.
## Ratios > 1 are treated as 1. Fractions < 0 are treated as 0.
|
||||
[logging.tracing_sample_ratio]
|
||||
default_ratio = 1.0
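## Example (illustrative only): sample roughly 10% of traces when OTLP tracing is enabled.
# [logging.tracing_sample_ratio]
# default_ratio = 0.1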
|
||||
|
||||
## The slow query log options.
|
||||
[slow_query]
|
||||
## Whether to enable slow query log.
|
||||
enable = true
|
||||
|
||||
## The record type of slow queries. It can be `system_table` or `log`.
|
||||
## If `system_table` is selected, the slow queries will be recorded in a system table `greptime_private.slow_queries`.
|
||||
## If `log` is selected, the slow queries will be logged in a log file `greptimedb-slow-queries.*`.
|
||||
record_type = "system_table"
|
||||
|
||||
## The threshold of slow query. It can be a human-readable time string, for example: `10s`, `100ms`, `1s`.
|
||||
threshold = "30s"
|
||||
|
||||
## The sampling ratio of slow query log. The value should be in the range of (0, 1]. For example, `0.1` means 10% of the slow queries will be logged and `1.0` means all slow queries will be logged.
|
||||
sample_ratio = 1.0
|
||||
|
||||
## The TTL of the `slow_queries` system table. Default is `90d` when `record_type` is `system_table`.
|
||||
ttl = "90d"
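## Example (illustrative only): log 20% of queries slower than 5 seconds to the log file
## instead of the system table. All keys used here are documented above.
# [slow_query]
# enable = true
# record_type = "log"
# threshold = "5s"
# sample_ratio = 0.2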
|
||||
|
||||
## The frontend can export its metrics and send them to a Prometheus-compatible service (e.g. `greptimedb` itself) via the remote-write API.
## This is only used for `greptimedb` to export its own metrics internally. It's different from Prometheus scrape.
[export_metrics]
## Whether to enable export metrics.
enable = false
|
||||
## The interval of export metrics.
|
||||
write_interval = "30s"
|
||||
|
||||
[export_metrics.remote_write]
|
||||
## The prometheus remote write endpoint that the metrics send to. The url example can be: `http://127.0.0.1:4000/v1/prometheus/write?db=greptime_metrics`.
|
||||
url = ""
|
||||
|
||||
## HTTP headers of Prometheus remote-write carry.
|
||||
headers = { }
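## Example (illustrative only): have this frontend write its own metrics back into GreptimeDB
## every minute, using the sample URL shown above.
# [export_metrics]
# enable = true
# write_interval = "1m"
# [export_metrics.remote_write]
# url = "http://127.0.0.1:4000/v1/prometheus/write?db=greptime_metrics"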
|
||||
|
||||
## The tracing options. Only takes effect when compiled with the `tokio-console` feature.
|
||||
#+ [tracing]
|
||||
## The tokio console address.
|
||||
## @toml2docs:none-default
|
||||
#+ tokio_console_addr = "127.0.0.1"
|
||||
|
||||
## The memory options.
|
||||
[memory]
|
||||
## Whether to enable heap profiling activation during startup.
|
||||
## When enabled, heap profiling will be activated if the `MALLOC_CONF` environment variable
|
||||
## is set to "prof:true,prof_active:false". The official image adds this env variable.
|
||||
## Default is true.
|
||||
enable_heap_profiling = true
|
||||
|
||||
## Configuration options for the event recorder.
|
||||
[event_recorder]
|
||||
## TTL for the events table that will be used to store the events. Default is `90d`.
|
||||
ttl = "90d"
|
||||
|
||||
|
||||
@@ -1,355 +1,96 @@
|
||||
## The working home directory.
data_home = "./greptimedb_data"

## Store server address, defaulting to the etcd store.
## For postgres store, the format is:
## "password=password dbname=postgres user=postgres host=localhost port=5432"
## For etcd store, the format is:
## "127.0.0.1:2379"
store_addrs = ["127.0.0.1:2379"]

## If it's not empty, the metasrv will store all data with this key prefix.
store_key_prefix = ""
|
||||
|
||||
## The datastore for meta server.
|
||||
## Available values:
|
||||
## - `etcd_store` (default value)
|
||||
## - `memory_store`
|
||||
## - `postgres_store`
|
||||
## - `mysql_store`
|
||||
backend = "etcd_store"
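## Example (illustrative only): switch the metadata store from etcd to PostgreSQL.
## The connection string is a placeholder; see `store_addrs` above for the format.
# backend = "postgres_store"
# store_addrs = ["password=password dbname=postgres user=postgres host=localhost port=5432"]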
|
||||
|
||||
|
||||
## Table name in RDS to store metadata. Takes effect when using an RDS kvbackend.
|
||||
## **Only used when backend is `postgres_store`.**
|
||||
meta_table_name = "greptime_metakv"
|
||||
|
||||
## Optional PostgreSQL schema for metadata table and election table name qualification.
|
||||
## When PostgreSQL public schema is not writable (e.g., PostgreSQL 15+ with restricted public),
|
||||
## set this to a writable schema. GreptimeDB will use `meta_schema_name`.`meta_table_name`.
|
||||
## GreptimeDB will NOT create the schema automatically; please ensure it exists or the user has permission.
|
||||
## **Only used when backend is `postgres_store`.**
|
||||
|
||||
meta_schema_name = "greptime_schema"
|
||||
|
||||
## Advisory lock id in PostgreSQL for election. Takes effect when using PostgreSQL as the kvbackend.
|
||||
## Only used when backend is `postgres_store`.
|
||||
meta_election_lock_id = 1
|
||||
|
||||
## Datanode selector type.
|
||||
## - `round_robin` (default value)
|
||||
## - `lease_based`
|
||||
## - `load_based`
|
||||
## For details, please see "https://docs.greptime.com/developer-guide/metasrv/selector".
|
||||
selector = "round_robin"
|
||||
|
||||
## Store data in memory.
|
||||
use_memory_store = false
|
||||
|
||||
## Whether to enable region failover.
|
||||
## This feature is only available on GreptimeDB running in cluster mode and
## - Using Remote WAL
## - Using shared storage (e.g., S3).
|
||||
enable_region_failover = false
|
||||
|
||||
## The delay before starting region failure detection.
|
||||
## This delay helps prevent Metasrv from triggering unnecessary region failovers before all Datanodes are fully started.
|
||||
## Especially useful when the cluster is not deployed with GreptimeDB Operator and maintenance mode is not enabled.
|
||||
region_failure_detector_initialization_delay = '10m'
|
||||
|
||||
## Whether to allow region failover on local WAL.
|
||||
## **This option is not recommended to be set to true, because it may lead to data loss during failover.**
|
||||
allow_region_failover_on_local_wal = false
|
||||
|
||||
## Max allowed idle time before removing node info from metasrv memory.
|
||||
node_max_idle_time = "24hours"
|
||||
|
||||
## Whether to enable greptimedb telemetry. Enabled by default.
|
||||
#+ enable_telemetry = true
|
||||
|
||||
## The runtime options.
|
||||
#+ [runtime]
|
||||
## The number of threads to execute the runtime for global read operations.
|
||||
#+ global_rt_size = 8
|
||||
## The number of threads to execute the runtime for global write operations.
|
||||
#+ compact_rt_size = 4
|
||||
|
||||
## TLS configuration for kv store backend (applicable for etcd, PostgreSQL, and MySQL backends)
|
||||
## When using etcd, PostgreSQL, or MySQL as metadata store, you can configure TLS here
|
||||
[backend_tls]
|
||||
## TLS mode, refer to https://www.postgresql.org/docs/current/libpq-ssl.html
|
||||
## - "disable" - No TLS
|
||||
## - "prefer" (default) - Try TLS, fallback to plain
|
||||
## - "require" - Require TLS
|
||||
## - "verify_ca" - Require TLS and verify CA
|
||||
## - "verify_full" - Require TLS and verify hostname
|
||||
mode = "prefer"
|
||||
|
||||
## Path to client certificate file (for client authentication)
|
||||
## Like "/path/to/client.crt"
|
||||
cert_path = ""
|
||||
|
||||
## Path to client private key file (for client authentication)
|
||||
## Like "/path/to/client.key"
|
||||
key_path = ""
|
||||
|
||||
## Path to CA certificate file (for server certificate verification)
|
||||
## Required when using custom CAs or self-signed certificates
|
||||
## Leave empty to use system root certificates only
|
||||
## Like "/path/to/ca.crt"
|
||||
ca_cert_path = ""
|
||||
|
||||
## Watch for certificate file changes and auto reload
|
||||
watch = false
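## Example (illustrative only): verify the metadata store's certificate against a custom CA.
## The CA path is a hypothetical placeholder.
# [backend_tls]
# mode = "verify_full"
# ca_cert_path = "/path/to/ca.crt"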
|
||||
|
||||
## The gRPC server options.
|
||||
[grpc]
|
||||
## The address to bind the gRPC server.
|
||||
bind_addr = "127.0.0.1:3002"
|
||||
## The communication server address for the frontend and datanode to connect to metasrv.
|
||||
## If left empty or unset, the server will automatically use the IP address of the first network interface
|
||||
## on the host, with the same port number as the one specified in `bind_addr`.
|
||||
server_addr = "127.0.0.1:3002"
|
||||
## The number of server worker threads.
|
||||
runtime_size = 8
|
||||
## The maximum receive message size for gRPC server.
|
||||
max_recv_message_size = "512MB"
|
||||
## The maximum send message size for gRPC server.
|
||||
max_send_message_size = "512MB"
|
||||
|
||||
## The HTTP server options.
|
||||
[http]
|
||||
## The address to bind the HTTP server.
|
||||
addr = "127.0.0.1:4000"
|
||||
## HTTP request timeout. Set to 0 to disable timeout.
|
||||
timeout = "0s"
|
||||
## HTTP request body limit.
|
||||
## The following units are supported: `B`, `KB`, `KiB`, `MB`, `MiB`, `GB`, `GiB`, `TB`, `TiB`, `PB`, `PiB`.
|
||||
## Set to 0 to disable limit.
|
||||
body_limit = "64MB"
|
||||
|
||||
## Procedure storage options.
|
||||
# Procedure storage options.
|
||||
[procedure]
|
||||
|
||||
## Procedure max retry time.
|
||||
# Procedure max retry time.
|
||||
max_retry_times = 12
|
||||
|
||||
## Initial retry delay of procedures, increases exponentially
|
||||
# Initial retry delay of procedures, increases exponentially
|
||||
retry_delay = "500ms"
|
||||
|
||||
## Automatically split large values.
## GreptimeDB procedure uses etcd as the default metadata storage backend.
## The maximum size of any etcd request is 1.5 MiB:
## 1500KiB = 1536KiB (1.5MiB) - 36KiB (reserved size of key)
## Comment out `max_metadata_value_size` to disable splitting large values (no limit).
max_metadata_value_size = "1500KiB"
|
||||
|
||||
## Max running procedures.
|
||||
## The maximum number of procedures that can be running at the same time.
|
||||
## If the number of running procedures exceeds this limit, the procedure will be rejected.
|
||||
max_running_procedures = 128
|
||||
|
||||
# Failure detectors options.
|
||||
[failure_detector]
|
||||
|
||||
## The threshold value used by the failure detector to determine failure conditions.
|
||||
threshold = 8.0
|
||||
|
||||
## The minimum standard deviation of the heartbeat intervals, used to calculate acceptable variations.
|
||||
min_std_deviation = "100ms"
|
||||
|
||||
## The acceptable pause duration between heartbeats, used to determine if a heartbeat interval is acceptable.
acceptable_heartbeat_pause = "10000ms"

## The initial estimate of the heartbeat interval used by the failure detector.
first_heartbeat_estimate = "1000ms"
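## Example (illustrative only): loosen failure detection for networks with higher heartbeat jitter,
## using only the options documented above.
# [failure_detector]
# threshold = 10.0
# acceptable_heartbeat_pause = "20000ms"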
|
||||
|
||||
## Datanode options.
|
||||
[datanode]
|
||||
|
||||
## Datanode client options.
|
||||
[datanode.client]
|
||||
|
||||
## Operation timeout.
|
||||
timeout = "10s"
|
||||
|
||||
## Connect server timeout.
|
||||
connect_timeout = "10s"
|
||||
|
||||
## `TCP_NODELAY` option for accepted connections.
|
||||
tcp_nodelay = true
|
||||
|
||||
|
||||
[wal]
|
||||
## Available WAL providers:
## - `raft_engine` (default): there is no raft-engine WAL config here, since the metasrv currently only participates in remote WAL.
## - `kafka`: the metasrv **must** be configured with Kafka WAL config when the datanode uses the Kafka WAL provider.
provider = "raft_engine"
|
||||
|
||||
## The broker endpoints of the Kafka cluster.
|
||||
##
|
||||
## **It's only used when the provider is `kafka`**.
|
||||
broker_endpoints = ["127.0.0.1:9092"]
|
||||
|
||||
|
||||
## Automatically create topics for WAL.
|
||||
## Set to `true` to automatically create topics for WAL.
|
||||
## Otherwise, use topics named `topic_name_prefix_[0..num_topics)`
|
||||
## **It's only used when the provider is `kafka`**.
|
||||
auto_create_topics = true
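## Example (illustrative only): use pre-created topics named `greptimedb_wal_topic_0` ..
## `greptimedb_wal_topic_63` instead of creating them automatically.
# auto_create_topics = false
# num_topics = 64
# topic_name_prefix = "greptimedb_wal_topic"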
|
||||
# Kafka wal config.
|
||||
# The broker endpoints of the Kafka cluster. ["127.0.0.1:9092"] by default.
|
||||
# broker_endpoints = ["127.0.0.1:9092"]
|
||||
# Number of topics to be created upon start.
|
||||
# num_topics = 64
|
||||
# Topic selector type.
|
||||
# Available selector types:
|
||||
# - "round_robin" (default)
|
||||
# selector_type = "round_robin"
|
||||
# A Kafka topic is constructed by concatenating `topic_name_prefix` and `topic_id`.
|
||||
# topic_name_prefix = "greptimedb_wal_topic"
|
||||
# Number of partitions per topic.
|
||||
# num_partitions = 1
|
||||
# Expected number of replicas of each partition.
|
||||
# replication_factor = 1
|
||||
# Above which a topic creation operation will be cancelled.
|
||||
# create_topic_timeout = "30s"
|
||||
# The initial backoff for kafka clients.
|
||||
# backoff_init = "500ms"
|
||||
# The maximum backoff for kafka clients.
|
||||
# backoff_max = "10s"
|
||||
# Exponential backoff rate, i.e. next backoff = base * current backoff.
|
||||
# backoff_base = 2
|
||||
# Stop reconnecting if the total wait time reaches the deadline. If this config is missing, the reconnecting won't terminate.
|
||||
# backoff_deadline = "5mins"
|
||||
|
||||
## Interval of automatic WAL pruning.
## Set to `0s` to disable automatic WAL pruning, which periodically deletes unused remote WAL entries.
## **It's only used when the provider is `kafka`**.
auto_prune_interval = "30m"
|
||||
|
||||
|
||||
## Estimated size threshold to trigger a flush when using Kafka remote WAL.
|
||||
## Since multiple regions may share a Kafka topic, the estimated size is calculated as:
|
||||
## (latest_entry_id - flushed_entry_id) * avg_record_size
|
||||
## MetaSrv triggers a flush for a region when this estimated size exceeds `flush_trigger_size`.
|
||||
## - `latest_entry_id`: The latest entry ID in the topic.
|
||||
## - `flushed_entry_id`: The last flushed entry ID for the region.
|
||||
## Set to "0" to let the system decide the flush trigger size.
|
||||
## **It's only used when the provider is `kafka`**.
|
||||
flush_trigger_size = "512MB"
|
||||
|
||||
## Estimated size threshold to trigger a checkpoint when using Kafka remote WAL.
|
||||
## The estimated size is calculated as:
|
||||
## (latest_entry_id - last_checkpoint_entry_id) * avg_record_size
|
||||
## MetaSrv triggers a checkpoint for a region when this estimated size exceeds `checkpoint_trigger_size`.
|
||||
## Set to "0" to let the system decide the checkpoint trigger size.
|
||||
## **It's only used when the provider is `kafka`**.
|
||||
checkpoint_trigger_size = "128MB"
|
||||
|
||||
## Concurrent task limit for automatic WAL pruning.
|
||||
## **It's only used when the provider is `kafka`**.
|
||||
auto_prune_parallelism = 10
|
||||
|
||||
## Number of topics used for remote WAL.
|
||||
## **It's only used when the provider is `kafka`**.
|
||||
num_topics = 64
|
||||
|
||||
## Topic selector type.
|
||||
## Available selector types:
|
||||
## - `round_robin` (default)
|
||||
## **It's only used when the provider is `kafka`**.
|
||||
selector_type = "round_robin"
|
||||
|
||||
|
||||
## A Kafka topic is constructed by concatenating `topic_name_prefix` and `topic_id`.
|
||||
## Only accepts strings that match the following regular expression pattern:
|
||||
## [a-zA-Z_:-][a-zA-Z0-9_:\-\.@#]*
|
||||
## e.g., greptimedb_wal_topic_0, greptimedb_wal_topic_1.
|
||||
## **It's only used when the provider is `kafka`**.
|
||||
topic_name_prefix = "greptimedb_wal_topic"
|
||||
|
||||
## Expected number of replicas of each partition.
|
||||
## **It's only used when the provider is `kafka`**.
|
||||
replication_factor = 1
|
||||
|
||||
## The timeout for creating a Kafka topic.
|
||||
## **It's only used when the provider is `kafka`**.
|
||||
create_topic_timeout = "30s"
|
||||
|
||||
# The Kafka SASL configuration.
|
||||
# **It's only used when the provider is `kafka`**.
|
||||
# Available SASL mechanisms:
|
||||
# - `PLAIN`
|
||||
# - `SCRAM-SHA-256`
|
||||
# - `SCRAM-SHA-512`
|
||||
# [wal.sasl]
|
||||
# type = "SCRAM-SHA-512"
|
||||
# username = "user_kafka"
|
||||
# password = "secret"
|
||||
|
||||
# The Kafka TLS configuration.
|
||||
# **It's only used when the provider is `kafka`**.
|
||||
# [wal.tls]
|
||||
# server_ca_cert_path = "/path/to/server_cert"
|
||||
# client_cert_path = "/path/to/client_cert"
|
||||
# client_key_path = "/path/to/key"
|
||||
|
||||
## Configuration options for the event recorder.
|
||||
[event_recorder]
|
||||
## TTL for the events table that will be used to store the events. Default is `90d`.
|
||||
ttl = "90d"
|
||||
|
||||
## Configuration options for the stats persistence.
|
||||
[stats_persistence]
|
||||
## TTL for the stats table that will be used to store the stats.
|
||||
## Set to `0s` to disable stats persistence.
|
||||
## Default is `0s`.
|
||||
## If you want to enable stats persistence, set the TTL to a value greater than 0.
|
||||
## It is recommended to set a small value, e.g., `3h`.
|
||||
ttl = "0s"
|
||||
## The interval to persist the stats. Default is `10m`.
|
||||
## The minimum value is `10m`, if the value is less than `10m`, it will be overridden to `10m`.
|
||||
interval = "10m"
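## Example (illustrative only): enable stats persistence with the small TTL recommended above.
# [stats_persistence]
# ttl = "3h"
# interval = "10m"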
|
||||
|
||||
## The logging options.
|
||||
[logging]
|
||||
## The directory to store the log files. If set to empty, logs will not be written to files.
|
||||
dir = "./greptimedb_data/logs"
|
||||
|
||||
## The log level. Can be `info`/`debug`/`warn`/`error`.
|
||||
## @toml2docs:none-default
|
||||
level = "info"
|
||||
|
||||
## Enable OTLP tracing.
|
||||
enable_otlp_tracing = false
|
||||
|
||||
## The OTLP tracing endpoint.
|
||||
otlp_endpoint = "http://localhost:4318/v1/traces"
|
||||
|
||||
## Whether to append logs to stdout.
|
||||
append_stdout = true
|
||||
|
||||
## The log format. Can be `text`/`json`.
|
||||
log_format = "text"
|
||||
|
||||
## The maximum amount of log files.
|
||||
max_log_files = 720
|
||||
|
||||
## The OTLP tracing export protocol. Can be `grpc`/`http`.
|
||||
otlp_export_protocol = "http"
|
||||
|
||||
## Additional OTLP headers, only valid when using OTLP http
|
||||
[logging.otlp_headers]
|
||||
## @toml2docs:none-default
|
||||
#Authorization = "Bearer my-token"
|
||||
## @toml2docs:none-default
|
||||
#Database = "My database"
|
||||
|
||||
|
||||
## The percentage of tracing that will be sampled and exported.
## Valid range `[0, 1]`: 1 means all traces are sampled, 0 means no traces are sampled. The default value is 1.
## Ratios > 1 are treated as 1. Fractions < 0 are treated as 0.
|
||||
[logging.tracing_sample_ratio]
|
||||
default_ratio = 1.0
|
||||
|
||||
## The metasrv can export its metrics and send them to a Prometheus-compatible service (e.g. `greptimedb` itself) via the remote-write API.
## This is only used for `greptimedb` to export its own metrics internally. It's different from Prometheus scrape.
[export_metrics]
## Whether to enable export metrics.
enable = false
|
||||
## The interval of export metrics.
|
||||
write_interval = "30s"
|
||||
|
||||
[export_metrics.remote_write]
|
||||
## The prometheus remote write endpoint that the metrics send to. The url example can be: `http://127.0.0.1:4000/v1/prometheus/write?db=greptime_metrics`.
|
||||
url = ""
|
||||
|
||||
## HTTP headers of Prometheus remote-write carry.
|
||||
headers = { }
|
||||
|
||||
## The tracing options. Only takes effect when compiled with the `tokio-console` feature.
|
||||
#+ [tracing]
|
||||
## The tokio console address.
|
||||
## @toml2docs:none-default
|
||||
#+ tokio_console_addr = "127.0.0.1"
|
||||
|
||||
## The memory options.
|
||||
[memory]
|
||||
## Whether to enable heap profiling activation during startup.
|
||||
## When enabled, heap profiling will be activated if the `MALLOC_CONF` environment variable
|
||||
## is set to "prof:true,prof_active:false". The official image adds this env variable.
|
||||
## Default is true.
|
||||
enable_heap_profiling = true
|
||||
|
||||
|
||||
@@ -1,804 +1,240 @@
|
||||
## The default timezone of the server.
## @toml2docs:none-default
default_timezone = "UTC"
|
||||
|
||||
## Initialize all regions in the background during the startup.
|
||||
## By default, it provides services after all regions have been initialized.
|
||||
init_regions_in_background = false
|
||||
|
||||
## Parallelism of initializing regions.
|
||||
init_regions_parallelism = 16
|
||||
|
||||
## The maximum number of concurrent queries allowed to be executed. Zero means unlimited.
|
||||
max_concurrent_queries = 0
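## Example (illustrative only): cap concurrent queries on a small standalone instance.
## The value is an arbitrary illustration, not a recommended default.
# max_concurrent_queries = 100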
|
||||
|
||||
## Enable telemetry to collect anonymous usage data. Enabled by default.
|
||||
#+ enable_telemetry = true
|
||||
|
||||
## The maximum in-flight write bytes.
|
||||
## @toml2docs:none-default
|
||||
#+ max_in_flight_write_bytes = "500MB"
|
||||
|
||||
## The runtime options.
|
||||
#+ [runtime]
|
||||
## The number of threads to execute the runtime for global read operations.
|
||||
#+ global_rt_size = 8
|
||||
## The number of threads to execute the runtime for global write operations.
|
||||
#+ compact_rt_size = 4
|
||||
|
||||
## The HTTP server options.
[http]
## The address to bind the HTTP server.
addr = "127.0.0.1:4000"
## HTTP request timeout. Set to 0 to disable timeout.
timeout = "0s"
## HTTP request body limit.
## The following units are supported: `B`, `KB`, `KiB`, `MB`, `MiB`, `GB`, `GiB`, `TB`, `TiB`, `PB`, `PiB`.
## Set to 0 to disable limit.
body_limit = "64MB"
|
||||
## HTTP CORS support, it's turned on by default
|
||||
## This allows browser to access http APIs without CORS restrictions
|
||||
enable_cors = true
|
||||
## Customize allowed origins for HTTP CORS.
|
||||
## @toml2docs:none-default
|
||||
cors_allowed_origins = ["https://example.com"]
|
||||
|
||||
## Whether to enable validation for Prometheus remote write requests.
|
||||
## Available options:
|
||||
## - strict: deny invalid UTF-8 strings (default).
|
||||
## - lossy: allow invalid UTF-8 strings, replace invalid characters with REPLACEMENT_CHARACTER(U+FFFD).
|
||||
## - unchecked: do not validate strings.
|
||||
prom_validation_mode = "strict"
|
||||
|
||||
## The gRPC server options.
[grpc]
## The address to bind the gRPC server.
bind_addr = "127.0.0.1:4001"
## The number of server worker threads.
runtime_size = 8
|
||||
|
||||
## gRPC server TLS options, see `mysql.tls` section.
|
||||
[grpc.tls]
|
||||
## TLS mode.
|
||||
mode = "disable"
|
||||
|
||||
## Certificate file path.
|
||||
## @toml2docs:none-default
|
||||
cert_path = ""
|
||||
|
||||
## Private key file path.
|
||||
## @toml2docs:none-default
|
||||
key_path = ""
|
||||
|
||||
## Watch for Certificate and key file change and auto reload.
|
||||
## For now, gRPC tls config does not support auto reload.
|
||||
watch = false
|
||||
|
||||
## MySQL server options.
[mysql]
## Whether to enable.
enable = true
## The address to bind the MySQL server.
addr = "127.0.0.1:4002"
## The number of server worker threads.
runtime_size = 2
## Server-side keep-alive time.
## Set to 0 (default) to disable.
keep_alive = "0s"
## Maximum entries in the MySQL prepared statement cache; default is 10,000.
prepared_stmt_cache_size = 10000
|
||||
|
||||
# MySQL server TLS options.
[mysql.tls]

## TLS mode, refer to https://www.postgresql.org/docs/current/libpq-ssl.html
## - `disable` (default value)
## - `prefer`
## - `require`
## - `verify-ca`
## - `verify-full`
mode = "disable"

## Certificate file path.
## @toml2docs:none-default
cert_path = ""

## Private key file path.
## @toml2docs:none-default
key_path = ""

## Watch for Certificate and key file change and auto reload
watch = false
|
||||
|
||||
## PostgreSQL server options.
[postgres]
## Whether to enable.
enable = true
## The address to bind the PostgreSQL server.
addr = "127.0.0.1:4003"
## The number of server worker threads.
runtime_size = 2
## Server-side keep-alive time.
## Set to 0 (default) to disable.
keep_alive = "0s"

## PostgreSQL server TLS options, see `mysql.tls` section.
[postgres.tls]
## TLS mode.
mode = "disable"

## Certificate file path.
## @toml2docs:none-default
cert_path = ""

## Private key file path.
## @toml2docs:none-default
key_path = ""

## Watch for Certificate and key file change and auto reload
watch = false
|
||||
|
||||
## OpenTSDB protocol options.
[opentsdb]
## Whether to enable OpenTSDB put in HTTP API.
enable = true
|
||||
|
||||
## InfluxDB protocol options.
|
||||
# InfluxDB protocol options.
|
||||
[influxdb]
|
||||
## Whether to enable InfluxDB protocol in HTTP API.
|
||||
# Whether to enable InfluxDB protocol in HTTP API, true by default.
|
||||
enable = true
|
||||
|
||||
## Jaeger protocol options.
|
||||
[jaeger]
|
||||
## Whether to enable Jaeger protocol in HTTP API.
|
||||
enable = true
|
||||
|
||||
## Prometheus remote storage options
|
||||
# Prometheus remote storage options
|
||||
[prom_store]
|
||||
## Whether to enable Prometheus remote write and read in HTTP API.
|
||||
# Whether to enable Prometheus remote write and read in HTTP API, true by default.
|
||||
enable = true
|
||||
## Whether to store the data from Prometheus remote write in metric engine.
|
||||
with_metric_engine = true
|
||||
|
||||
## The WAL options.
[wal]
## The provider of the WAL.
## - `raft_engine`: the WAL is stored in the local file system by raft-engine.
## - `kafka`: remote WAL, where data is stored in Kafka.
provider = "raft_engine"
|
||||
|
||||
## The directory to store the WAL files.
|
||||
## **It's only used when the provider is `raft_engine`**.
|
||||
## @toml2docs:none-default
|
||||
dir = "./greptimedb_data/wal"
|
||||
|
||||
|
||||
## The size of the WAL segment file.
## **It's only used when the provider is `raft_engine`**.
file_size = "128MB"

## The threshold of the WAL size to trigger a purge.
## **It's only used when the provider is `raft_engine`**.
purge_threshold = "1GB"
|
||||
|
||||
## The interval to trigger a purge.
## **It's only used when the provider is `raft_engine`**.
purge_interval = "1m"

## The read batch size.
## **It's only used when the provider is `raft_engine`**.
read_batch_size = 128

## Whether to use sync write.
## **It's only used when the provider is `raft_engine`**.
sync_write = false
|
||||
|
||||
## Whether to reuse logically truncated log files.
|
||||
## **It's only used when the provider is `raft_engine`**.
|
||||
enable_log_recycle = true
|
||||
|
||||
## Whether to pre-create log files on start up.
|
||||
## **It's only used when the provider is `raft_engine`**.
|
||||
prefill_log_files = false
|
||||
|
||||
## Duration for fsyncing log files.
|
||||
## **It's only used when the provider is `raft_engine`**.
|
||||
sync_period = "10s"
|
||||
|
||||
## Parallelism during WAL recovery.
|
||||
recovery_parallelism = 2
|
||||
|
||||
## The Kafka broker endpoints.
|
||||
## **It's only used when the provider is `kafka`**.
|
||||
broker_endpoints = ["127.0.0.1:9092"]
|
||||
|
||||
## Automatically create topics for WAL.
|
||||
## Set to `true` to automatically create topics for WAL.
|
||||
## Otherwise, use topics named `topic_name_prefix_[0..num_topics)`
|
||||
auto_create_topics = true
|
||||
|
||||
## Number of topics.
|
||||
## **It's only used when the provider is `kafka`**.
|
||||
num_topics = 64
|
||||
|
||||
## Topic selector type.
|
||||
## Available selector types:
|
||||
## - `round_robin` (default)
|
||||
## **It's only used when the provider is `kafka`**.
|
||||
selector_type = "round_robin"
|
||||
|
||||
## A Kafka topic is constructed by concatenating `topic_name_prefix` and `topic_id`.
|
||||
## e.g., greptimedb_wal_topic_0, greptimedb_wal_topic_1.
|
||||
## **It's only used when the provider is `kafka`**.
|
||||
topic_name_prefix = "greptimedb_wal_topic"
|
||||
|
||||
## Expected number of replicas of each partition.
|
||||
## **It's only used when the provider is `kafka`**.
|
||||
replication_factor = 1
|
||||
|
||||
## The timeout above which a topic creation operation will be cancelled.
|
||||
## **It's only used when the provider is `kafka`**.
|
||||
create_topic_timeout = "30s"
|
||||
|
||||
## The max size of a single producer batch.
|
||||
## Warning: Kafka has a default limit of 1MB per message in a topic.
|
||||
## **It's only used when the provider is `kafka`**.
|
||||
max_batch_bytes = "1MB"
|
||||
|
||||
## The consumer wait timeout.
|
||||
## **It's only used when the provider is `kafka`**.
|
||||
consumer_wait_timeout = "100ms"
|
||||
|
||||
## Ignore missing entries during read WAL.
|
||||
## **It's only used when the provider is `kafka`**.
|
||||
##
|
||||
## This option ensures that when Kafka messages are deleted, the system
|
||||
## can still successfully replay memtable data without throwing an
|
||||
## out-of-range error.
|
||||
## However, enabling this option might lead to unexpected data loss,
|
||||
## as the system will skip over missing entries instead of treating
|
||||
## them as critical errors.
|
||||
overwrite_entry_start_id = false
|
||||
|
||||
# The Kafka SASL configuration.
|
||||
# **It's only used when the provider is `kafka`**.
|
||||
# Available SASL mechanisms:
|
||||
# - `PLAIN`
|
||||
# - `SCRAM-SHA-256`
|
||||
# - `SCRAM-SHA-512`
|
||||
# [wal.sasl]
|
||||
# type = "SCRAM-SHA-512"
|
||||
# username = "user_kafka"
|
||||
# password = "secret"
|
||||
|
||||
# The Kafka TLS configuration.
|
||||
# **It's only used when the provider is `kafka`**.
|
||||
# [wal.tls]
|
||||
# server_ca_cert_path = "/path/to/server_cert"
|
||||
# client_cert_path = "/path/to/client_cert"
|
||||
# client_key_path = "/path/to/key"
|
||||
|
||||
## Metadata storage options.
[metadata_store]
## The size of the metadata store log file.
file_size = "64MB"
## The threshold of the metadata store size to trigger a purge.
purge_threshold = "256MB"
## The interval of the metadata store to trigger a purge.
purge_interval = "1m"
|
||||
|
||||
## Procedure storage options.
|
||||
# Procedure storage options.
|
||||
[procedure]
|
||||
## Procedure max retry time.
|
||||
# Procedure max retry time.
|
||||
max_retry_times = 3
|
||||
## Initial retry delay of procedures, increases exponentially
|
||||
# Initial retry delay of procedures, increases exponentially
|
||||
retry_delay = "500ms"
|
||||
## Max running procedures.
|
||||
## The maximum number of procedures that can be running at the same time.
|
||||
## If the number of running procedures exceeds this limit, the procedure will be rejected.
|
||||
max_running_procedures = 128
|
||||
|
||||
## flow engine options.
|
||||
[flow]
|
||||
## The number of flow workers in flownode.
## Not setting (or setting to 0) this value will use the number of CPU cores divided by 2.
#+ num_workers = 0
|
||||
|
||||
# Example of using S3 as the storage.
|
||||
# [storage]
|
||||
# type = "S3"
|
||||
# bucket = "greptimedb"
|
||||
# root = "data"
|
||||
# access_key_id = "test"
|
||||
# secret_access_key = "123456"
|
||||
# endpoint = "https://s3.amazonaws.com"
|
||||
# region = "us-west-2"
|
||||
# enable_virtual_host_style = false
|
||||
|
||||
# Example of using Oss as the storage.
|
||||
# [storage]
|
||||
# type = "Oss"
|
||||
# bucket = "greptimedb"
|
||||
# root = "data"
|
||||
# access_key_id = "test"
|
||||
# access_key_secret = "123456"
|
||||
# endpoint = "https://oss-cn-hangzhou.aliyuncs.com"
|
||||
|
||||
# Example of using Azblob as the storage.
|
||||
# [storage]
|
||||
# type = "Azblob"
|
||||
# container = "greptimedb"
|
||||
# root = "data"
|
||||
# account_name = "test"
|
||||
# account_key = "123456"
|
||||
# endpoint = "https://greptimedb.blob.core.windows.net"
|
||||
# sas_token = ""
|
||||
|
||||
# Example of using Gcs as the storage.
|
||||
# [storage]
|
||||
# type = "Gcs"
|
||||
# bucket = "greptimedb"
|
||||
# root = "data"
|
||||
# scope = "test"
|
||||
# credential_path = "123456"
|
||||
# credential = "base64-credential"
|
||||
# endpoint = "https://storage.googleapis.com"
|
||||
|
||||
## The query engine options.
|
||||
[query]
|
||||
## Parallelism of the query engine.
|
||||
## Default to 0, which means the number of CPU cores.
|
||||
parallelism = 0
|
||||
|
||||
## The data storage options.
[storage]
## The working home directory.
data_home = "./greptimedb_data"

## The storage type used to store the data.
## - `File`: the data is stored in the local file system.
## - `S3`: the data is stored in the S3 object storage.
## - `Gcs`: the data is stored in the Google Cloud Storage.
## - `Azblob`: the data is stored in the Azure Blob Storage.
## - `Oss`: the data is stored in the Aliyun OSS.
type = "File"
|
||||
|
||||
## Read cache configuration for object storage such as 'S3'. It is configured by default when using object storage, and it is recommended for better performance.
|
||||
## A local file directory, defaults to `{data_home}`. An empty string means disabling.
|
||||
## @toml2docs:none-default
|
||||
#+ cache_path = ""
|
||||
|
||||
## The local file cache capacity in bytes. If your disk space is sufficient, it is recommended to set it larger.
|
||||
## @toml2docs:none-default
|
||||
cache_capacity = "5GiB"
|
||||
|
||||
## The S3 bucket name.
|
||||
## **It's only used when the storage type is `S3`, `Oss` and `Gcs`**.
|
||||
## @toml2docs:none-default
|
||||
bucket = "greptimedb"
|
||||
|
||||
## The S3 data will be stored in the specified prefix, for example, `s3://${bucket}/${root}`.
|
||||
## **It's only used when the storage type is `S3`, `Oss` and `Azblob`**.
|
||||
## @toml2docs:none-default
|
||||
root = "greptimedb"
|
||||
|
||||
## The access key id of the aws account.
|
||||
## It's **highly recommended** to use AWS IAM roles instead of hardcoding the access key id and secret key.
|
||||
## **It's only used when the storage type is `S3` and `Oss`**.
|
||||
## @toml2docs:none-default
|
||||
access_key_id = "test"
|
||||
|
||||
## The secret access key of the aws account.
|
||||
## It's **highly recommended** to use AWS IAM roles instead of hardcoding the access key id and secret key.
|
||||
## **It's only used when the storage type is `S3`**.
|
||||
## @toml2docs:none-default
|
||||
secret_access_key = "test"
|
||||
|
||||
## The secret access key of the aliyun account.
|
||||
## **It's only used when the storage type is `Oss`**.
|
||||
## @toml2docs:none-default
|
||||
access_key_secret = "test"
|
||||
|
||||
## The account name of the azure account.
|
||||
## **It's only used when the storage type is `Azblob`**.
|
||||
## @toml2docs:none-default
|
||||
account_name = "test"
|
||||
|
||||
## The account key of the azure account.
|
||||
## **It's only used when the storage type is `Azblob`**.
|
||||
## @toml2docs:none-default
|
||||
account_key = "test"
|
||||
|
||||
## The scope of the google cloud storage.
|
||||
## **It's only used when the storage type is `Gcs`**.
|
||||
## @toml2docs:none-default
|
||||
scope = "test"
|
||||
|
||||
## The credential path of the google cloud storage.
|
||||
## **It's only used when the storage type is `Gcs`**.
|
||||
## @toml2docs:none-default
|
||||
credential_path = "test"
|
||||
|
||||
## The credential of the google cloud storage.
|
||||
## **It's only used when the storage type is `Gcs`**.
|
||||
## @toml2docs:none-default
|
||||
credential = "base64-credential"
|
||||
|
||||
## The container of the azure account.
|
||||
## **It's only used when the storage type is `Azblob`**.
|
||||
## @toml2docs:none-default
|
||||
container = "greptimedb"
|
||||
|
||||
## The sas token of the azure account.
|
||||
## **It's only used when the storage type is `Azblob`**.
|
||||
## @toml2docs:none-default
|
||||
sas_token = ""
|
||||
|
||||
## The endpoint of the S3 service.
|
||||
## **It's only used when the storage type is `S3`, `Oss`, `Gcs` and `Azblob`**.
|
||||
## @toml2docs:none-default
|
||||
endpoint = "https://s3.amazonaws.com"
|
||||
|
||||
## The region of the S3 service.
|
||||
## **It's only used when the storage type is `S3`, `Oss`, `Gcs` and `Azblob`**.
|
||||
## @toml2docs:none-default
|
||||
region = "us-west-2"
|
||||
|
||||
## The http client options to the storage.
|
||||
## **It's only used when the storage type is `S3`, `Oss`, `Gcs` and `Azblob`**.
|
||||
[storage.http_client]
|
||||
|
||||
## The maximum idle connection per host allowed in the pool.
|
||||
pool_max_idle_per_host = 1024
|
||||
|
||||
## The timeout for only the connect phase of a http client.
|
||||
connect_timeout = "30s"
|
||||
|
||||
## The total request timeout, applied from when the request starts connecting until the response body has finished.
|
||||
## Also considered a total deadline.
|
||||
timeout = "30s"
|
||||
|
||||
## The timeout for idle sockets being kept-alive.
|
||||
pool_idle_timeout = "90s"
|
||||
|
||||
## To skip the ssl verification
|
||||
## **Security Notice**: Setting `skip_ssl_validation = true` disables certificate verification, making connections vulnerable to man-in-the-middle attacks. Only use this in development or trusted private networks.
|
||||
skip_ssl_validation = false
|
||||
# TTL for all tables. Disabled by default.
|
||||
# global_ttl = "7d"
|
||||
# Cache configuration for object storage such as 'S3' etc.
|
||||
# cache_path = "/path/local_cache"
|
||||
# The local file cache capacity in bytes.
|
||||
# cache_capacity = "256MB"
|
||||
|
||||
# Custom storage options
|
||||
# [[storage.providers]]
|
||||
# name = "S3"
|
||||
# type = "S3"
|
||||
# bucket = "greptimedb"
|
||||
# root = "data"
|
||||
# access_key_id = "test"
|
||||
# secret_access_key = "123456"
|
||||
# endpoint = "https://s3.amazonaws.com"
|
||||
# region = "us-west-2"
|
||||
# [[storage.providers]]
|
||||
# name = "Gcs"
|
||||
# type = "Gcs"
|
||||
# bucket = "greptimedb"
|
||||
# root = "data"
|
||||
# scope = "test"
|
||||
# credential_path = "123456"
|
||||
# credential = "base64-credential"
|
||||
# endpoint = "https://storage.googleapis.com"
|
||||
#[[storage.providers]]
|
||||
#type = "S3"
|
||||
#[[storage.providers]]
|
||||
#type = "Gcs"
|
||||
|
||||
## The region engine options. You can configure multiple region engines.
[[region_engine]]

## The Mito engine options.
[region_engine.mito]

## Number of region workers.
#+ num_workers = 8

## Request channel size of each worker.
worker_channel_size = 128

## Max batch size for a worker to handle requests.
worker_request_batch_size = 64
|
||||
|
||||
## Number of meta action updated to trigger a new checkpoint for the manifest.
|
||||
# Number of meta action updated to trigger a new checkpoint for the manifest
|
||||
manifest_checkpoint_distance = 10
|
||||
|
||||
## Whether to compress manifest and checkpoint file by gzip (default false).
|
||||
# Whether to compress manifest and checkpoint file by gzip (default false).
|
||||
compress_manifest = false
|
||||
|
||||
## Max number of running background flush jobs (default: 1/2 of cpu cores).
|
||||
## @toml2docs:none-default="Auto"
|
||||
#+ max_background_flushes = 4
|
||||
|
||||
## Max number of running background compaction jobs (default: 1/4 of cpu cores).
|
||||
## @toml2docs:none-default="Auto"
|
||||
#+ max_background_compactions = 2
|
||||
|
||||
## Max number of running background purge jobs (default: number of cpu cores).
|
||||
## @toml2docs:none-default="Auto"
|
||||
#+ max_background_purges = 8
|
||||
|
||||
## Interval to auto flush a region if it has not flushed yet.
auto_flush_interval = "1h"
|
||||
|
||||
## Global write buffer size for all regions. If not set, it's default to 1/8 of OS memory with a max limitation of 1GB.
|
||||
## @toml2docs:none-default="Auto"
|
||||
#+ global_write_buffer_size = "1GB"
|
||||
|
||||
## Global write buffer size threshold to reject write requests. If not set, it's default to 2 times of `global_write_buffer_size`.
|
||||
## @toml2docs:none-default="Auto"
|
||||
#+ global_write_buffer_reject_size = "2GB"
|
||||
|
||||
## Cache size for SST metadata. Setting it to 0 to disable the cache.
|
||||
## If not set, it's default to 1/32 of OS memory with a max limitation of 128MB.
|
||||
## @toml2docs:none-default="Auto"
|
||||
#+ sst_meta_cache_size = "128MB"
|
||||
|
||||
## Cache size for vectors and arrow arrays. Setting it to 0 to disable the cache.
|
||||
## If not set, it's default to 1/16 of OS memory with a max limitation of 512MB.
|
||||
## @toml2docs:none-default="Auto"
|
||||
#+ vector_cache_size = "512MB"
|
||||
|
||||
## Cache size for pages of SST row groups. Setting it to 0 to disable the cache.
|
||||
## If not set, it's default to 1/8 of OS memory.
|
||||
## @toml2docs:none-default="Auto"
|
||||
#+ page_cache_size = "512MB"
|
||||
|
||||
## Cache size for time series selector (e.g. `last_value()`). Setting it to 0 to disable the cache.
|
||||
## If not set, it's default to 1/16 of OS memory with a max limitation of 512MB.
|
||||
## @toml2docs:none-default="Auto"
|
||||
#+ selector_result_cache_size = "512MB"
|
||||
|
||||
## Whether to enable the write cache, it's enabled by default when using object storage. It is recommended to enable it when using object storage for better performance.
|
||||
enable_write_cache = false
|
||||
|
||||
## File system path for write cache, defaults to `{data_home}`.
|
||||
write_cache_path = ""
|
||||
|
||||
## Capacity for write cache. If your disk space is sufficient, it is recommended to set it larger.
|
||||
write_cache_size = "5GiB"
|
||||
|
||||
## TTL for write cache.
|
||||
## @toml2docs:none-default
|
||||
write_cache_ttl = "8h"
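## Example (illustrative only): enable the write cache with a larger capacity when running on
## object storage and local disk space allows it.
# enable_write_cache = true
# write_cache_size = "20GiB"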
|
||||
|
||||
## Buffer size for SST writing.
sst_write_buffer_size = "8MB"
|
||||
|
||||
## Capacity of the channel to send data from parallel scan tasks to the main task.
|
||||
# Parallelism to scan a region (default: 1/4 of cpu cores).
|
||||
# - 0: using the default value (1/4 of cpu cores).
|
||||
# - 1: scan in current thread.
|
||||
# - n: scan in parallelism n.
|
||||
scan_parallelism = 0
|
||||
# Capacity of the channel to send data from parallel scan tasks to the main task (default 32).
|
||||
parallel_scan_channel_size = 32
|
||||
|
||||
## Maximum number of SST files to scan concurrently.
|
||||
max_concurrent_scan_files = 384
|
||||
# Log options
|
||||
# [logging]
|
||||
# Specify logs directory.
|
||||
# dir = "/tmp/greptimedb/logs"
|
||||
# Specify the log level [info | debug | error | warn]
|
||||
# level = "info"
|
||||
# whether enable tracing, default is false
|
||||
# enable_otlp_tracing = false
|
||||
# tracing exporter endpoint with format `ip:port`, we use grpc oltp as exporter, default endpoint is `localhost:4317`
|
||||
# otlp_endpoint = "localhost:4317"
|
||||
# The percentage of tracing will be sampled and exported. Valid range `[0, 1]`, 1 means all traces are sampled, 0 means all traces are not sampled, the default value is 1. ratio > 1 are treated as 1. Fractions < 0 are treated as 0
|
||||
# tracing_sample_ratio = 1.0
|
||||
# Whether to append logs to stdout. Defaults to true.
|
||||
# append_stdout = true
|
||||
|
||||
## Whether to allow stale WAL entries read during replay.
|
||||
allow_stale_entries = false
|
||||
|
||||
## Minimum time interval between two compactions.
|
||||
## To align with the old behavior, the default value is 0 (no restrictions).
|
||||
min_compaction_interval = "0m"
|
||||
|
||||
## The options for index in Mito engine.
|
||||
[region_engine.mito.index]
|
||||
|
||||
## Auxiliary directory path for the index in filesystem, used to store intermediate files for
|
||||
## creating the index and staging files for searching the index, defaults to `{data_home}/index_intermediate`.
|
||||
## The default name for this directory is `index_intermediate` for backward compatibility.
|
||||
##
|
||||
## This path contains two subdirectories:
|
||||
## - `__intm`: for storing intermediate files used during creating index.
|
||||
## - `staging`: for storing staging files used during searching index.
|
||||
aux_path = ""
|
||||
|
||||
## The max capacity of the staging directory.
|
||||
staging_size = "2GB"
|
||||
|
||||
## The TTL of the staging directory.
|
||||
## Defaults to 7 days.
|
||||
## Setting it to "0s" to disable TTL.
|
||||
staging_ttl = "7d"
|
||||
|
||||
## Cache size for inverted index metadata.
|
||||
metadata_cache_size = "64MiB"
|
||||
|
||||
## Cache size for inverted index content.
|
||||
content_cache_size = "128MiB"
|
||||
|
||||
## Page size for inverted index content cache.
|
||||
content_cache_page_size = "64KiB"
|
||||
|
||||
## Cache size for index result.
|
||||
result_cache_size = "128MiB"
|
||||
|
||||
## The options for inverted index in Mito engine.
|
||||
[region_engine.mito.inverted_index]
|
||||
|
||||
## Whether to create the index on flush.
|
||||
## - `auto`: automatically (default)
|
||||
## - `disable`: never
|
||||
create_on_flush = "auto"
|
||||
|
||||
## Whether to create the index on compaction.
|
||||
## - `auto`: automatically (default)
|
||||
## - `disable`: never
|
||||
create_on_compaction = "auto"
|
||||
|
||||
## Whether to apply the index on query
|
||||
## - `auto`: automatically (default)
|
||||
## - `disable`: never
|
||||
apply_on_query = "auto"
|
||||
|
||||
## Memory threshold for performing an external sort during index creation.
|
||||
## - `auto`: automatically determine the threshold based on the system memory size (default)
|
||||
## - `unlimited`: no memory limit
|
||||
## - `[size]` e.g. `64MB`: fixed memory threshold
|
||||
mem_threshold_on_create = "auto"
|
||||
|
||||
## Deprecated, use `region_engine.mito.index.aux_path` instead.
|
||||
intermediate_path = ""
|
||||
|
||||
## The options for full-text index in Mito engine.
|
||||
[region_engine.mito.fulltext_index]
|
||||
|
||||
## Whether to create the index on flush.
|
||||
## - `auto`: automatically (default)
|
||||
## - `disable`: never
|
||||
create_on_flush = "auto"
|
||||
|
||||
## Whether to create the index on compaction.
|
||||
## - `auto`: automatically (default)
|
||||
## - `disable`: never
|
||||
create_on_compaction = "auto"
|
||||
|
||||
## Whether to apply the index on query
|
||||
## - `auto`: automatically (default)
|
||||
## - `disable`: never
|
||||
apply_on_query = "auto"
|
||||
|
||||
## Memory threshold for index creation.
|
||||
## - `auto`: automatically determine the threshold based on the system memory size (default)
|
||||
## - `unlimited`: no memory limit
|
||||
## - `[size]` e.g. `64MB`: fixed memory threshold
|
||||
mem_threshold_on_create = "auto"
|
||||
|
||||
## The options for bloom filter in Mito engine.
|
||||
[region_engine.mito.bloom_filter_index]
|
||||
|
||||
## Whether to create the bloom filter on flush.
|
||||
## - `auto`: automatically (default)
|
||||
## - `disable`: never
|
||||
create_on_flush = "auto"
|
||||
|
||||
## Whether to create the bloom filter on compaction.
|
||||
## - `auto`: automatically (default)
|
||||
## - `disable`: never
|
||||
create_on_compaction = "auto"
|
||||
|
||||
## Whether to apply the bloom filter on query
|
||||
## - `auto`: automatically (default)
|
||||
## - `disable`: never
|
||||
apply_on_query = "auto"
|
||||
|
||||
## Memory threshold for bloom filter creation.
|
||||
## - `auto`: automatically determine the threshold based on the system memory size (default)
|
||||
## - `unlimited`: no memory limit
|
||||
## - `[size]` e.g. `64MB`: fixed memory threshold
|
||||
mem_threshold_on_create = "auto"
|
||||
|
||||
[region_engine.mito.memtable]
|
||||
## Memtable type.
|
||||
## - `time_series`: time-series memtable
|
||||
## - `partition_tree`: partition tree memtable (experimental)
|
||||
type = "time_series"
|
||||
|
||||
## The max number of keys in one shard.
|
||||
## Only available for `partition_tree` memtable.
|
||||
index_max_keys_per_shard = 8192
|
||||
|
||||
## The max rows of data inside the actively writing buffer in one shard.
|
||||
## Only available for `partition_tree` memtable.
|
||||
data_freeze_threshold = 32768
|
||||
|
||||
## Max dictionary bytes.
|
||||
## Only available for `partition_tree` memtable.
|
||||
fork_dictionary_bytes = "1GiB"
|
||||
|
||||
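## For illustration, switching to the experimental `partition_tree` memtable could look like
## this (the values are examples; the keys above only take effect for that memtable type):
# type = "partition_tree"
# index_max_keys_per_shard = 8192
# data_freeze_threshold = 32768
# fork_dictionary_bytes = "1GiB"
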
[[region_engine]]
## Enable the file engine.
[region_engine.file]

[[region_engine]]
## Metric engine options.
[region_engine.metric]
## Whether to enable the experimental sparse primary key encoding.
experimental_sparse_primary_key_encoding = false

## The logging options.
[logging]
## The directory to store the log files. If set to empty, logs will not be written to files.
dir = "./greptimedb_data/logs"

## The log level. Can be `info`/`debug`/`warn`/`error`.
## @toml2docs:none-default
level = "info"

## Enable OTLP tracing.
enable_otlp_tracing = false

## The OTLP tracing endpoint.
otlp_endpoint = "http://localhost:4318/v1/traces"

## Whether to append logs to stdout.
append_stdout = true

## The log format. Can be `text`/`json`.
log_format = "text"

## The maximum number of log files.
max_log_files = 720

## The OTLP tracing export protocol. Can be `grpc`/`http`.
otlp_export_protocol = "http"

## Additional OTLP headers, only valid when using OTLP over http.
[logging.otlp_headers]
## @toml2docs:none-default
#Authorization = "Bearer my-token"
## @toml2docs:none-default
#Database = "My database"

## The percentage of traces that will be sampled and exported.
## Valid range `[0, 1]`: 1 means all traces are sampled, 0 means no traces are sampled; the default value is 1.
## Ratios > 1 are treated as 1 and ratios < 0 are treated as 0.
[logging.tracing_sample_ratio]
default_ratio = 1.0

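## For illustration, exporting traces over OTLP http with an auth header could be configured
## like this (the endpoint and token are examples):
# enable_otlp_tracing = true
# otlp_endpoint = "http://localhost:4318/v1/traces"
# otlp_export_protocol = "http"
# [logging.otlp_headers]
# Authorization = "Bearer my-token"
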
## The slow query log options.
[slow_query]
## Whether to enable the slow query log.
#+ enable = false

## The record type of slow queries. It can be `system_table` or `log`.
## @toml2docs:none-default
#+ record_type = "system_table"

## The threshold of slow query.
## @toml2docs:none-default
#+ threshold = "10s"

## The sampling ratio of slow query log. The value should be in the range of (0, 1].
## @toml2docs:none-default
#+ sample_ratio = 1.0

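## For illustration, recording queries slower than 10s to the system table while sampling half
## of them could look like this (the values are examples):
# enable = true
# record_type = "system_table"
# threshold = "10s"
# sample_ratio = 0.5
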
## The standalone instance can export its metrics and send them to a Prometheus-compatible service (e.g. `greptimedb` itself) via the remote-write API.
## This is only used for `greptimedb` to export its own metrics internally. It's different from prometheus scrape.
[export_metrics]
## Whether to enable export metrics.
enable = false
## The interval of export metrics.
write_interval = "30s"

## For `standalone` mode, `self_import` is recommended to collect metrics generated by itself.
## You must create the database before enabling it.
[export_metrics.self_import]
## @toml2docs:none-default
db = "greptime_metrics"

[export_metrics.remote_write]
## The Prometheus remote-write endpoint that the metrics are sent to. An example URL: `http://127.0.0.1:4000/v1/prometheus/write?db=greptime_metrics`.
url = ""

## HTTP headers carried by the Prometheus remote-write requests.
headers = { }

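## For illustration, a standalone instance importing its own metrics every 30s could be
## configured like this (assuming the `greptime_metrics` database has been created first):
# [export_metrics]
# enable = true
# write_interval = "30s"
# [export_metrics.self_import]
# db = "greptime_metrics"
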
## The tracing options. Only effective when compiled with the `tokio-console` feature.
#+ [tracing]
## The tokio console address.
## @toml2docs:none-default
#+ tokio_console_addr = "127.0.0.1"

## The memory options.
[memory]
## Whether to enable heap profiling activation during startup.
## When enabled, heap profiling will be activated if the `MALLOC_CONF` environment variable
## is set to "prof:true,prof_active:false". The official image adds this env variable.
## Default is true.
enable_heap_profiling = true
# Standalone export the metrics generated by itself
# encoded to Prometheus remote-write format
# and send to Prometheus remote-write compatible receiver (e.g. send to `greptimedb` itself)
# This is only used for `greptimedb` to export its own metrics internally. It's different from prometheus scrape.
# [export_metrics]
# whether enable export metrics, default is false
# enable = false
# The url of metrics export endpoint, default is `frontend` default HTTP endpoint.
# endpoint = "127.0.0.1:4000"
# The database name of exported metrics stores, user needs to specify a valid database
# db = ""
# The interval of export metrics
# write_interval = "30s"
# HTTP headers of Prometheus remote-write carry
# headers = {}

2
cyborg/.gitignore
vendored
@@ -1,2 +0,0 @@
node_modules
.env
@@ -1,156 +0,0 @@
/*
 * Copyright 2023 Greptime Team
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

import * as core from "@actions/core";
import {obtainClient} from "@/common";

interface RepoConfig {
  tokenEnv: string;
  repo: string;
  workflowLogic: (version: string) => [string, string] | null;
}

const REPO_CONFIGS: Record<string, RepoConfig> = {
  website: {
    tokenEnv: "WEBSITE_REPO_TOKEN",
    repo: "website",
    workflowLogic: (version: string) => {
      // Skip nightly versions for website
      if (version.includes('nightly')) {
        console.log('Nightly version detected for website, skipping workflow trigger.');
        return null;
      }
      return ['bump-patch-version.yml', version];
    }
  },
  demo: {
    tokenEnv: "DEMO_REPO_TOKEN",
    repo: "demo-scene",
    workflowLogic: (version: string) => {
      // Skip nightly versions for demo
      if (version.includes('nightly')) {
        console.log('Nightly version detected for demo, skipping workflow trigger.');
        return null;
      }
      return ['bump-patch-version.yml', version];
    }
  },
  docs: {
    tokenEnv: "DOCS_REPO_TOKEN",
    repo: "docs",
    workflowLogic: (version: string) => {
      // Check if it's a nightly version
      if (version.includes('nightly')) {
        return ['bump-nightly-version.yml', version];
      }

      const parts = version.split('.');
      if (parts.length !== 3) {
        throw new Error('Invalid version format');
      }

      // If patch version (last number) is 0, it's a major version
      // Return only major.minor version
      if (parts[2] === '0') {
        return ['bump-version.yml', `${parts[0]}.${parts[1]}`];
      }

      // Otherwise it's a patch version, use full version
      return ['bump-patch-version.yml', version];
    }
  }
};

async function triggerWorkflow(repoConfig: RepoConfig, workflowId: string, version: string) {
  const client = obtainClient(repoConfig.tokenEnv);
  try {
    await client.rest.actions.createWorkflowDispatch({
      owner: "GreptimeTeam",
      repo: repoConfig.repo,
      workflow_id: workflowId,
      ref: "main",
      inputs: {
        version,
      },
    });
    console.log(`Successfully triggered ${workflowId} workflow for ${repoConfig.repo} with version ${version}`);
  } catch (error) {
    core.setFailed(`Failed to trigger workflow for ${repoConfig.repo}: ${error.message}`);
    throw error;
  }
}

async function processRepo(repoName: string, version: string) {
  const repoConfig = REPO_CONFIGS[repoName];
  if (!repoConfig) {
    throw new Error(`Unknown repository: ${repoName}`);
  }

  try {
    const workflowResult = repoConfig.workflowLogic(version);
    if (workflowResult === null) {
      // Skip this repo (e.g., nightly version for website)
      return;
    }

    const [workflowId, apiVersion] = workflowResult;
    await triggerWorkflow(repoConfig, workflowId, apiVersion);
  } catch (error) {
    core.setFailed(`Error processing ${repoName} with version ${version}: ${error.message}`);
    throw error;
  }
}

async function main() {
  const version = process.env.VERSION;
  if (!version) {
    core.setFailed("VERSION environment variable is required");
    process.exit(1);
  }

  // Remove 'v' prefix if exists
  const cleanVersion = version.startsWith('v') ? version.slice(1) : version;

  // Get target repositories from environment variable
  // Default to both if not specified
  const targetRepos = process.env.TARGET_REPOS?.split(',').map(repo => repo.trim()) || ['website', 'docs'];

  console.log(`Processing version ${cleanVersion} for repositories: ${targetRepos.join(', ')}`);

  const errors: string[] = [];

  // Process each repository
  for (const repo of targetRepos) {
    try {
      await processRepo(repo, cleanVersion);
    } catch (error) {
      errors.push(`${repo}: ${error.message}`);
    }
  }

  if (errors.length > 0) {
    core.setFailed(`Failed to process some repositories: ${errors.join('; ')}`);
    process.exit(1);
  }

  console.log('All repositories processed successfully');
}

// Execute main function
main().catch((error) => {
  core.setFailed(`Unexpected error: ${error.message}`);
  process.exit(1);
});
@@ -1,79 +0,0 @@
/*
 * Copyright 2023 Greptime Team
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

import * as core from '@actions/core'
import {handleError, obtainClient} from "@/common";
import {context} from "@actions/github";
import {PullRequestEvent} from "@octokit/webhooks-types";
import {Options, sync as conventionalCommitsParser} from 'conventional-commits-parser';
import conventionalCommitTypes from 'conventional-commit-types';
import _ from "lodash";

const defaultTypes = Object.keys(conventionalCommitTypes.types)
const breakingChangeLabel = "breaking-change"

// These options are copied from [1].
// [1] https://github.com/conventional-changelog/conventional-changelog/blob/3f60b464/packages/conventional-changelog-conventionalcommits/src/parser.js
export const parserOpts: Options = {
  headerPattern: /^(\w*)(?:\((.*)\))?!?: (.*)$/,
  breakingHeaderPattern: /^(\w*)(?:\((.*)\))?!: (.*)$/,
  headerCorrespondence: [
    'type',
    'scope',
    'subject'
  ],
  noteKeywords: ['BREAKING CHANGE', 'BREAKING-CHANGE'],
  revertPattern: /^(?:Revert|revert:)\s"?([\s\S]+?)"?\s*This reverts commit (\w*)\./i,
  revertCorrespondence: ['header', 'hash'],
  issuePrefixes: ['#']
}

async function main() {
  if (!context.payload.pull_request) {
    throw new Error(`Only pull request event supported. ${context.eventName} is unsupported.`)
  }

  const client = obtainClient("GITHUB_TOKEN")
  const payload = context.payload as PullRequestEvent
  const { owner, repo, number } = {
    owner: payload.pull_request.base.user.login,
    repo: payload.pull_request.base.repo.name,
    number: payload.pull_request.number,
  }
  const { data: pull_request } = await client.rest.pulls.get({
    owner, repo, pull_number: number,
  })

  const commit = conventionalCommitsParser(pull_request.title, parserOpts)
  core.info(`Receive commit: ${JSON.stringify(commit)}`)

  if (!commit.type) {
    throw Error(`Malformed commit: ${JSON.stringify(commit)}`)
  }

  if (!defaultTypes.includes(commit.type)) {
    throw Error(`Unexpected type ${JSON.stringify(commit.type)} of commit: ${JSON.stringify(commit)}`)
  }

  const breakingChanges = _.filter(commit.notes, _.matches({ title: 'BREAKING CHANGE'}))
  if (breakingChanges.length > 0) {
    await client.rest.issues.addLabels({
      owner, repo, issue_number: number, labels: [breakingChangeLabel]
    })
  }
}

main().catch(handleError)
@@ -1,119 +0,0 @@
/*
 * Copyright 2023 Greptime Team
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

import * as core from '@actions/core'
import {handleError, obtainClient} from "@/common";
import {context} from "@actions/github";
import {PullRequestEditedEvent, PullRequestEvent, PullRequestOpenedEvent} from "@octokit/webhooks-types";
// @ts-expect-error moduleResolution:nodenext issue 54523
import {RequestError} from "@octokit/request-error";

const needFollowUpDocs = "[x] This PR requires documentation updates."
const labelDocsNotRequired = "docs-not-required"
const labelDocsRequired = "docs-required"

async function main() {
  if (!context.payload.pull_request) {
    throw new Error(`Only pull request event supported. ${context.eventName} is unsupported.`)
  }

  const client = obtainClient("GITHUB_TOKEN")
  const docsClient = obtainClient("DOCS_REPO_TOKEN")
  const payload = context.payload as PullRequestEvent
  const { owner, repo, number, actor, title, html_url } = {
    owner: payload.pull_request.base.user.login,
    repo: payload.pull_request.base.repo.name,
    number: payload.pull_request.number,
    title: payload.pull_request.title,
    html_url: payload.pull_request.html_url,
    actor: payload.pull_request.user.login,
  }
  const followUpDocs = checkPullRequestEvent(payload)
  if (followUpDocs) {
    core.info("Follow up docs.")
    await client.rest.issues.removeLabel({
      owner, repo, issue_number: number, name: labelDocsNotRequired,
    }).catch((e: RequestError) => {
      if (e.status != 404) {
        throw e;
      }
      core.debug(`Label ${labelDocsNotRequired} not exist.`)
    })
    await client.rest.issues.addLabels({
      owner, repo, issue_number: number, labels: [labelDocsRequired],
    })

    // Get available assignees for the docs repo
    const assigneesResponse = await docsClient.rest.issues.listAssignees({
      owner: 'GreptimeTeam',
      repo: 'docs',
    })
    const validAssignees = assigneesResponse.data.map(assignee => assignee.login)
    core.info(`Available assignees: ${validAssignees.join(', ')}`)

    // Check if the actor is a valid assignee, otherwise fallback to fengjiachun
    const assignee = validAssignees.includes(actor) ? actor : 'fengjiachun'
    core.info(`Assigning issue to: ${assignee}`)

    await docsClient.rest.issues.create({
      owner: 'GreptimeTeam',
      repo: 'docs',
      title: `Update docs for ${title}`,
      body: `A document change request is generated from ${html_url}`,
      assignee: assignee,
    }).then((res) => {
      core.info(`Created issue ${res.data}`)
    })
  } else {
    core.info("No need to follow up docs.")
    await client.rest.issues.removeLabel({
      owner, repo, issue_number: number, name: labelDocsRequired
    }).catch((e: RequestError) => {
      if (e.status != 404) {
        throw e;
      }
      core.debug(`Label ${labelDocsRequired} not exist.`)
    })
    await client.rest.issues.addLabels({
      owner, repo, issue_number: number, labels: [labelDocsNotRequired],
    })
  }
}

function checkPullRequestEvent(payload: PullRequestEvent) {
  switch (payload.action) {
    case "opened":
      return checkPullRequestOpenedEvent(payload as PullRequestOpenedEvent)
    case "edited":
      return checkPullRequestEditedEvent(payload as PullRequestEditedEvent)
    default:
      throw new Error(`${payload.action} is unsupported.`)
  }
}

function checkPullRequestOpenedEvent(event: PullRequestOpenedEvent): boolean {
  // @ts-ignore
  return event.pull_request.body?.includes(needFollowUpDocs)
}

function checkPullRequestEditedEvent(event: PullRequestEditedEvent): boolean {
  const previous = event.changes.body?.from.includes(needFollowUpDocs)
  const current = event.pull_request.body?.includes(needFollowUpDocs)
  // from docs-not-need to docs-required
  return (!previous) && current
}

main().catch(handleError)
@@ -1,83 +0,0 @@
/*
 * Copyright 2023 Greptime Team
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

import * as core from '@actions/core'
import {handleError, obtainClient} from "@/common"
import {context} from "@actions/github"
import _ from "lodash"

async function main() {
  const success = process.env["CI_REPORT_STATUS"] === "true"
  core.info(`CI_REPORT_STATUS=${process.env["CI_REPORT_STATUS"]}, resolved to ${success}`)

  const client = obtainClient("GITHUB_TOKEN")
  const title = `Workflow run '${context.workflow}' failed`
  const url = `${process.env["GITHUB_SERVER_URL"]}/${process.env["GITHUB_REPOSITORY"]}/actions/runs/${process.env["GITHUB_RUN_ID"]}`
  const failure_comment = `@GreptimeTeam/db-approver\nNew failure: ${url} `
  const success_comment = `@GreptimeTeam/db-approver\nBack to success: ${url}`

  const {owner, repo} = context.repo
  const labels = ['O-ci-failure']

  const issues = await client.paginate(client.rest.issues.listForRepo, {
    owner,
    repo,
    labels: labels.join(','),
    state: "open",
    sort: "created",
    direction: "desc",
  });
  const issue = _.find(issues, (i) => i.title === title);

  if (issue) { // exist issue
    core.info(`Found previous issue ${issue.html_url}`)
    if (!success) {
      await client.rest.issues.createComment({
        owner,
        repo,
        issue_number: issue.number,
        body: failure_comment,
      })
    } else {
      await client.rest.issues.createComment({
        owner,
        repo,
        issue_number: issue.number,
        body: success_comment,
      })
      await client.rest.issues.update({
        owner,
        repo,
        issue_number: issue.number,
        state: "closed",
        state_reason: "completed",
      })
    }
    core.setOutput("html_url", issue.html_url)
  } else if (!success) { // create new issue for failure
    const issue = await client.rest.issues.create({
      owner,
      repo,
      title,
      labels,
      body: failure_comment,
    })
    core.info(`Created issue ${issue.data.html_url}`)
    core.setOutput("html_url", issue.data.html_url)
  }
}

main().catch(handleError)
@@ -1,73 +0,0 @@
/*
 * Copyright 2023 Greptime Team
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

import * as core from '@actions/core'
import {GitHub} from "@actions/github/lib/utils"
import _ from "lodash";
import dayjs from "dayjs";
import {handleError, obtainClient} from "@/common";

async function main() {
  const client = obtainClient("GITHUB_TOKEN")
  await unassign(client)
}

async function unassign(client: InstanceType<typeof GitHub>) {
  const owner = "GreptimeTeam"
  const repo = "greptimedb"

  const dt = dayjs().subtract(14, 'days');
  core.info(`Open issues updated before ${dt.toISOString()} will be considered stale.`)

  const members = await client.paginate(client.rest.repos.listCollaborators, {
    owner,
    repo,
    permission: "push",
    per_page: 100
  }).then((members) => members.map((member) => member.login))
  core.info(`Members (${members.length}): ${members}`)

  const issues = await client.paginate(client.rest.issues.listForRepo, {
    owner,
    repo,
    state: "open",
    sort: "created",
    direction: "asc",
    per_page: 100
  })
  for (const issue of issues) {
    let assignees = [];
    if (issue.assignee) {
      assignees.push(issue.assignee.login)
    }
    for (const assignee of issue.assignees) {
      assignees.push(assignee.login)
    }
    assignees = _.uniq(assignees)
    assignees = _.difference(assignees, members)
    if (assignees.length > 0 && dayjs(issue.updated_at).isBefore(dt)) {
      core.info(`Assignees ${assignees} of issue ${issue.number} will be unassigned.`)
      await client.rest.issues.removeAssignees({
        owner,
        repo,
        issue_number: issue.number,
        assignees: assignees,
      })
    }
  }
}

main().catch(handleError)
@@ -1,26 +0,0 @@
{
  "name": "cyborg",
  "version": "1.0.0",
  "description": "Automator for GreptimeDB Repository Management",
  "private": true,
  "packageManager": "pnpm@8.15.5",
  "dependencies": {
    "@actions/core": "^1.10.1",
    "@actions/github": "^6.0.0",
    "@octokit/request-error": "^6.1.1",
    "@octokit/webhooks-types": "^7.5.1",
    "conventional-commit-types": "^3.0.0",
    "conventional-commits-parser": "^5.0.0",
    "dayjs": "^1.11.11",
    "dotenv": "^16.4.5",
    "lodash": "^4.17.21"
  },
  "devDependencies": {
    "@types/conventional-commits-parser": "^5.0.0",
    "@types/lodash": "^4.17.0",
    "@types/node": "^20.12.7",
    "tsconfig-paths": "^4.2.0",
    "tsx": "^4.8.2",
    "typescript": "^5.4.5"
  }
}
Some files were not shown because too many files have changed in this diff.