Compare commits

..

11 Commits

Author     SHA1        Message                                              Date
Discord9   24f5e56196  PY_ENV_MAN for env manager choice                    2023-12-07 10:42:26 +08:00
Discord9   c85d569797  feat: refactor parse_args&rm USE_ENV                 2023-12-07 10:35:40 +08:00
Discord9   e95a8e070c  feat: more opts for cli                              2023-12-06 17:58:49 +08:00
Discord9   b71bf11772  feat: add choices for venv/conda                     2023-12-06 17:39:12 +08:00
Discord9   ee0a3972fc  feat: remove check script                            2023-12-06 16:40:58 +08:00
Discord9   8fb40c66a4  feat: opt path                                       2023-12-06 16:29:00 +08:00
Discord9   e855f6370e  chore: add newline in end                            2023-12-06 16:29:00 +08:00
Discord9   fb5dcbc40c  chore: remove python script for that                 2023-12-06 16:29:00 +08:00
Discord9   0d109436b8  feat:use python script instead                       2023-12-06 16:29:00 +08:00
Discord9   cbae03af07  feat: check aft build in Makefile                    2023-12-06 16:29:00 +08:00
Discord9   902e6ead60  feat: shell check&install needed python shared lib   2023-12-06 16:29:00 +08:00
1864 changed files with 48448 additions and 254573 deletions
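
The Makefile and script changes these commits describe (letting the pyo3 build pick between venv and conda through an env-manager switch) are not part of the file excerpts below. As a rough sketch only, with hypothetical variable and path names rather than the repository's actual code, a PY_ENV_MAN-style choice might look like this:

#!/usr/bin/env bash
# Hypothetical illustration of an env-manager switch; the names below are
# assumptions, not taken from the GreptimeDB Makefile or scripts.
set -euo pipefail

PY_ENV_MAN="${PY_ENV_MAN:-venv}"   # "venv" or "conda"
REQUIRED_PY="3.10"

case "$PY_ENV_MAN" in
  venv)
    python3 -m venv .venv
    # shellcheck disable=SC1091
    source .venv/bin/activate
    ;;
  conda)
    conda create -y -n greptime-build "python=${REQUIRED_PY}"
    eval "$(conda shell.bash hook)"   # needed before `conda activate` in scripts
    conda activate greptime-build
    ;;
  *)
    echo "Unknown PY_ENV_MAN: ${PY_ENV_MAN} (expected venv or conda)" >&2
    exit 1
    ;;
esac

# Print the shared-library dir so the build can link against the right libpython.
python -c "import sysconfig; print(sysconfig.get_config_var('LIBDIR'))"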


@@ -3,3 +3,13 @@ linker = "aarch64-linux-gnu-gcc"
[alias]
sqlness = "run --bin sqlness-runner --"
[build]
rustflags = [
# lints
# TODO: use lint configuration in cargo https://github.com/rust-lang/cargo/issues/5034
"-Wclippy::print_stdout",
"-Wclippy::print_stderr",
"-Wclippy::implicit_clone",
]
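
The rustflags added above turn three Clippy lints into workspace-wide warnings. Assuming a standard cargo/clippy setup, roughly the same check can be run one-off, without relying on the .cargo/config.toml entry:

# Warn on println!/eprintln! left in library code and on implicit clones.
cargo clippy --workspace --all-targets -- \
  -W clippy::print_stdout \
  -W clippy::print_stderr \
  -W clippy::implicit_clone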


@@ -1,15 +0,0 @@
# yaml-language-server: $schema=https://coderabbit.ai/integrations/schema.v2.json
language: "en-US"
early_access: false
reviews:
profile: "chill"
request_changes_workflow: false
high_level_summary: true
poem: true
review_status: true
collapse_walkthrough: false
auto_review:
enabled: false
drafts: false
chat:
auto_reply: true


@@ -1,10 +0,0 @@
root = true
[*]
end_of_line = lf
indent_style = space
insert_final_newline = true
trim_trailing_whitespace = true
[{Makefile,**.mk}]
indent_style = tab


@@ -18,19 +18,4 @@ GT_AZBLOB_ENDPOINT=AZBLOB endpoint
GT_GCS_BUCKET = GCS bucket
GT_GCS_SCOPE = GCS scope
GT_GCS_CREDENTIAL_PATH = GCS credential path
GT_GCS_CREDENTIAL = GCS credential
GT_GCS_ENDPOINT = GCS end point
# Settings for kafka wal test
GT_KAFKA_ENDPOINTS = localhost:9092
# Setting for fuzz tests
GT_MYSQL_ADDR = localhost:4002
# Setting for unstable fuzz tests
GT_FUZZ_BINARY_PATH=/path/to/
GT_FUZZ_INSTANCE_ROOT_DIR=/tmp/unstable_greptime
GT_FUZZ_INPUT_MAX_ROWS=2048
GT_FUZZ_INPUT_MAX_TABLES=32
GT_FUZZ_INPUT_MAX_COLUMNS=32
GT_FUZZ_INPUT_MAX_ALTER_ACTIONS=256
GT_FUZZ_INPUT_MAX_INSERT_ACTIONS=8
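
The GT_FUZZ_* settings above feed the fuzz targets under tests-fuzz, which the fuzz-test composite action later in this diff runs via cargo fuzz. A minimal local run, assuming cargo-fuzz is installed and a GreptimeDB standalone instance is listening on the MySQL port, might look like:

# Assumes `cargo install cargo-fuzz` and a running GreptimeDB standalone.
export GT_MYSQL_ADDR=localhost:4002
export GT_FUZZ_INPUT_MAX_ROWS=2048
export GT_FUZZ_INPUT_MAX_TABLES=32

# Same invocation shape as the fuzz-test action shown further down.
cargo fuzz run fuzz_create_table --fuzz-dir tests-fuzz -D -s none -- -max_total_time=60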

.github/CODEOWNERS

@@ -1,27 +0,0 @@
# GreptimeDB CODEOWNERS
# These owners will be the default owners for everything in the repo.
* @GreptimeTeam/db-approver
## [Module] Database Engine
/src/index @zhongzc
/src/mito2 @evenyag @v0y4g3r @waynexia
/src/query @evenyag
## [Module] Distributed
/src/common/meta @MichaelScofield
/src/common/procedure @MichaelScofield
/src/meta-client @MichaelScofield
/src/meta-srv @MichaelScofield
## [Module] Write Ahead Log
/src/log-store @v0y4g3r
/src/store-api @v0y4g3r
## [Module] Metrics Engine
/src/metric-engine @waynexia
/src/promql @waynexia
## [Module] Flow
/src/flow @zhongzc @waynexia


@@ -1,7 +1,7 @@
---
name: Bug report
description: Is something not working? Help us fix it!
-labels: [ "C-bug" ]
+labels: [ "bug" ]
body:
- type: markdown
attributes:
@@ -21,7 +21,6 @@ body:
- Locking issue
- Performance issue
- Unexpected error
- User Experience
- Other
validations:
required: true
@@ -34,14 +33,9 @@ body:
multiple: true
options:
- Standalone mode
- Distributed Cluster
- Storage Engine
- Query Engine
- Table Engine
- Write Protocols
- Metasrv
- Frontend
- Datanode
- Meta
- Other
validations:
required: true
@@ -83,17 +77,6 @@ body:
validations:
required: true
- type: input
id: greptimedb
attributes:
label: What version of GreptimeDB did you use?
description: |
Please provide the version of GreptimeDB. For example:
0.5.1 etc. You can get it by executing command line `greptime --version`.
placeholder: "0.5.1"
validations:
required: true
- type: textarea
id: logs
attributes:


@@ -4,5 +4,5 @@ contact_links:
url: https://greptime.com/slack
about: Get free help from the Greptime community
- name: Greptime Community Discussion
-url: https://github.com/greptimeTeam/discussions
+url: https://github.com/greptimeTeam/greptimedb/discussions
about: Get free help from the Greptime community


@@ -1,7 +1,7 @@
---
name: Enhancement
description: Suggest an enhancement to existing functionality
-labels: [ "C-enhancement" ]
+labels: [ "enhancement" ]
body:
- type: dropdown
id: type


@@ -1,7 +1,7 @@
---
-name: New Feature
+name: Feature request
description: Suggest a new feature for GreptimeDB
-labels: [ "C-feature" ]
+labels: [ "feature request" ]
body:
- type: markdown
id: info


@@ -1,18 +0,0 @@
name: Build and push CI Docker image
description: Build and push CI Docker image to local registry
inputs:
binary_path:
default: "./bin"
description: "Binary path"
runs:
using: composite
steps:
- name: Build and push to local registry
uses: docker/build-push-action@v5
with:
context: .
file: ./docker/ci/ubuntu/Dockerfile.fuzztests
push: true
tags: localhost:5001/greptime/greptimedb:latest
build-args: |
BINARY_PATH=${{ inputs.binary_path }}


@@ -22,15 +22,15 @@ inputs:
build-dev-builder-ubuntu:
description: Build dev-builder-ubuntu image
required: false
-default: "true"
+default: 'true'
build-dev-builder-centos:
description: Build dev-builder-centos image
required: false
-default: "true"
+default: 'true'
build-dev-builder-android:
description: Build dev-builder-android image
required: false
-default: "true"
+default: 'true'
runs:
using: composite
steps:
@@ -47,10 +47,10 @@ runs:
run: |
make dev-builder \
BASE_IMAGE=ubuntu \
-BUILDX_MULTI_PLATFORM_BUILD=all \
+BUILDX_MULTI_PLATFORM_BUILD=true \
IMAGE_REGISTRY=${{ inputs.dockerhub-image-registry }} \
IMAGE_NAMESPACE=${{ inputs.dockerhub-image-namespace }} \
-DEV_BUILDER_IMAGE_TAG=${{ inputs.version }}
+IMAGE_TAG=${{ inputs.version }}
- name: Build and push dev-builder-centos image
shell: bash
@@ -58,10 +58,10 @@ runs:
run: |
make dev-builder \
BASE_IMAGE=centos \
-BUILDX_MULTI_PLATFORM_BUILD=amd64 \
+BUILDX_MULTI_PLATFORM_BUILD=true \
IMAGE_REGISTRY=${{ inputs.dockerhub-image-registry }} \
IMAGE_NAMESPACE=${{ inputs.dockerhub-image-namespace }} \
-DEV_BUILDER_IMAGE_TAG=${{ inputs.version }}
+IMAGE_TAG=${{ inputs.version }}
-- name: Build and push dev-builder-android image # Only build image for amd64 platform.
+- name: Build and push dev-builder-android image
shell: bash
@@ -71,6 +71,6 @@ runs:
BASE_IMAGE=android \
IMAGE_REGISTRY=${{ inputs.dockerhub-image-registry }} \
IMAGE_NAMESPACE=${{ inputs.dockerhub-image-namespace }} \
-DEV_BUILDER_IMAGE_TAG=${{ inputs.version }} && \
+IMAGE_TAG=${{ inputs.version }} && \
docker push ${{ inputs.dockerhub-image-registry }}/${{ inputs.dockerhub-image-namespace }}/dev-builder-android:${{ inputs.version }}


@@ -24,14 +24,6 @@ inputs:
description: Build android artifacts
required: false
default: 'false'
image-namespace:
description: Image Namespace
required: false
default: 'greptime'
image-registry:
description: Image Registry
required: false
default: 'docker.io'
runs:
using: composite
steps:
@@ -43,18 +35,14 @@ runs:
make build-by-dev-builder \
CARGO_PROFILE=${{ inputs.cargo-profile }} \
FEATURES=${{ inputs.features }} \
-BASE_IMAGE=${{ inputs.base-image }} \
+BASE_IMAGE=${{ inputs.base-image }}
IMAGE_NAMESPACE=${{ inputs.image-namespace }} \
IMAGE_REGISTRY=${{ inputs.image-registry }}
- name: Upload artifacts
uses: ./.github/actions/upload-artifacts
if: ${{ inputs.build-android-artifacts == 'false' }}
env:
PROFILE_TARGET: ${{ inputs.cargo-profile == 'dev' && 'debug' || inputs.cargo-profile }}
with:
artifacts-dir: ${{ inputs.artifacts-dir }}
-target-file: ./target/$PROFILE_TARGET/greptime
+target-file: ./target/${{ inputs.cargo-profile }}/greptime
version: ${{ inputs.version }}
working-dir: ${{ inputs.working-dir }}
@@ -63,9 +51,7 @@ runs:
shell: bash
if: ${{ inputs.build-android-artifacts == 'true' }}
run: |
-cd ${{ inputs.working-dir }} && make strip-android-bin \
+cd ${{ inputs.working-dir }} && make strip-android-bin
IMAGE_NAMESPACE=${{ inputs.image-namespace }} \
IMAGE_REGISTRY=${{ inputs.image-registry }}
- name: Upload android artifacts
uses: ./.github/actions/upload-artifacts


@@ -53,7 +53,7 @@ runs:
uses: docker/setup-buildx-action@v2
- name: Download amd64 artifacts
-uses: actions/download-artifact@v4
+uses: actions/download-artifact@v3
with:
name: ${{ inputs.amd64-artifact-name }}
@@ -66,7 +66,7 @@ runs:
mv ${{ inputs.amd64-artifact-name }} amd64
- name: Download arm64 artifacts
-uses: actions/download-artifact@v4
+uses: actions/download-artifact@v3
if: ${{ inputs.arm64-artifact-name }}
with:
name: ${{ inputs.arm64-artifact-name }}


@@ -16,13 +16,7 @@ inputs:
dev-mode:
description: Enable dev mode, only build standard greptime
required: false
-default: "false"
+default: 'false'
image-namespace:
description: Image Namespace
required: true
image-registry:
description: Image Registry
required: true
working-dir:
description: Working directory to build the artifacts
required: false
@@ -36,13 +30,11 @@ runs:
# NOTE: If the BUILD_JOBS > 4, it's always OOM in EC2 instance.
run: |
cd ${{ inputs.working-dir }} && \
-make run-it-in-container BUILD_JOBS=4 \
+make run-it-in-container BUILD_JOBS=4
IMAGE_NAMESPACE=${{ inputs.image-namespace }} \
IMAGE_REGISTRY=${{ inputs.image-registry }}
- name: Upload sqlness logs
if: ${{ failure() && inputs.disable-run-tests == 'false' }} # Only upload logs when the integration tests failed.
-uses: actions/upload-artifact@v4
+uses: actions/upload-artifact@v3
with:
name: sqlness-logs
path: /tmp/greptime-*.log
@@ -57,8 +49,6 @@ runs:
artifacts-dir: greptime-linux-${{ inputs.arch }}-pyo3-${{ inputs.version }}
version: ${{ inputs.version }}
working-dir: ${{ inputs.working-dir }}
image-registry: ${{ inputs.image-registry }}
image-namespace: ${{ inputs.image-namespace }}
- name: Build greptime without pyo3
if: ${{ inputs.dev-mode == 'false' }}
@@ -70,8 +60,6 @@ runs:
artifacts-dir: greptime-linux-${{ inputs.arch }}-${{ inputs.version }}
version: ${{ inputs.version }}
working-dir: ${{ inputs.working-dir }}
image-registry: ${{ inputs.image-registry }}
image-namespace: ${{ inputs.image-namespace }}
- name: Clean up the target directory # Clean up the target directory for the centos7 base image, or it will still use the objects of last build.
shell: bash
@@ -80,7 +68,7 @@ runs:
- name: Build greptime on centos base image
uses: ./.github/actions/build-greptime-binary
-if: ${{ inputs.arch == 'amd64' && inputs.dev-mode == 'false' }} # Builds greptime for centos if the host machine is amd64.
+if: ${{ inputs.arch == 'amd64' && inputs.dev-mode == 'false' }} # Only build centos7 base image for amd64.
with:
base-image: centos
features: servers/dashboard
@@ -88,17 +76,13 @@ runs:
artifacts-dir: greptime-linux-${{ inputs.arch }}-centos-${{ inputs.version }}
version: ${{ inputs.version }}
working-dir: ${{ inputs.working-dir }}
image-registry: ${{ inputs.image-registry }}
image-namespace: ${{ inputs.image-namespace }}
- name: Build greptime on android base image
uses: ./.github/actions/build-greptime-binary
-if: ${{ inputs.arch == 'amd64' && inputs.dev-mode == 'false' }} # Builds arm64 greptime binary for android if the host machine amd64.
+if: ${{ inputs.arch == 'amd64' && inputs.dev-mode == 'false' }} # Only build android base image on amd64.
with:
base-image: android
artifacts-dir: greptime-android-arm64-${{ inputs.version }}
version: ${{ inputs.version }}
working-dir: ${{ inputs.working-dir }}
build-android-artifacts: true
image-registry: ${{ inputs.image-registry }}
image-namespace: ${{ inputs.image-namespace }}


@@ -4,6 +4,9 @@ inputs:
arch:
description: Architecture to build
required: true
rust-toolchain:
description: Rust toolchain to use
required: true
cargo-profile:
description: Cargo profile to build
required: true
@@ -40,9 +43,10 @@ runs:
brew install protobuf
- name: Install rust toolchain
-uses: actions-rust-lang/setup-rust-toolchain@v1
+uses: dtolnay/rust-toolchain@master
with:
-target: ${{ inputs.arch }}
+toolchain: ${{ inputs.rust-toolchain }}
targets: ${{ inputs.arch }}
- name: Start etcd # For integration tests.
if: ${{ inputs.disable-run-tests == 'false' }}
@@ -55,22 +59,15 @@ runs:
if: ${{ inputs.disable-run-tests == 'false' }}
uses: taiki-e/install-action@nextest
# Get proper backtraces in mac Sonoma. Currently there's an issue with the new
# linker that prevents backtraces from getting printed correctly.
#
# <https://github.com/rust-lang/rust/issues/113783>
- name: Run integration tests
if: ${{ inputs.disable-run-tests == 'false' }}
shell: bash
env:
CARGO_BUILD_RUSTFLAGS: "-Clink-arg=-Wl,-ld_classic"
SQLNESS_OPTS: "--preserve-state"
run: |
make test sqlness-test
- name: Upload sqlness logs
if: ${{ failure() }} # Only upload logs when the integration tests failed.
-uses: actions/upload-artifact@v4
+uses: actions/upload-artifact@v3
with:
name: sqlness-logs
path: /tmp/greptime-*.log
@@ -78,8 +75,6 @@ runs:
- name: Build greptime binary
shell: bash
env:
CARGO_BUILD_RUSTFLAGS: "-Clink-arg=-Wl,-ld_classic"
run: |
make build \
CARGO_PROFILE=${{ inputs.cargo-profile }} \


@@ -4,6 +4,9 @@ inputs:
arch:
description: Architecture to build
required: true
rust-toolchain:
description: Rust toolchain to use
required: true
cargo-profile:
description: Cargo profile to build
required: true
@@ -22,25 +25,26 @@ inputs:
runs:
using: composite
steps:
-- uses: arduino/setup-protoc@v3
+- uses: arduino/setup-protoc@v1
- name: Install rust toolchain
-uses: actions-rust-lang/setup-rust-toolchain@v1
+uses: dtolnay/rust-toolchain@master
with:
-target: ${{ inputs.arch }}
+toolchain: ${{ inputs.rust-toolchain }}
targets: ${{ inputs.arch }}
components: llvm-tools-preview
- name: Rust Cache
uses: Swatinem/rust-cache@v2
- name: Install Python
-uses: actions/setup-python@v5
+uses: actions/setup-python@v4
with:
-python-version: "3.10"
+python-version: '3.10'
- name: Install PyArrow Package
shell: pwsh
-run: pip install pyarrow numpy
+run: pip install pyarrow
- name: Install WSL distribution
uses: Vampire/setup-wsl@v2
@@ -55,22 +59,18 @@ runs:
if: ${{ inputs.disable-run-tests == 'false' }}
shell: pwsh
run: make test sqlness-test
env:
RUSTUP_WINDOWS_PATH_ADD_BIN: 1 # Workaround for https://github.com/nextest-rs/nextest/issues/1493
RUST_BACKTRACE: 1
SQLNESS_OPTS: "--preserve-state"
- name: Upload sqlness logs
if: ${{ failure() }} # Only upload logs when the integration tests failed.
-uses: actions/upload-artifact@v4
+uses: actions/upload-artifact@v3
with:
name: sqlness-logs
-path: C:\Users\RUNNER~1\AppData\Local\Temp\sqlness*
+path: ${{ runner.temp }}/greptime-*.log
retention-days: 3
- name: Build greptime binary
shell: pwsh
-run: cargo build --profile ${{ inputs.cargo-profile }} --features ${{ inputs.features }} --target ${{ inputs.arch }} --bin greptime
+run: cargo build --profile ${{ inputs.cargo-profile }} --features ${{ inputs.features }} --target ${{ inputs.arch }}
- name: Upload artifacts
uses: ./.github/actions/upload-artifacts


@@ -1,19 +0,0 @@
name: Fuzz Test
description: 'Fuzz test given setup and service'
inputs:
target:
description: "The fuzz target to test"
required: true
max-total-time:
description: "Max total time(secs)"
required: true
unstable:
default: 'false'
description: "Enable unstable feature"
runs:
using: composite
steps:
- name: Run Fuzz Test
shell: bash
run: cargo fuzz run ${{ inputs.target }} --fuzz-dir tests-fuzz -D -s none ${{ inputs.unstable == 'true' && '--features=unstable' || '' }} -- -max_total_time=${{ inputs.max-total-time }}


@@ -15,7 +15,7 @@ runs:
# |- greptime-darwin-amd64-v0.5.0.sha256sum/greptime-darwin-amd64-v0.5.0.sha256sum
# ...
- name: Download artifacts
-uses: actions/download-artifact@v4
+uses: actions/download-artifact@v3
- name: Create git tag for release
if: ${{ github.event_name != 'push' }} # Meaning this is a scheduled or manual workflow.


@@ -73,7 +73,7 @@ runs:
using: composite
steps:
- name: Download artifacts
-uses: actions/download-artifact@v4
+uses: actions/download-artifact@v3
with:
path: ${{ inputs.artifacts-dir }}
@@ -123,10 +123,10 @@ runs:
DST_REGISTRY_PASSWORD: ${{ inputs.dst-image-registry-password }}
run: |
./.github/scripts/copy-image.sh \
-${{ inputs.src-image-registry }}/${{ inputs.src-image-namespace }}/${{ inputs.src-image-name }}-centos:${{ inputs.version }} \
+${{ inputs.src-image-registry }}/${{ inputs.src-image-namespace }}/${{ inputs.src-image-name }}-centos:latest \
${{ inputs.dst-image-registry }}/${{ inputs.dst-image-namespace }}
-- name: Push latest greptimedb-centos image from DockerHub to ACR
+- name: Push greptimedb-centos image from DockerHub to ACR
shell: bash
if: ${{ inputs.dev-mode == 'false' && inputs.push-latest-tag == 'true' }}
env:


@@ -1,17 +0,0 @@
name: Setup Kind
description: Deploy Kind
runs:
using: composite
steps:
- uses: actions/checkout@v4
- name: Create kind cluster
shell: bash
run: |
helm repo add chaos-mesh https://charts.chaos-mesh.org
kubectl create ns chaos-mesh
helm install chaos-mesh chaos-mesh/chaos-mesh -n=chaos-mesh --version 2.6.3
- name: Print Chaos-mesh
if: always()
shell: bash
run: |
kubectl get po -n chaos-mesh


@@ -1,16 +0,0 @@
name: Setup cyborg environment
description: Setup cyborg environment
runs:
using: composite
steps:
- uses: actions/setup-node@v4
with:
node-version: 22
- uses: pnpm/action-setup@v3
with:
package_json_file: 'cyborg/package.json'
run_install: true
- name: Describe the Environment
working-directory: cyborg
shell: bash
run: pnpm tsx -v


@@ -1,27 +0,0 @@
name: Setup Etcd cluster
description: Deploy Etcd cluster on Kubernetes
inputs:
etcd-replicas:
default: 1
description: "Etcd replicas"
namespace:
default: "etcd-cluster"
runs:
using: composite
steps:
- name: Install Etcd cluster
shell: bash
run: |
helm upgrade \
--install etcd oci://registry-1.docker.io/bitnamicharts/etcd \
--set replicaCount=${{ inputs.etcd-replicas }} \
--set resources.requests.cpu=50m \
--set resources.requests.memory=128Mi \
--set resources.limits.cpu=1500m \
--set resources.limits.memory=2Gi \
--set auth.rbac.create=false \
--set auth.rbac.token.enabled=false \
--set persistence.size=2Gi \
--create-namespace \
-n ${{ inputs.namespace }}


@@ -1,95 +0,0 @@
name: Setup GreptimeDB cluster
description: Deploy GreptimeDB cluster on Kubernetes
inputs:
frontend-replicas:
default: 2
description: "Number of Frontend replicas"
datanode-replicas:
default: 2
description: "Number of Datanode replicas"
meta-replicas:
default: 3
description: "Number of Metasrv replicas"
image-registry:
default: "docker.io"
description: "Image registry"
image-repository:
default: "greptime/greptimedb"
description: "Image repository"
image-tag:
default: "latest"
description: 'Image tag'
etcd-endpoints:
default: "etcd.etcd-cluster.svc.cluster.local:2379"
description: "Etcd endpoints"
values-filename:
default: "with-minio.yaml"
enable-region-failover:
default: false
runs:
using: composite
steps:
- name: Install GreptimeDB operator
uses: nick-fields/retry@v3
with:
timeout_minutes: 3
max_attempts: 3
shell: bash
command: |
helm repo add greptime https://greptimeteam.github.io/helm-charts/
helm repo update
helm upgrade \
--install \
--create-namespace \
greptimedb-operator greptime/greptimedb-operator \
-n greptimedb-admin \
--wait \
--wait-for-jobs
- name: Install GreptimeDB cluster
shell: bash
run: |
helm upgrade \
--install my-greptimedb \
--set meta.etcdEndpoints=${{ inputs.etcd-endpoints }} \
--set meta.enableRegionFailover=${{ inputs.enable-region-failover }} \
--set image.registry=${{ inputs.image-registry }} \
--set image.repository=${{ inputs.image-repository }} \
--set image.tag=${{ inputs.image-tag }} \
--set base.podTemplate.main.resources.requests.cpu=50m \
--set base.podTemplate.main.resources.requests.memory=256Mi \
--set base.podTemplate.main.resources.limits.cpu=1000m \
--set base.podTemplate.main.resources.limits.memory=2Gi \
--set frontend.replicas=${{ inputs.frontend-replicas }} \
--set datanode.replicas=${{ inputs.datanode-replicas }} \
--set meta.replicas=${{ inputs.meta-replicas }} \
greptime/greptimedb-cluster \
--create-namespace \
-n my-greptimedb \
--values ./.github/actions/setup-greptimedb-cluster/${{ inputs.values-filename }} \
--wait \
--wait-for-jobs
- name: Wait for GreptimeDB
shell: bash
run: |
while true; do
PHASE=$(kubectl -n my-greptimedb get gtc my-greptimedb -o jsonpath='{.status.clusterPhase}')
if [ "$PHASE" == "Running" ]; then
echo "Cluster is ready"
break
else
echo "Cluster is not ready yet: Current phase: $PHASE"
kubectl get pods -n my-greptimedb
sleep 5 # wait for 5 seconds before check again.
fi
done
- name: Print GreptimeDB info
if: always()
shell: bash
run: |
kubectl get all --show-labels -n my-greptimedb
- name: Describe Nodes
if: always()
shell: bash
run: |
kubectl describe nodes


@@ -1,13 +0,0 @@
meta:
configData: |-
[runtime]
global_rt_size = 4
datanode:
configData: |-
[runtime]
global_rt_size = 4
compact_rt_size = 2
frontend:
configData: |-
[runtime]
global_rt_size = 4


@@ -1,33 +0,0 @@
meta:
configData: |-
[runtime]
global_rt_size = 4
[datanode]
[datanode.client]
timeout = "60s"
datanode:
configData: |-
[runtime]
global_rt_size = 4
compact_rt_size = 2
[storage]
cache_path = "/data/greptimedb/s3cache"
cache_capacity = "256MB"
frontend:
configData: |-
[runtime]
global_rt_size = 4
[meta_client]
ddl_timeout = "60s"
objectStorage:
s3:
bucket: default
region: us-west-2
root: test-root
endpoint: http://minio.minio.svc.cluster.local
credentials:
accessKeyId: rootuser
secretAccessKey: rootpass123


@@ -1,29 +0,0 @@
meta:
configData: |-
[runtime]
global_rt_size = 4
[datanode]
[datanode.client]
timeout = "60s"
datanode:
configData: |-
[runtime]
global_rt_size = 4
compact_rt_size = 2
frontend:
configData: |-
[runtime]
global_rt_size = 4
[meta_client]
ddl_timeout = "60s"
objectStorage:
s3:
bucket: default
region: us-west-2
root: test-root
endpoint: http://minio.minio.svc.cluster.local
credentials:
accessKeyId: rootuser
secretAccessKey: rootpass123


@@ -1,45 +0,0 @@
meta:
configData: |-
[runtime]
global_rt_size = 4
[wal]
provider = "kafka"
broker_endpoints = ["kafka.kafka-cluster.svc.cluster.local:9092"]
num_topics = 3
[datanode]
[datanode.client]
timeout = "60s"
datanode:
configData: |-
[runtime]
global_rt_size = 4
compact_rt_size = 2
[wal]
provider = "kafka"
broker_endpoints = ["kafka.kafka-cluster.svc.cluster.local:9092"]
linger = "2ms"
frontend:
configData: |-
[runtime]
global_rt_size = 4
[meta_client]
ddl_timeout = "60s"
objectStorage:
s3:
bucket: default
region: us-west-2
root: test-root
endpoint: http://minio.minio.svc.cluster.local
credentials:
accessKeyId: rootuser
secretAccessKey: rootpass123
remoteWal:
enabled: true
kafka:
brokerEndpoints:
- "kafka.kafka-cluster.svc.cluster.local:9092"


@@ -1,24 +0,0 @@
name: Setup Kafka cluster
description: Deploy Kafka cluster on Kubernetes
inputs:
controller-replicas:
default: 3
description: "Kafka controller replicas"
namespace:
default: "kafka-cluster"
runs:
using: composite
steps:
- name: Install Kafka cluster
shell: bash
run: |
helm upgrade \
--install kafka oci://registry-1.docker.io/bitnamicharts/kafka \
--set controller.replicaCount=${{ inputs.controller-replicas }} \
--set controller.resources.requests.cpu=50m \
--set controller.resources.requests.memory=128Mi \
--set listeners.controller.protocol=PLAINTEXT \
--set listeners.client.protocol=PLAINTEXT \
--create-namespace \
-n ${{ inputs.namespace }}


@@ -1,10 +0,0 @@
name: Setup Kind
description: Deploy Kind
runs:
using: composite
steps:
- uses: actions/checkout@v4
- name: Create kind cluster
shell: bash
run: |
./.github/scripts/kind-with-registry.sh


@@ -1,24 +0,0 @@
name: Setup Minio cluster
description: Deploy Minio cluster on Kubernetes
inputs:
replicas:
default: 1
description: "replicas"
runs:
using: composite
steps:
- name: Install Minio
shell: bash
run: |
helm repo add minio https://charts.min.io/
helm upgrade --install minio \
--set resources.requests.memory=128Mi \
--set replicas=${{ inputs.replicas }} \
--set mode=standalone \
--set rootUser=rootuser,rootPassword=rootpass123 \
--set buckets[0].name=default \
--set service.port=80,service.targetPort=9000 \
minio/minio \
--create-namespace \
-n minio


@@ -1,30 +0,0 @@
name: Setup PostgreSQL
description: Deploy PostgreSQL on Kubernetes
inputs:
postgres-replicas:
default: 1
description: "Number of PostgreSQL replicas"
namespace:
default: "postgres-namespace"
postgres-version:
default: "14.2"
description: "PostgreSQL version"
storage-size:
default: "1Gi"
description: "Storage size for PostgreSQL"
runs:
using: composite
steps:
- name: Install PostgreSQL
shell: bash
run: |
helm upgrade \
--install postgresql oci://registry-1.docker.io/bitnamicharts/postgresql \
--set replicaCount=${{ inputs.postgres-replicas }} \
--set image.tag=${{ inputs.postgres-version }} \
--set persistence.size=${{ inputs.storage-size }} \
--set postgresql.username=greptimedb \
--set postgresql.password=admin \
--create-namespace \
-n ${{ inputs.namespace }}


@@ -57,14 +57,3 @@ runs:
AWS_SECRET_ACCESS_KEY: ${{ inputs.aws-secret-access-key }}
run: |
aws s3 rm s3://${{ inputs.aws-ci-test-bucket }}/${{ inputs.data-root }} --recursive
- name: Export kind logs
if: failure()
shell: bash
run: kind export logs -n greptimedb-operator-e2e /tmp/kind
- name: Upload logs
if: failure()
uses: actions/upload-artifact@v4
with:
name: kind-logs
path: /tmp/kind
retention-days: 3


@@ -38,7 +38,7 @@ runs:
steps:
- name: Configure AWS credentials
if: startsWith(inputs.runner, 'ec2')
-uses: aws-actions/configure-aws-credentials@v4
+uses: aws-actions/configure-aws-credentials@v2
with:
aws-access-key-id: ${{ inputs.aws-access-key-id }}
aws-secret-access-key: ${{ inputs.aws-secret-access-key }}


@@ -25,7 +25,7 @@ runs:
steps:
- name: Configure AWS credentials
if: ${{ inputs.label && inputs.ec2-instance-id }}
-uses: aws-actions/configure-aws-credentials@v4
+uses: aws-actions/configure-aws-credentials@v2
with:
aws-access-key-id: ${{ inputs.aws-access-key-id }}
aws-secret-access-key: ${{ inputs.aws-secret-access-key }}


@@ -6,7 +6,7 @@ inputs:
required: true
target-file:
description: The path of the target artifact
-required: false
+required: true
version:
description: Version of the artifact
required: true
@@ -18,12 +18,11 @@ runs:
using: composite
steps:
- name: Create artifacts directory
if: ${{ inputs.target-file != '' }}
working-directory: ${{ inputs.working-dir }}
shell: bash
run: |
mkdir -p ${{ inputs.artifacts-dir }} && \
-cp ${{ inputs.target-file }} ${{ inputs.artifacts-dir }}
+mv ${{ inputs.target-file }} ${{ inputs.artifacts-dir }}
# The compressed artifacts will use the following layout:
# greptime-linux-amd64-pyo3-v0.3.0sha256sum
@@ -50,15 +49,15 @@ runs:
run: Get-FileHash ${{ inputs.artifacts-dir }}.tar.gz -Algorithm SHA256 | select -ExpandProperty Hash > ${{ inputs.artifacts-dir }}.sha256sum
# Note: The artifacts will be double zip compressed(related issue: https://github.com/actions/upload-artifact/issues/39).
-# However, when we use 'actions/download-artifact' to download the artifacts, it will be automatically unzipped.
+# However, when we use 'actions/download-artifact@v3' to download the artifacts, it will be automatically unzipped.
- name: Upload artifacts
-uses: actions/upload-artifact@v4
+uses: actions/upload-artifact@v3
with:
name: ${{ inputs.artifacts-dir }}
path: ${{ inputs.working-dir }}/${{ inputs.artifacts-dir }}.tar.gz
- name: Upload checksum
-uses: actions/upload-artifact@v4
+uses: actions/upload-artifact@v3
with:
name: ${{ inputs.artifacts-dir }}.sha256sum
path: ${{ inputs.working-dir }}/${{ inputs.artifacts-dir }}.sha256sum


@@ -0,0 +1,13 @@
{
"LABEL": {
"name": "breaking change",
"color": "D93F0B"
},
"CHECKS": {
"regexp": "^(?:(?!!:).)*$",
"ignoreLabels": [
"ignore-title"
],
"alwaysPassCI": true
}
}

.github/pr-title-checker-config.json

@@ -0,0 +1,12 @@
{
"LABEL": {
"name": "Invalid PR Title",
"color": "B60205"
},
"CHECKS": {
"regexp": "^(feat|fix|test|refactor|chore|style|docs|perf|build|ci|revert)(\\(.*\\))?\\!?:.*",
"ignoreLabels": [
"ignore-title"
]
}
}
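
The "regexp" field above is what gates PR titles. As a quick, informal check of which titles it accepts (GNU grep assumed; the sample titles are invented for illustration):

# Unescaped form of the "regexp" field from the config above.
pattern='^(feat|fix|test|refactor|chore|style|docs|perf|build|ci|revert)(\(.*\))?!?:.*'

echo 'feat(flow): add dataflow operator' | grep -Eq "$pattern" && echo accepted
echo 'update readme'                     | grep -Eq "$pattern" || echo rejected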


@@ -1,10 +1,8 @@
-I hereby agree to the terms of the [GreptimeDB CLA](https://github.com/GreptimeTeam/.github/blob/main/CLA.md).
+I hereby agree to the terms of the [GreptimeDB CLA](https://gist.github.com/xtang/6378857777706e568c1949c7578592cc)
## Refer to a related PR or issue link (optional)
## What's changed and what's your intention?
-__!!! DO NOT LEAVE THIS BLOCK EMPTY !!!__
+_PLEASE DO NOT LEAVE THIS EMPTY !!!_
Please explain IN DETAIL what the changes are in this PR and why they are needed:
@@ -17,4 +15,5 @@ Please explain IN DETAIL what the changes are in this PR and why they are needed
- [ ] I have written the necessary rustdoc comments.
- [ ] I have added the necessary unit tests and integration tests.
- [ ] This PR requires documentation updates.
## Refer to a related PR or issue link (optional)


@@ -107,9 +107,12 @@ function deploy_greptimedb_cluster_with_s3_storage() {
--set storage.s3.bucket="$AWS_CI_TEST_BUCKET" \
--set storage.s3.region="$AWS_REGION" \
--set storage.s3.root="$DATA_ROOT" \
--set storage.s3.secretName=s3-credentials \
--set storage.credentials.secretName=s3-credentials \
---set storage.credentials.accessKeyId="$AWS_ACCESS_KEY_ID" \
+--set storage.credentials.secretCreation.enabled=true \
---set storage.credentials.secretAccessKey="$AWS_SECRET_ACCESS_KEY"
+--set storage.credentials.secretCreation.enableEncryption=false \
--set storage.credentials.secretCreation.data.access-key-id="$AWS_ACCESS_KEY_ID" \
--set storage.credentials.secretCreation.data.secret-access-key="$AWS_SECRET_ACCESS_KEY"
# Wait for greptimedb cluster to be ready.
while true; do


@@ -1,66 +0,0 @@
#!/usr/bin/env bash
set -e
set -o pipefail
# 1. Create registry container unless it already exists
reg_name='kind-registry'
reg_port='5001'
if [ "$(docker inspect -f '{{.State.Running}}' "${reg_name}" 2>/dev/null || true)" != 'true' ]; then
docker run \
-d --restart=always -p "127.0.0.1:${reg_port}:5000" --network bridge --name "${reg_name}" \
registry:2
fi
# 2. Create kind cluster with containerd registry config dir enabled
# TODO: kind will eventually enable this by default and this patch will
# be unnecessary.
#
# See:
# https://github.com/kubernetes-sigs/kind/issues/2875
# https://github.com/containerd/containerd/blob/main/docs/cri/config.md#registry-configuration
# See: https://github.com/containerd/containerd/blob/main/docs/hosts.md
cat <<EOF | kind create cluster --wait 2m --config=-
kind: Cluster
apiVersion: kind.x-k8s.io/v1alpha4
containerdConfigPatches:
- |-
[plugins."io.containerd.grpc.v1.cri".registry]
config_path = "/etc/containerd/certs.d"
EOF
# 3. Add the registry config to the nodes
#
# This is necessary because localhost resolves to loopback addresses that are
# network-namespace local.
# In other words: localhost in the container is not localhost on the host.
#
# We want a consistent name that works from both ends, so we tell containerd to
# alias localhost:${reg_port} to the registry container when pulling images
REGISTRY_DIR="/etc/containerd/certs.d/localhost:${reg_port}"
for node in $(kind get nodes); do
docker exec "${node}" mkdir -p "${REGISTRY_DIR}"
cat <<EOF | docker exec -i "${node}" cp /dev/stdin "${REGISTRY_DIR}/hosts.toml"
[host."http://${reg_name}:5000"]
EOF
done
# 4. Connect the registry to the cluster network if not already connected
# This allows kind to bootstrap the network but ensures they're on the same network
if [ "$(docker inspect -f='{{json .NetworkSettings.Networks.kind}}' "${reg_name}")" = 'null' ]; then
docker network connect "kind" "${reg_name}"
fi
# 5. Document the local registry
# https://github.com/kubernetes/enhancements/tree/master/keps/sig-cluster-lifecycle/generic/1755-communicating-a-local-registry
cat <<EOF | kubectl apply -f -
apiVersion: v1
kind: ConfigMap
metadata:
name: local-registry-hosting
namespace: kube-public
data:
localRegistryHosting.v1: |
host: "localhost:${reg_port}"
help: "https://kind.sigs.k8s.io/docs/user/local-registry/"
EOF


@@ -1,7 +1,7 @@
on:
push:
branches:
-- main
+- develop
paths-ignore:
- 'docs/**'
- 'config/**'
@@ -12,15 +12,20 @@ on:
name: Build API docs
env:
RUST_TOOLCHAIN: nightly-2023-10-21
jobs:
apidoc:
runs-on: ubuntu-20.04
steps:
-- uses: actions/checkout@v4
+- uses: actions/checkout@v3
-- uses: arduino/setup-protoc@v3
+- uses: arduino/setup-protoc@v1
with:
repo-token: ${{ secrets.GITHUB_TOKEN }}
-- uses: actions-rust-lang/setup-rust-toolchain@v1
+- uses: dtolnay/rust-toolchain@master
with:
toolchain: ${{ env.RUST_TOOLCHAIN }}
- run: cargo doc --workspace --no-deps --document-private-items
- run: |
cat <<EOF > target/doc/index.html
@@ -35,4 +40,3 @@ jobs:
uses: JamesIves/github-pages-deploy-action@v4
with:
folder: target/doc
single-commit: true


@@ -55,18 +55,10 @@ on:
description: Build and push images to DockerHub and ACR
required: false
default: true
cargo_profile:
type: choice
description: The cargo profile to use in building GreptimeDB.
default: nightly
options:
- dev
- release
- nightly
# Use env variables to control all the release process.
env:
-CARGO_PROFILE: ${{ inputs.cargo_profile }}
+CARGO_PROFILE: nightly
# Controls whether to run tests, include unit-test, integration-test and sqlness.
DISABLE_RUN_TESTS: ${{ inputs.skip_test || vars.DEFAULT_SKIP_TEST }}
@@ -82,9 +74,6 @@ env:
# The source code will check out in the following path: '${WORKING_DIR}/dev/greptime'.
CHECKOUT_GREPTIMEDB_PATH: dev/greptimedb
permissions:
issues: write
jobs:
allocate-runners:
name: Allocate runners
@@ -104,7 +93,7 @@ jobs:
version: ${{ steps.create-version.outputs.version }}
steps:
- name: Checkout
-uses: actions/checkout@v4
+uses: actions/checkout@v3
with:
fetch-depth: 0
@@ -158,12 +147,12 @@ jobs:
runs-on: ${{ needs.allocate-runners.outputs.linux-amd64-runner }}
steps:
- name: Checkout
-uses: actions/checkout@v4
+uses: actions/checkout@v3
with:
fetch-depth: 0
- name: Checkout greptimedb
-uses: actions/checkout@v4
+uses: actions/checkout@v3
with:
repository: ${{ inputs.repository }}
ref: ${{ inputs.commit }}
@@ -177,8 +166,6 @@ jobs:
disable-run-tests: ${{ env.DISABLE_RUN_TESTS }}
dev-mode: true # Only build the standard greptime binary.
working-dir: ${{ env.CHECKOUT_GREPTIMEDB_PATH }}
image-registry: ${{ vars.ECR_IMAGE_REGISTRY }}
image-namespace: ${{ vars.ECR_IMAGE_NAMESPACE }}
build-linux-arm64-artifacts:
name: Build linux-arm64 artifacts
@@ -189,12 +176,12 @@ jobs:
runs-on: ${{ needs.allocate-runners.outputs.linux-arm64-runner }}
steps:
- name: Checkout
-uses: actions/checkout@v4
+uses: actions/checkout@v3
with:
fetch-depth: 0
- name: Checkout greptimedb
-uses: actions/checkout@v4
+uses: actions/checkout@v3
with:
repository: ${{ inputs.repository }}
ref: ${{ inputs.commit }}
@@ -208,8 +195,6 @@ jobs:
disable-run-tests: ${{ env.DISABLE_RUN_TESTS }}
dev-mode: true # Only build the standard greptime binary.
working-dir: ${{ env.CHECKOUT_GREPTIMEDB_PATH }}
image-registry: ${{ vars.ECR_IMAGE_REGISTRY }}
image-namespace: ${{ vars.ECR_IMAGE_NAMESPACE }}
release-images-to-dockerhub:
name: Build and push images to DockerHub
@@ -223,7 +208,7 @@ jobs:
outputs:
build-result: ${{ steps.set-build-result.outputs.build-result }}
steps:
-- uses: actions/checkout@v4
+- uses: actions/checkout@v3
with:
fetch-depth: 0
@@ -254,7 +239,7 @@ jobs:
runs-on: ubuntu-20.04
continue-on-error: true
steps:
-- uses: actions/checkout@v4
+- uses: actions/checkout@v3
with:
fetch-depth: 0
@@ -288,7 +273,7 @@ jobs:
]
steps:
- name: Checkout
-uses: actions/checkout@v4
+uses: actions/checkout@v3
with:
fetch-depth: 0
@@ -313,7 +298,7 @@ jobs:
]
steps:
- name: Checkout
-uses: actions/checkout@v4
+uses: actions/checkout@v3
with:
fetch-depth: 0
@@ -328,7 +313,7 @@ jobs:
github-token: ${{ secrets.GH_PERSONAL_ACCESS_TOKEN }}
notification:
-if: ${{ github.repository == 'GreptimeTeam/greptimedb' && always() }} # Not requiring successful dependent jobs, always run.
+if: ${{ always() }} # Not requiring successful dependent jobs, always run.
name: Send notification to Greptime team
needs: [
release-images-to-dockerhub
@@ -337,25 +322,16 @@ jobs:
env:
SLACK_WEBHOOK_URL: ${{ secrets.SLACK_WEBHOOK_URL_DEVELOP_CHANNEL }}
steps:
-- uses: actions/checkout@v4
+- name: Notify nightly build successful result
- uses: ./.github/actions/setup-cyborg
- name: Report CI status
id: report-ci-status
working-directory: cyborg
run: pnpm tsx bin/report-ci-failure.ts
env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
CI_REPORT_STATUS: ${{ needs.release-images-to-dockerhub.outputs.build-result == 'success' }}
- name: Notify dev build successful result
uses: slackapi/slack-github-action@v1.23.0
if: ${{ needs.release-images-to-dockerhub.outputs.build-result == 'success' }}
with:
payload: |
{"text": "GreptimeDB's ${{ env.NEXT_RELEASE_VERSION }} build has completed successfully."}
-- name: Notify dev build failed result
+- name: Notify nightly build failed result
uses: slackapi/slack-github-action@v1.23.0
if: ${{ needs.release-images-to-dockerhub.outputs.build-result != 'success' }}
with:
payload: |
-{"text": "GreptimeDB's ${{ env.NEXT_RELEASE_VERSION }} build has failed, please check ${{ steps.report-ci-status.outputs.html_url }}."}
+{"text": "GreptimeDB's ${{ env.NEXT_RELEASE_VERSION }} build has failed, please check 'https://github.com/GreptimeTeam/greptimedb/actions/workflows/${{ env.NEXT_RELEASE_VERSION }}-build.yml'."}


@@ -9,9 +9,9 @@ on:
- '.dockerignore'
- 'docker/**'
- '.gitignore'
- 'grafana/**'
push:
branches:
- develop
- main
paths-ignore:
- 'docs/**'
@@ -20,7 +20,6 @@ on:
- '.dockerignore'
- 'docker/**'
- '.gitignore'
- 'grafana/**'
workflow_dispatch:
name: CI
@@ -29,727 +28,170 @@ concurrency:
group: ${{ github.workflow }}-${{ github.head_ref || github.run_id }}
cancel-in-progress: true
-jobs:
+env:
-check-typos-and-docs:
+RUST_TOOLCHAIN: nightly-2023-10-21
name: Check typos and docs
runs-on: ubuntu-20.04
steps:
- uses: actions/checkout@v4
- uses: crate-ci/typos@master
- name: Check the config docs
run: |
make config-docs && \
git diff --name-only --exit-code ./config/config.md \
|| (echo "'config/config.md' is not up-to-date, please run 'make config-docs'." && exit 1)
-license-header-check:
+jobs:
typos:
name: Spell Check with Typos
runs-on: ubuntu-20.04
name: Check License Header
steps:
-- uses: actions/checkout@v4
+- uses: actions/checkout@v3
-- uses: korandoru/hawkeye@v5
+- uses: crate-ci/typos@v1.13.10
check:
name: Check
if: github.event.pull_request.draft == false
runs-on: ${{ matrix.os }}
strategy:
matrix:
-os: [ windows-2022, ubuntu-20.04 ]
+os: [ windows-latest-8-cores, ubuntu-20.04 ]
timeout-minutes: 60
steps:
-- uses: actions/checkout@v4
+- uses: actions/checkout@v3
-- uses: arduino/setup-protoc@v3
+- uses: arduino/setup-protoc@v1
with:
repo-token: ${{ secrets.GITHUB_TOKEN }}
-- uses: actions-rust-lang/setup-rust-toolchain@v1
+- uses: dtolnay/rust-toolchain@master
with:
toolchain: ${{ env.RUST_TOOLCHAIN }}
- name: Rust Cache
uses: Swatinem/rust-cache@v2
with:
# Shares across multiple jobs
# Shares with `Clippy` job
shared-key: "check-lint"
- name: Run cargo check
run: cargo check --locked --workspace --all-targets
toml:
name: Toml Check
if: github.event.pull_request.draft == false
runs-on: ubuntu-20.04
timeout-minutes: 60
steps:
-- uses: actions/checkout@v4
+- uses: actions/checkout@v3
-- uses: actions-rust-lang/setup-rust-toolchain@v1
+- uses: dtolnay/rust-toolchain@master
with:
toolchain: stable
- name: Rust Cache
uses: Swatinem/rust-cache@v2
with:
# Shares across multiple jobs
shared-key: "check-toml"
- name: Install taplo
-run: cargo +stable install taplo-cli --version ^0.9 --locked
+run: cargo +stable install taplo-cli --version ^0.8 --locked
- name: Run taplo
run: taplo format --check
build:
name: Build GreptimeDB binaries
runs-on: ${{ matrix.os }}
strategy:
matrix:
os: [ ubuntu-20.04 ]
timeout-minutes: 60
steps:
- uses: actions/checkout@v4
- uses: arduino/setup-protoc@v3
with:
repo-token: ${{ secrets.GITHUB_TOKEN }}
- uses: actions-rust-lang/setup-rust-toolchain@v1
- uses: Swatinem/rust-cache@v2
with:
# Shares across multiple jobs
shared-key: "build-binaries"
- name: Install cargo-gc-bin
shell: bash
run: cargo install cargo-gc-bin
- name: Build greptime binaries
shell: bash
# `cargo gc` will invoke `cargo build` with specified args
run: cargo gc -- --bin greptime --bin sqlness-runner
- name: Pack greptime binaries
shell: bash
run: |
mkdir bins && \
mv ./target/debug/greptime bins && \
mv ./target/debug/sqlness-runner bins
- name: Print greptime binaries info
run: ls -lh bins
- name: Upload artifacts
uses: ./.github/actions/upload-artifacts
with:
artifacts-dir: bins
version: current
fuzztest:
name: Fuzz Test
needs: build
runs-on: ubuntu-latest
timeout-minutes: 60
strategy:
fail-fast: false
matrix:
target: [ "fuzz_create_table", "fuzz_alter_table", "fuzz_create_database", "fuzz_create_logical_table", "fuzz_alter_logical_table", "fuzz_insert", "fuzz_insert_logical_table" ]
steps:
- name: Remove unused software
run: |
echo "Disk space before:"
df -h
[[ -d /usr/share/dotnet ]] && sudo rm -rf /usr/share/dotnet
[[ -d /usr/local/lib/android ]] && sudo rm -rf /usr/local/lib/android
[[ -d /opt/ghc ]] && sudo rm -rf /opt/ghc
[[ -d /opt/hostedtoolcache/CodeQL ]] && sudo rm -rf /opt/hostedtoolcache/CodeQL
sudo docker image prune --all --force
sudo docker builder prune -a
echo "Disk space after:"
df -h
- uses: actions/checkout@v4
- uses: arduino/setup-protoc@v3
with:
repo-token: ${{ secrets.GITHUB_TOKEN }}
- uses: actions-rust-lang/setup-rust-toolchain@v1
- name: Rust Cache
uses: Swatinem/rust-cache@v2
with:
# Shares across multiple jobs
shared-key: "fuzz-test-targets"
- name: Set Rust Fuzz
shell: bash
run: |
sudo apt-get install -y libfuzzer-14-dev
rustup install nightly
cargo +nightly install cargo-fuzz cargo-gc-bin
- name: Download pre-built binaries
uses: actions/download-artifact@v4
with:
name: bins
path: .
- name: Unzip binaries
run: |
tar -xvf ./bins.tar.gz
rm ./bins.tar.gz
- name: Run GreptimeDB
run: |
./bins/greptime standalone start&
- name: Fuzz Test
uses: ./.github/actions/fuzz-test
env:
CUSTOM_LIBFUZZER_PATH: /usr/lib/llvm-14/lib/libFuzzer.a
GT_MYSQL_ADDR: 127.0.0.1:4002
with:
target: ${{ matrix.target }}
max-total-time: 120
unstable-fuzztest:
name: Unstable Fuzz Test
needs: build-greptime-ci
runs-on: ubuntu-latest
timeout-minutes: 60
strategy:
matrix:
target: [ "unstable_fuzz_create_table_standalone" ]
steps:
- name: Remove unused software
run: |
echo "Disk space before:"
df -h
[[ -d /usr/share/dotnet ]] && sudo rm -rf /usr/share/dotnet
[[ -d /usr/local/lib/android ]] && sudo rm -rf /usr/local/lib/android
[[ -d /opt/ghc ]] && sudo rm -rf /opt/ghc
[[ -d /opt/hostedtoolcache/CodeQL ]] && sudo rm -rf /opt/hostedtoolcache/CodeQL
sudo docker image prune --all --force
sudo docker builder prune -a
echo "Disk space after:"
df -h
- uses: actions/checkout@v4
- uses: arduino/setup-protoc@v3
with:
repo-token: ${{ secrets.GITHUB_TOKEN }}
- uses: actions-rust-lang/setup-rust-toolchain@v1
- name: Rust Cache
uses: Swatinem/rust-cache@v2
with:
# Shares across multiple jobs
shared-key: "fuzz-test-targets"
- name: Set Rust Fuzz
shell: bash
run: |
sudo apt update && sudo apt install -y libfuzzer-14-dev
cargo install cargo-fuzz cargo-gc-bin
- name: Download pre-built binary
uses: actions/download-artifact@v4
with:
name: bin
path: .
- name: Unzip binary
run: |
tar -xvf ./bin.tar.gz
rm ./bin.tar.gz
- name: Run Fuzz Test
uses: ./.github/actions/fuzz-test
env:
CUSTOM_LIBFUZZER_PATH: /usr/lib/llvm-14/lib/libFuzzer.a
GT_MYSQL_ADDR: 127.0.0.1:4002
GT_FUZZ_BINARY_PATH: ./bin/greptime
GT_FUZZ_INSTANCE_ROOT_DIR: /tmp/unstable-greptime/
with:
target: ${{ matrix.target }}
max-total-time: 120
unstable: 'true'
- name: Upload unstable fuzz test logs
if: failure()
uses: actions/upload-artifact@v4
with:
name: unstable-fuzz-logs
path: /tmp/unstable-greptime/
retention-days: 3
build-greptime-ci:
name: Build GreptimeDB binary (profile-CI)
runs-on: ${{ matrix.os }}
strategy:
matrix:
os: [ ubuntu-20.04 ]
timeout-minutes: 60
steps:
- uses: actions/checkout@v4
- uses: arduino/setup-protoc@v3
with:
repo-token: ${{ secrets.GITHUB_TOKEN }}
- uses: actions-rust-lang/setup-rust-toolchain@v1
- uses: Swatinem/rust-cache@v2
with:
# Shares across multiple jobs
shared-key: "build-greptime-ci"
- name: Install cargo-gc-bin
shell: bash
run: cargo install cargo-gc-bin
- name: Check aws-lc-sys will not build
shell: bash
run: |
if cargo tree -i aws-lc-sys -e features | grep -q aws-lc-sys; then
echo "Found aws-lc-sys, which has compilation problems on older gcc versions. Please replace it with ring until its building experience improves."
exit 1
fi
- name: Build greptime binary
shell: bash
# `cargo gc` will invoke `cargo build` with specified args
run: cargo gc --profile ci -- --bin greptime
- name: Pack greptime binary
shell: bash
run: |
mkdir bin && \
mv ./target/ci/greptime bin
- name: Print greptime binaries info
run: ls -lh bin
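      # Publish the CI-profile binary as the "bin" artifact consumed by the fuzz jobs.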
- name: Upload artifacts
uses: ./.github/actions/upload-artifacts
with:
artifacts-dir: bin
version: current
distributed-fuzztest:
name: Fuzz Test (Distributed, ${{ matrix.mode.name }}, ${{ matrix.target }})
runs-on: ubuntu-latest
needs: build-greptime-ci
timeout-minutes: 60
strategy:
matrix:
target: [ "fuzz_create_table", "fuzz_alter_table", "fuzz_create_database", "fuzz_create_logical_table", "fuzz_alter_logical_table", "fuzz_insert", "fuzz_insert_logical_table" ]
mode:
- name: "Remote WAL"
minio: true
kafka: true
values: "with-remote-wal.yaml"
steps:
- name: Remove unused software
run: |
echo "Disk space before:"
df -h
[[ -d /usr/share/dotnet ]] && sudo rm -rf /usr/share/dotnet
[[ -d /usr/local/lib/android ]] && sudo rm -rf /usr/local/lib/android
[[ -d /opt/ghc ]] && sudo rm -rf /opt/ghc
[[ -d /opt/hostedtoolcache/CodeQL ]] && sudo rm -rf /opt/hostedtoolcache/CodeQL
sudo docker image prune --all --force
sudo docker builder prune -a
echo "Disk space after:"
df -h
- uses: actions/checkout@v4
- name: Setup Kind
uses: ./.github/actions/setup-kind
- if: matrix.mode.minio
name: Setup Minio
uses: ./.github/actions/setup-minio
- if: matrix.mode.kafka
        name: Setup Kafka cluster
uses: ./.github/actions/setup-kafka-cluster
      - name: Setup Etcd cluster
uses: ./.github/actions/setup-etcd-cluster
      - name: Setup Postgres cluster
uses: ./.github/actions/setup-postgres-cluster
# Prepares for fuzz tests
- uses: arduino/setup-protoc@v3
with:
repo-token: ${{ secrets.GITHUB_TOKEN }}
- uses: actions-rust-lang/setup-rust-toolchain@v1
- name: Rust Cache
uses: Swatinem/rust-cache@v2
with:
# Shares across multiple jobs
shared-key: "fuzz-test-targets"
- name: Set Rust Fuzz
shell: bash
run: |
sudo apt-get install -y libfuzzer-14-dev
rustup install nightly
cargo +nightly install cargo-fuzz cargo-gc-bin
# Downloads ci image
      - name: Download pre-built binary
uses: actions/download-artifact@v4
with:
name: bin
path: .
- name: Unzip binary
run: |
tar -xvf ./bin.tar.gz
rm ./bin.tar.gz
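      # Wrap the pre-built binary in an image and push it to the local kind registry
      # (localhost:5001) that the cluster setup below pulls from.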
- name: Build and push GreptimeDB image
uses: ./.github/actions/build-and-push-ci-image
- name: Wait for etcd
run: |
kubectl wait \
--for=condition=Ready \
pod -l app.kubernetes.io/instance=etcd \
--timeout=120s \
-n etcd-cluster
- if: matrix.mode.minio
name: Wait for minio
run: |
kubectl wait \
--for=condition=Ready \
pod -l app=minio \
--timeout=120s \
-n minio
- if: matrix.mode.kafka
name: Wait for kafka
run: |
kubectl wait \
--for=condition=Ready \
pod -l app.kubernetes.io/instance=kafka \
--timeout=120s \
-n kafka-cluster
- name: Print etcd info
shell: bash
run: kubectl get all --show-labels -n etcd-cluster
# Setup cluster for test
- name: Setup GreptimeDB cluster
uses: ./.github/actions/setup-greptimedb-cluster
with:
image-registry: localhost:5001
values-filename: ${{ matrix.mode.values }}
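      # Expose the frontend's MySQL port locally so the fuzz targets can reach it via
      # GT_MYSQL_ADDR.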
- name: Port forward (mysql)
run: |
kubectl port-forward service/my-greptimedb-frontend 4002:4002 -n my-greptimedb&
- name: Fuzz Test
uses: ./.github/actions/fuzz-test
env:
CUSTOM_LIBFUZZER_PATH: /usr/lib/llvm-14/lib/libFuzzer.a
GT_MYSQL_ADDR: 127.0.0.1:4002
with:
target: ${{ matrix.target }}
max-total-time: 120
- name: Describe Nodes
if: failure()
shell: bash
run: |
kubectl describe nodes
- name: Export kind logs
if: failure()
shell: bash
run: |
kind export logs /tmp/kind
- name: Upload logs
if: failure()
uses: actions/upload-artifact@v4
with:
name: fuzz-tests-kind-logs-${{ matrix.mode.name }}-${{ matrix.target }}
path: /tmp/kind
retention-days: 3
- name: Delete cluster
if: success()
shell: bash
run: |
kind delete cluster
docker stop $(docker ps -a -q)
docker rm $(docker ps -a -q)
docker system prune -f
distributed-fuzztest-with-chaos:
name: Fuzz Test with Chaos (Distributed, ${{ matrix.mode.name }}, ${{ matrix.target }})
runs-on: ubuntu-latest
needs: build-greptime-ci
timeout-minutes: 60
strategy:
matrix:
target: ["fuzz_migrate_mito_regions", "fuzz_migrate_metric_regions", "fuzz_failover_mito_regions", "fuzz_failover_metric_regions"]
mode:
- name: "Remote WAL"
minio: true
kafka: true
values: "with-remote-wal.yaml"
include:
- target: "fuzz_migrate_mito_regions"
mode:
name: "Local WAL"
minio: true
kafka: false
values: "with-minio.yaml"
- target: "fuzz_migrate_metric_regions"
mode:
name: "Local WAL"
minio: true
kafka: false
values: "with-minio.yaml"
steps:
- name: Remove unused software
run: |
echo "Disk space before:"
df -h
[[ -d /usr/share/dotnet ]] && sudo rm -rf /usr/share/dotnet
[[ -d /usr/local/lib/android ]] && sudo rm -rf /usr/local/lib/android
[[ -d /opt/ghc ]] && sudo rm -rf /opt/ghc
[[ -d /opt/hostedtoolcache/CodeQL ]] && sudo rm -rf /opt/hostedtoolcache/CodeQL
sudo docker image prune --all --force
sudo docker builder prune -a
echo "Disk space after:"
df -h
- uses: actions/checkout@v4
- name: Setup Kind
uses: ./.github/actions/setup-kind
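      # Chaos Mesh injects the pod disruptions exercised by the migration/failover
      # fuzz targets.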
- name: Setup Chaos Mesh
uses: ./.github/actions/setup-chaos
- if: matrix.mode.minio
name: Setup Minio
uses: ./.github/actions/setup-minio
- if: matrix.mode.kafka
        name: Setup Kafka cluster
uses: ./.github/actions/setup-kafka-cluster
      - name: Setup Etcd cluster
uses: ./.github/actions/setup-etcd-cluster
      - name: Setup Postgres cluster
uses: ./.github/actions/setup-postgres-cluster
# Prepares for fuzz tests
- uses: arduino/setup-protoc@v3
with:
repo-token: ${{ secrets.GITHUB_TOKEN }}
- uses: actions-rust-lang/setup-rust-toolchain@v1
- name: Rust Cache
uses: Swatinem/rust-cache@v2
with:
# Shares across multiple jobs
shared-key: "fuzz-test-targets"
- name: Set Rust Fuzz
shell: bash
run: |
sudo apt-get install -y libfuzzer-14-dev
rustup install nightly
cargo +nightly install cargo-fuzz cargo-gc-bin
# Downloads ci image
      - name: Download pre-built binary
uses: actions/download-artifact@v4
with:
name: bin
path: .
- name: Unzip binary
run: |
tar -xvf ./bin.tar.gz
rm ./bin.tar.gz
- name: Build and push GreptimeDB image
uses: ./.github/actions/build-and-push-ci-image
- name: Wait for etcd
run: |
kubectl wait \
--for=condition=Ready \
pod -l app.kubernetes.io/instance=etcd \
--timeout=120s \
-n etcd-cluster
- if: matrix.mode.minio
name: Wait for minio
run: |
kubectl wait \
--for=condition=Ready \
pod -l app=minio \
--timeout=120s \
-n minio
- if: matrix.mode.kafka
name: Wait for kafka
run: |
kubectl wait \
--for=condition=Ready \
pod -l app.kubernetes.io/instance=kafka \
--timeout=120s \
-n kafka-cluster
- name: Print etcd info
shell: bash
run: kubectl get all --show-labels -n etcd-cluster
# Setup cluster for test
- name: Setup GreptimeDB cluster
uses: ./.github/actions/setup-greptimedb-cluster
with:
image-registry: localhost:5001
values-filename: ${{ matrix.mode.values }}
enable-region-failover: ${{ matrix.mode.kafka }}
- name: Port forward (mysql)
run: |
kubectl port-forward service/my-greptimedb-frontend 4002:4002 -n my-greptimedb&
- name: Fuzz Test
uses: ./.github/actions/fuzz-test
env:
CUSTOM_LIBFUZZER_PATH: /usr/lib/llvm-14/lib/libFuzzer.a
GT_MYSQL_ADDR: 127.0.0.1:4002
with:
target: ${{ matrix.target }}
max-total-time: 120
- name: Describe Nodes
if: failure()
shell: bash
run: |
kubectl describe nodes
- name: Export kind logs
if: failure()
shell: bash
run: |
kind export logs /tmp/kind
- name: Upload logs
if: failure()
uses: actions/upload-artifact@v4
with:
name: fuzz-tests-kind-logs-${{ matrix.mode.name }}-${{ matrix.target }}
path: /tmp/kind
retention-days: 3
- name: Delete cluster
if: success()
shell: bash
run: |
kind delete cluster
docker stop $(docker ps -a -q)
docker rm $(docker ps -a -q)
docker system prune -f
sqlness: sqlness:
name: Sqlness Test (${{ matrix.mode.name }}) name: Sqlness Test
needs: build if: github.event.pull_request.draft == false
runs-on: ${{ matrix.os }} runs-on: ${{ matrix.os }}
strategy: strategy:
matrix: matrix:
os: [ ubuntu-20.04 ] os: [ ubuntu-20.04-8-cores ]
mode:
- name: "Basic"
opts: ""
kafka: false
- name: "Remote WAL"
opts: "-w kafka -k 127.0.0.1:9092"
kafka: true
timeout-minutes: 60 timeout-minutes: 60
steps: steps:
- uses: actions/checkout@v4 - uses: actions/checkout@v3
- if: matrix.mode.kafka - uses: arduino/setup-protoc@v1
name: Setup kafka server
working-directory: tests-integration/fixtures/kafka
run: docker compose -f docker-compose-standalone.yml up -d --wait
- name: Download pre-built binaries
uses: actions/download-artifact@v4
with: with:
name: bins repo-token: ${{ secrets.GITHUB_TOKEN }}
path: . - uses: dtolnay/rust-toolchain@master
- name: Unzip binaries with:
run: tar -xvf ./bins.tar.gz toolchain: ${{ env.RUST_TOOLCHAIN }}
- name: Rust Cache
uses: Swatinem/rust-cache@v2
- name: Run sqlness - name: Run sqlness
run: RUST_BACKTRACE=1 ./bins/sqlness-runner ${{ matrix.mode.opts }} -c ./tests/cases --bins-dir ./bins --preserve-state run: cargo sqlness
- name: Upload sqlness logs - name: Upload sqlness logs
if: failure() if: always()
uses: actions/upload-artifact@v4 uses: actions/upload-artifact@v3
with: with:
name: sqlness-logs-${{ matrix.mode.name }} name: sqlness-logs
path: /tmp/sqlness* path: ${{ runner.temp }}/greptime-*.log
retention-days: 3 retention-days: 3
fmt: fmt:
name: Rustfmt name: Rustfmt
if: github.event.pull_request.draft == false
runs-on: ubuntu-20.04 runs-on: ubuntu-20.04
timeout-minutes: 60 timeout-minutes: 60
steps: steps:
- uses: actions/checkout@v4 - uses: actions/checkout@v3
- uses: arduino/setup-protoc@v3 - uses: arduino/setup-protoc@v1
with: with:
repo-token: ${{ secrets.GITHUB_TOKEN }} repo-token: ${{ secrets.GITHUB_TOKEN }}
- uses: actions-rust-lang/setup-rust-toolchain@v1 - uses: dtolnay/rust-toolchain@master
with: with:
toolchain: ${{ env.RUST_TOOLCHAIN }}
components: rustfmt components: rustfmt
- name: Rust Cache - name: Rust Cache
uses: Swatinem/rust-cache@v2 uses: Swatinem/rust-cache@v2
with: - name: Run cargo fmt
# Shares across multiple jobs run: cargo fmt --all -- --check
shared-key: "check-rust-fmt"
- name: Check format
run: make fmt-check
clippy: clippy:
name: Clippy name: Clippy
if: github.event.pull_request.draft == false
runs-on: ubuntu-20.04 runs-on: ubuntu-20.04
timeout-minutes: 60 timeout-minutes: 60
steps: steps:
- uses: actions/checkout@v4 - uses: actions/checkout@v3
- uses: arduino/setup-protoc@v3 - uses: arduino/setup-protoc@v1
with: with:
repo-token: ${{ secrets.GITHUB_TOKEN }} repo-token: ${{ secrets.GITHUB_TOKEN }}
- uses: actions-rust-lang/setup-rust-toolchain@v1 - uses: dtolnay/rust-toolchain@master
with: with:
toolchain: ${{ env.RUST_TOOLCHAIN }}
components: clippy components: clippy
- name: Rust Cache - name: Rust Cache
uses: Swatinem/rust-cache@v2 uses: Swatinem/rust-cache@v2
with:
# Shares across multiple jobs
# Shares with `Check` job
shared-key: "check-lint"
- name: Run cargo clippy - name: Run cargo clippy
run: make clippy run: cargo clippy --workspace --all-targets -- -D warnings
coverage: coverage:
if: github.event.pull_request.draft == false if: github.event.pull_request.draft == false
runs-on: ubuntu-20.04-8-cores runs-on: ubuntu-20.04-8-cores
timeout-minutes: 60 timeout-minutes: 60
steps: steps:
- uses: actions/checkout@v4 - uses: actions/checkout@v3
- uses: arduino/setup-protoc@v3 - uses: arduino/setup-protoc@v1
with: with:
repo-token: ${{ secrets.GITHUB_TOKEN }} repo-token: ${{ secrets.GITHUB_TOKEN }}
- uses: KyleMayes/install-llvm-action@v1 - uses: KyleMayes/install-llvm-action@v1
with: with:
version: "14.0" version: "14.0"
- name: Install toolchain - name: Install toolchain
uses: actions-rust-lang/setup-rust-toolchain@v1 uses: dtolnay/rust-toolchain@master
with: with:
toolchain: ${{ env.RUST_TOOLCHAIN }}
components: llvm-tools-preview components: llvm-tools-preview
- name: Rust Cache - name: Rust Cache
uses: Swatinem/rust-cache@v2 uses: Swatinem/rust-cache@v2
with:
          # Shares across multiple jobs
shared-key: "coverage-test"
- name: Docker Cache
uses: ScribeMD/docker-cache@0.3.7
with:
key: docker-${{ runner.os }}-coverage
- name: Install latest nextest release - name: Install latest nextest release
uses: taiki-e/install-action@nextest uses: taiki-e/install-action@nextest
- name: Install cargo-llvm-cov - name: Install cargo-llvm-cov
uses: taiki-e/install-action@cargo-llvm-cov uses: taiki-e/install-action@cargo-llvm-cov
- name: Install Python - name: Install Python
uses: actions/setup-python@v5 uses: actions/setup-python@v4
with: with:
python-version: '3.10' python-version: '3.10'
- name: Install PyArrow Package - name: Install PyArrow Package
run: pip install pyarrow numpy run: pip install pyarrow
- name: Setup etcd server - name: Setup etcd server
working-directory: tests-integration/fixtures/etcd working-directory: tests-integration/fixtures/etcd
run: docker compose -f docker-compose-standalone.yml up -d --wait run: docker compose -f docker-compose-standalone.yml up -d --wait
- name: Setup kafka server
working-directory: tests-integration/fixtures/kafka
run: docker compose -f docker-compose-standalone.yml up -d --wait
- name: Setup minio
working-directory: tests-integration/fixtures/minio
run: docker compose -f docker-compose-standalone.yml up -d --wait
- name: Setup postgres server
working-directory: tests-integration/fixtures/postgres
run: docker compose -f docker-compose-standalone.yml up -d --wait
- name: Run nextest cases - name: Run nextest cases
run: cargo llvm-cov nextest --workspace --lcov --output-path lcov.info -F pyo3_backend -F dashboard run: cargo llvm-cov nextest --workspace --lcov --output-path lcov.info -F pyo3_backend -F dashboard
env: env:
CARGO_BUILD_RUSTFLAGS: "-C link-arg=-fuse-ld=lld" CARGO_BUILD_RUSTFLAGS: "-C link-arg=-fuse-ld=lld"
RUST_BACKTRACE: 1 RUST_BACKTRACE: 1
CARGO_INCREMENTAL: 0 CARGO_INCREMENTAL: 0
GT_S3_BUCKET: ${{ vars.AWS_CI_TEST_BUCKET }} GT_S3_BUCKET: ${{ secrets.S3_BUCKET }}
GT_S3_ACCESS_KEY_ID: ${{ secrets.AWS_CI_TEST_ACCESS_KEY_ID }} GT_S3_ACCESS_KEY_ID: ${{ secrets.S3_ACCESS_KEY_ID }}
GT_S3_ACCESS_KEY: ${{ secrets.AWS_CI_TEST_SECRET_ACCESS_KEY }} GT_S3_ACCESS_KEY: ${{ secrets.S3_ACCESS_KEY }}
GT_S3_REGION: ${{ vars.AWS_CI_TEST_BUCKET_REGION }} GT_S3_REGION: ${{ secrets.S3_REGION }}
GT_MINIO_BUCKET: greptime
GT_MINIO_ACCESS_KEY_ID: superpower_ci_user
GT_MINIO_ACCESS_KEY: superpower_password
GT_MINIO_REGION: us-west-2
GT_MINIO_ENDPOINT_URL: http://127.0.0.1:9000
GT_ETCD_ENDPOINTS: http://127.0.0.1:2379 GT_ETCD_ENDPOINTS: http://127.0.0.1:2379
GT_POSTGRES_ENDPOINTS: postgres://greptimedb:admin@127.0.0.1:5432/postgres
GT_KAFKA_ENDPOINTS: 127.0.0.1:9092
GT_KAFKA_SASL_ENDPOINTS: 127.0.0.1:9093
UNITTEST_LOG_DIR: "__unittest_logs" UNITTEST_LOG_DIR: "__unittest_logs"
- name: Codecov upload - name: Codecov upload
uses: codecov/codecov-action@v4 uses: codecov/codecov-action@v2
with: with:
token: ${{ secrets.CODECOV_TOKEN }} token: ${{ secrets.CODECOV_TOKEN }}
files: ./lcov.info files: ./lcov.info
flags: rust flags: rust
fail_ci_if_error: false fail_ci_if_error: false
verbose: true verbose: true
# compat:
# name: Compatibility Test
# needs: build
# runs-on: ubuntu-20.04
# timeout-minutes: 60
# steps:
# - uses: actions/checkout@v4
# - name: Download pre-built binaries
# uses: actions/download-artifact@v4
# with:
# name: bins
# path: .
# - name: Unzip binaries
# run: |
# mkdir -p ./bins/current
# tar -xvf ./bins.tar.gz --strip-components=1 -C ./bins/current
# - run: ./tests/compat/test-compat.sh 0.6.0

39
.github/workflows/doc-issue.yml vendored Normal file
View File

@@ -0,0 +1,39 @@
name: Create Issue in downstream repos
on:
issues:
types:
- labeled
pull_request_target:
types:
- labeled
jobs:
doc_issue:
if: github.event.label.name == 'doc update required'
runs-on: ubuntu-20.04
steps:
- name: create an issue in doc repo
uses: dacbd/create-issue-action@main
with:
owner: GreptimeTeam
repo: docs
token: ${{ secrets.DOCS_REPO_TOKEN }}
title: Update docs for ${{ github.event.issue.title || github.event.pull_request.title }}
body: |
A document change request is generated from
${{ github.event.issue.html_url || github.event.pull_request.html_url }}
cloud_issue:
if: github.event.label.name == 'cloud followup required'
runs-on: ubuntu-20.04
steps:
- name: create an issue in cloud repo
uses: dacbd/create-issue-action@main
with:
owner: GreptimeTeam
repo: greptimedb-cloud
token: ${{ secrets.DOCS_REPO_TOKEN }}
title: Followup changes in ${{ github.event.issue.title || github.event.pull_request.title }}
body: |
A followup request is generated from
${{ github.event.issue.html_url || github.event.pull_request.html_url }}

View File

@@ -1,22 +0,0 @@
name: Follow Up Docs
on:
pull_request_target:
types: [opened, edited]
permissions:
pull-requests: write
contents: read
jobs:
docbot:
runs-on: ubuntu-20.04
timeout-minutes: 10
steps:
- uses: actions/checkout@v4
- uses: ./.github/actions/setup-cyborg
- name: Maybe Follow Up Docs Issue
working-directory: cyborg
run: pnpm tsx bin/follow-up-docs-issue.ts
env:
DOCS_REPO_TOKEN: ${{ secrets.DOCS_REPO_TOKEN }}
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}

View File

@@ -9,9 +9,9 @@ on:
- '.dockerignore' - '.dockerignore'
- 'docker/**' - 'docker/**'
- '.gitignore' - '.gitignore'
- 'grafana/**'
push: push:
branches: branches:
- develop
- main - main
paths: paths:
- 'docs/**' - 'docs/**'
@@ -20,7 +20,6 @@ on:
- '.dockerignore' - '.dockerignore'
- 'docker/**' - 'docker/**'
- '.gitignore' - '.gitignore'
- 'grafana/**'
workflow_dispatch: workflow_dispatch:
name: CI name: CI
@@ -33,47 +32,39 @@ jobs:
name: Spell Check with Typos name: Spell Check with Typos
runs-on: ubuntu-20.04 runs-on: ubuntu-20.04
steps: steps:
- uses: actions/checkout@v4 - uses: actions/checkout@v3
- uses: crate-ci/typos@master - uses: crate-ci/typos@v1.13.10
license-header-check:
runs-on: ubuntu-20.04
name: Check License Header
steps:
- uses: actions/checkout@v4
- uses: korandoru/hawkeye@v5
check: check:
name: Check name: Check
if: github.event.pull_request.draft == false
runs-on: ubuntu-20.04 runs-on: ubuntu-20.04
steps: steps:
- run: 'echo "No action required"' - run: 'echo "No action required"'
fmt: fmt:
name: Rustfmt name: Rustfmt
if: github.event.pull_request.draft == false
runs-on: ubuntu-20.04 runs-on: ubuntu-20.04
steps: steps:
- run: 'echo "No action required"' - run: 'echo "No action required"'
clippy: clippy:
name: Clippy name: Clippy
if: github.event.pull_request.draft == false
runs-on: ubuntu-20.04 runs-on: ubuntu-20.04
steps: steps:
- run: 'echo "No action required"' - run: 'echo "No action required"'
coverage: coverage:
if: github.event.pull_request.draft == false
runs-on: ubuntu-20.04 runs-on: ubuntu-20.04
steps: steps:
- run: 'echo "No action required"' - run: 'echo "No action required"'
sqlness: sqlness:
name: Sqlness Test (${{ matrix.mode.name }}) name: Sqlness Test
runs-on: ${{ matrix.os }} if: github.event.pull_request.draft == false
strategy: runs-on: ubuntu-20.04
matrix:
os: [ ubuntu-20.04 ]
mode:
- name: "Basic"
- name: "Remote WAL"
steps: steps:
- run: 'echo "No action required"' - run: 'echo "No action required"'

16
.github/workflows/license.yaml vendored Normal file
View File

@@ -0,0 +1,16 @@
name: License checker
on:
push:
branches:
- develop
pull_request:
types: [opened, synchronize, reopened, ready_for_review]
jobs:
license-header-check:
runs-on: ubuntu-20.04
name: license-header-check
steps:
- uses: actions/checkout@v2
- name: Check License Header
uses: korandoru/hawkeye@v3

View File

@@ -66,13 +66,6 @@ env:
NIGHTLY_RELEASE_PREFIX: nightly NIGHTLY_RELEASE_PREFIX: nightly
# Use the different image name to avoid conflict with the release images.
# The DockerHub image will be greptime/greptimedb-nightly.
IMAGE_NAME: greptimedb-nightly
permissions:
issues: write
jobs: jobs:
allocate-runners: allocate-runners:
name: Allocate runners name: Allocate runners
@@ -92,7 +85,7 @@ jobs:
version: ${{ steps.create-version.outputs.version }} version: ${{ steps.create-version.outputs.version }}
steps: steps:
- name: Checkout - name: Checkout
uses: actions/checkout@v4 uses: actions/checkout@v3
with: with:
fetch-depth: 0 fetch-depth: 0
@@ -144,7 +137,7 @@ jobs:
] ]
runs-on: ${{ needs.allocate-runners.outputs.linux-amd64-runner }} runs-on: ${{ needs.allocate-runners.outputs.linux-amd64-runner }}
steps: steps:
- uses: actions/checkout@v4 - uses: actions/checkout@v3
with: with:
fetch-depth: 0 fetch-depth: 0
@@ -154,8 +147,6 @@ jobs:
cargo-profile: ${{ env.CARGO_PROFILE }} cargo-profile: ${{ env.CARGO_PROFILE }}
version: ${{ needs.allocate-runners.outputs.version }} version: ${{ needs.allocate-runners.outputs.version }}
disable-run-tests: ${{ env.DISABLE_RUN_TESTS }} disable-run-tests: ${{ env.DISABLE_RUN_TESTS }}
image-registry: ${{ vars.ECR_IMAGE_REGISTRY }}
image-namespace: ${{ vars.ECR_IMAGE_NAMESPACE }}
build-linux-arm64-artifacts: build-linux-arm64-artifacts:
name: Build linux-arm64 artifacts name: Build linux-arm64 artifacts
@@ -165,7 +156,7 @@ jobs:
] ]
runs-on: ${{ needs.allocate-runners.outputs.linux-arm64-runner }} runs-on: ${{ needs.allocate-runners.outputs.linux-arm64-runner }}
steps: steps:
- uses: actions/checkout@v4 - uses: actions/checkout@v3
with: with:
fetch-depth: 0 fetch-depth: 0
@@ -175,8 +166,6 @@ jobs:
cargo-profile: ${{ env.CARGO_PROFILE }} cargo-profile: ${{ env.CARGO_PROFILE }}
version: ${{ needs.allocate-runners.outputs.version }} version: ${{ needs.allocate-runners.outputs.version }}
disable-run-tests: ${{ env.DISABLE_RUN_TESTS }} disable-run-tests: ${{ env.DISABLE_RUN_TESTS }}
image-registry: ${{ vars.ECR_IMAGE_REGISTRY }}
image-namespace: ${{ vars.ECR_IMAGE_NAMESPACE }}
release-images-to-dockerhub: release-images-to-dockerhub:
name: Build and push images to DockerHub name: Build and push images to DockerHub
@@ -190,7 +179,7 @@ jobs:
outputs: outputs:
nightly-build-result: ${{ steps.set-nightly-build-result.outputs.nightly-build-result }} nightly-build-result: ${{ steps.set-nightly-build-result.outputs.nightly-build-result }}
steps: steps:
- uses: actions/checkout@v4 - uses: actions/checkout@v3
with: with:
fetch-depth: 0 fetch-depth: 0
@@ -199,11 +188,10 @@ jobs:
with: with:
image-registry: docker.io image-registry: docker.io
image-namespace: ${{ vars.IMAGE_NAMESPACE }} image-namespace: ${{ vars.IMAGE_NAMESPACE }}
image-name: ${{ env.IMAGE_NAME }}
image-registry-username: ${{ secrets.DOCKERHUB_USERNAME }} image-registry-username: ${{ secrets.DOCKERHUB_USERNAME }}
image-registry-password: ${{ secrets.DOCKERHUB_TOKEN }} image-registry-password: ${{ secrets.DOCKERHUB_TOKEN }}
version: ${{ needs.allocate-runners.outputs.version }} version: ${{ needs.allocate-runners.outputs.version }}
push-latest-tag: true push-latest-tag: false # Don't push the latest tag to registry.
- name: Set nightly build result - name: Set nightly build result
id: set-nightly-build-result id: set-nightly-build-result
@@ -223,7 +211,7 @@ jobs:
# The ACR have daily sync with DockerHub, so don't worry about the image not being updated. # The ACR have daily sync with DockerHub, so don't worry about the image not being updated.
continue-on-error: true continue-on-error: true
steps: steps:
- uses: actions/checkout@v4 - uses: actions/checkout@v3
with: with:
fetch-depth: 0 fetch-depth: 0
@@ -232,7 +220,7 @@ jobs:
with: with:
src-image-registry: docker.io src-image-registry: docker.io
src-image-namespace: ${{ vars.IMAGE_NAMESPACE }} src-image-namespace: ${{ vars.IMAGE_NAMESPACE }}
src-image-name: ${{ env.IMAGE_NAME }} src-image-name: greptimedb
dst-image-registry-username: ${{ secrets.ALICLOUD_USERNAME }} dst-image-registry-username: ${{ secrets.ALICLOUD_USERNAME }}
dst-image-registry-password: ${{ secrets.ALICLOUD_PASSWORD }} dst-image-registry-password: ${{ secrets.ALICLOUD_PASSWORD }}
dst-image-registry: ${{ vars.ACR_IMAGE_REGISTRY }} dst-image-registry: ${{ vars.ACR_IMAGE_REGISTRY }}
@@ -244,7 +232,7 @@ jobs:
aws-cn-region: ${{ vars.AWS_RELEASE_BUCKET_REGION }} aws-cn-region: ${{ vars.AWS_RELEASE_BUCKET_REGION }}
dev-mode: false dev-mode: false
update-version-info: false # Don't update version info in S3. update-version-info: false # Don't update version info in S3.
push-latest-tag: true push-latest-tag: false # Don't push the latest tag to registry.
stop-linux-amd64-runner: # It's always run as the last job in the workflow to make sure that the runner is released. stop-linux-amd64-runner: # It's always run as the last job in the workflow to make sure that the runner is released.
name: Stop linux-amd64 runner name: Stop linux-amd64 runner
@@ -257,7 +245,7 @@ jobs:
] ]
steps: steps:
- name: Checkout - name: Checkout
uses: actions/checkout@v4 uses: actions/checkout@v3
with: with:
fetch-depth: 0 fetch-depth: 0
@@ -282,7 +270,7 @@ jobs:
] ]
steps: steps:
- name: Checkout - name: Checkout
uses: actions/checkout@v4 uses: actions/checkout@v3
with: with:
fetch-depth: 0 fetch-depth: 0
@@ -297,7 +285,7 @@ jobs:
github-token: ${{ secrets.GH_PERSONAL_ACCESS_TOKEN }} github-token: ${{ secrets.GH_PERSONAL_ACCESS_TOKEN }}
notification: notification:
if: ${{ github.repository == 'GreptimeTeam/greptimedb' && always() }} # Not requiring successful dependent jobs, always run. if: ${{ always() }} # Not requiring successful dependent jobs, always run.
name: Send notification to Greptime team name: Send notification to Greptime team
needs: [ needs: [
release-images-to-dockerhub release-images-to-dockerhub
@@ -306,25 +294,16 @@ jobs:
env: env:
SLACK_WEBHOOK_URL: ${{ secrets.SLACK_WEBHOOK_URL_DEVELOP_CHANNEL }} SLACK_WEBHOOK_URL: ${{ secrets.SLACK_WEBHOOK_URL_DEVELOP_CHANNEL }}
steps: steps:
- uses: actions/checkout@v4 - name: Notifiy nightly build successful result
- uses: ./.github/actions/setup-cyborg
- name: Report CI status
id: report-ci-status
working-directory: cyborg
run: pnpm tsx bin/report-ci-failure.ts
env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
CI_REPORT_STATUS: ${{ needs.release-images-to-dockerhub.outputs.nightly-build-result == 'success' }}
- name: Notify nightly build successful result
uses: slackapi/slack-github-action@v1.23.0 uses: slackapi/slack-github-action@v1.23.0
if: ${{ needs.release-images-to-dockerhub.outputs.nightly-build-result == 'success' }} if: ${{ needs.release-images-to-dockerhub.outputs.nightly-build-result == 'success' }}
with: with:
payload: | payload: |
{"text": "GreptimeDB's ${{ env.NEXT_RELEASE_VERSION }} build has completed successfully."} {"text": "GreptimeDB's ${{ env.NEXT_RELEASE_VERSION }} build has completed successfully."}
- name: Notify nightly build failed result - name: Notifiy nightly build failed result
uses: slackapi/slack-github-action@v1.23.0 uses: slackapi/slack-github-action@v1.23.0
if: ${{ needs.release-images-to-dockerhub.outputs.nightly-build-result != 'success' }} if: ${{ needs.release-images-to-dockerhub.outputs.nightly-build-result != 'success' }}
with: with:
payload: | payload: |
{"text": "GreptimeDB's ${{ env.NEXT_RELEASE_VERSION }} build has failed, please check ${{ steps.report-ci-status.outputs.html_url }}."} {"text": "GreptimeDB's ${{ env.NEXT_RELEASE_VERSION }} build has failed, please check 'https://github.com/GreptimeTeam/greptimedb/actions/workflows/${{ env.NEXT_RELEASE_VERSION }}-build.yml'."}

View File

@@ -1,6 +1,8 @@
# Nightly CI: runs tests every night for our second tier platforms (Windows)
on: on:
schedule: schedule:
- cron: "0 23 * * 1-5" - cron: '0 23 * * 1-5'
workflow_dispatch: workflow_dispatch:
name: Nightly CI name: Nightly CI
@@ -9,90 +11,69 @@ concurrency:
group: ${{ github.workflow }}-${{ github.head_ref || github.run_id }} group: ${{ github.workflow }}-${{ github.head_ref || github.run_id }}
cancel-in-progress: true cancel-in-progress: true
permissions: env:
issues: write RUST_TOOLCHAIN: nightly-2023-10-21
jobs: jobs:
sqlness-test: sqlness:
name: Run sqlness test name: Sqlness Test
if: ${{ github.repository == 'GreptimeTeam/greptimedb' }} runs-on: ${{ matrix.os }}
runs-on: ubuntu-22.04 strategy:
steps: matrix:
- name: Checkout os: [ windows-latest-8-cores ]
uses: actions/checkout@v4
with:
fetch-depth: 0
- name: Run sqlness test
uses: ./.github/actions/sqlness-test
with:
data-root: sqlness-test
aws-ci-test-bucket: ${{ vars.AWS_CI_TEST_BUCKET }}
aws-region: ${{ vars.AWS_CI_TEST_BUCKET_REGION }}
aws-access-key-id: ${{ secrets.AWS_CI_TEST_ACCESS_KEY_ID }}
aws-secret-access-key: ${{ secrets.AWS_CI_TEST_SECRET_ACCESS_KEY }}
- name: Upload sqlness logs
if: failure()
uses: actions/upload-artifact@v4
with:
name: sqlness-logs-kind
path: /tmp/kind/
retention-days: 3
sqlness-windows:
name: Sqlness tests on Windows
if: ${{ github.repository == 'GreptimeTeam/greptimedb' }}
runs-on: windows-2022-8-cores
timeout-minutes: 60 timeout-minutes: 60
steps: steps:
- uses: actions/checkout@v4 - uses: actions/checkout@v4.1.0
- uses: ./.github/actions/setup-cyborg - uses: arduino/setup-protoc@v1
- uses: arduino/setup-protoc@v3
with: with:
repo-token: ${{ secrets.GITHUB_TOKEN }} repo-token: ${{ secrets.GITHUB_TOKEN }}
- uses: actions-rust-lang/setup-rust-toolchain@v1 - uses: dtolnay/rust-toolchain@master
with:
toolchain: ${{ env.RUST_TOOLCHAIN }}
- name: Rust Cache - name: Rust Cache
uses: Swatinem/rust-cache@v2 uses: Swatinem/rust-cache@v2
- name: Run sqlness - name: Run sqlness
run: make sqlness-test run: cargo sqlness
env: - name: Notify slack if failed
SQLNESS_OPTS: "--preserve-state"
- name: Upload sqlness logs
if: failure() if: failure()
uses: actions/upload-artifact@v4 uses: slackapi/slack-github-action@v1.23.0
env:
SLACK_WEBHOOK_URL: ${{ secrets.SLACK_WEBHOOK_URL_DEVELOP_CHANNEL }}
with:
payload: |
{"text": "Nightly CI failed for sqlness tests"}
- name: Upload sqlness logs
if: always()
uses: actions/upload-artifact@v3
with: with:
name: sqlness-logs name: sqlness-logs
path: C:\Users\RUNNER~1\AppData\Local\Temp\sqlness* path: ${{ runner.temp }}/greptime-*.log
retention-days: 3 retention-days: 3
test-on-windows: test-on-windows:
name: Run tests on Windows runs-on: windows-latest-8-cores
if: ${{ github.repository == 'GreptimeTeam/greptimedb' }}
runs-on: windows-2022-8-cores
timeout-minutes: 60 timeout-minutes: 60
steps: steps:
- run: git config --global core.autocrlf false - run: git config --global core.autocrlf false
- uses: actions/checkout@v4 - uses: actions/checkout@v4.1.0
- uses: ./.github/actions/setup-cyborg - uses: arduino/setup-protoc@v1
- uses: arduino/setup-protoc@v3
with: with:
repo-token: ${{ secrets.GITHUB_TOKEN }} repo-token: ${{ secrets.GITHUB_TOKEN }}
- uses: KyleMayes/install-llvm-action@v1
with:
version: "14.0"
- name: Install Rust toolchain - name: Install Rust toolchain
uses: actions-rust-lang/setup-rust-toolchain@v1 uses: dtolnay/rust-toolchain@master
with: with:
toolchain: ${{ env.RUST_TOOLCHAIN }}
components: llvm-tools-preview components: llvm-tools-preview
- name: Rust Cache - name: Rust Cache
uses: Swatinem/rust-cache@v2 uses: Swatinem/rust-cache@v2
- name: Install Cargo Nextest - name: Install Cargo Nextest
uses: taiki-e/install-action@nextest uses: taiki-e/install-action@nextest
- name: Install Python - name: Install Python
uses: actions/setup-python@v5 uses: actions/setup-python@v4
with: with:
python-version: "3.10" python-version: '3.10'
- name: Install PyArrow Package - name: Install PyArrow Package
run: pip install pyarrow numpy run: pip install pyarrow
- name: Install WSL distribution - name: Install WSL distribution
uses: Vampire/setup-wsl@v2 uses: Vampire/setup-wsl@v2
with: with:
@@ -100,56 +81,18 @@ jobs:
- name: Running tests - name: Running tests
run: cargo nextest run -F pyo3_backend,dashboard run: cargo nextest run -F pyo3_backend,dashboard
env: env:
CARGO_BUILD_RUSTFLAGS: "-C linker=lld-link"
RUST_BACKTRACE: 1 RUST_BACKTRACE: 1
CARGO_INCREMENTAL: 0 CARGO_INCREMENTAL: 0
RUSTUP_WINDOWS_PATH_ADD_BIN: 1 # Workaround for https://github.com/nextest-rs/nextest/issues/1493 GT_S3_BUCKET: ${{ secrets.S3_BUCKET }}
GT_S3_BUCKET: ${{ vars.AWS_CI_TEST_BUCKET }} GT_S3_ACCESS_KEY_ID: ${{ secrets.S3_ACCESS_KEY_ID }}
GT_S3_ACCESS_KEY_ID: ${{ secrets.AWS_CI_TEST_ACCESS_KEY_ID }} GT_S3_ACCESS_KEY: ${{ secrets.S3_ACCESS_KEY }}
GT_S3_ACCESS_KEY: ${{ secrets.AWS_CI_TEST_SECRET_ACCESS_KEY }} GT_S3_REGION: ${{ secrets.S3_REGION }}
GT_S3_REGION: ${{ vars.AWS_CI_TEST_BUCKET_REGION }}
UNITTEST_LOG_DIR: "__unittest_logs" UNITTEST_LOG_DIR: "__unittest_logs"
- name: Notify slack if failed
check-status: if: failure()
name: Check status uses: slackapi/slack-github-action@v1.23.0
needs: [sqlness-test, sqlness-windows, test-on-windows]
if: ${{ github.repository == 'GreptimeTeam/greptimedb' }}
runs-on: ubuntu-20.04
outputs:
check-result: ${{ steps.set-check-result.outputs.check-result }}
steps:
- name: Set check result
id: set-check-result
run: |
echo "check-result=success" >> $GITHUB_OUTPUT
notification:
if: ${{ github.repository == 'GreptimeTeam/greptimedb' && always() }} # Not requiring successful dependent jobs, always run.
name: Send notification to Greptime team
needs: [check-status]
runs-on: ubuntu-20.04
env: env:
SLACK_WEBHOOK_URL: ${{ secrets.SLACK_WEBHOOK_URL_DEVELOP_CHANNEL }} SLACK_WEBHOOK_URL: ${{ secrets.SLACK_WEBHOOK_URL_DEVELOP_CHANNEL }}
steps:
- uses: actions/checkout@v4
- uses: ./.github/actions/setup-cyborg
- name: Report CI status
id: report-ci-status
working-directory: cyborg
run: pnpm tsx bin/report-ci-failure.ts
env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
CI_REPORT_STATUS: ${{ needs.check-status.outputs.check-result == 'success' }}
- name: Notify dev build successful result
uses: slackapi/slack-github-action@v1.23.0
if: ${{ needs.check-status.outputs.check-result == 'success' }}
with: with:
payload: | payload: |
{"text": "Nightly CI has completed successfully."} {"text": "Nightly CI failed for cargo test"}
- name: Notify dev build failed result
uses: slackapi/slack-github-action@v1.23.0
if: ${{ needs.check-status.outputs.check-result != 'success' }}
with:
payload: |
{"text": "Nightly CI failed has failed, please check ${{ steps.report-ci-status.outputs.html_url }}."}

View File

@@ -0,0 +1,26 @@
name: Nightly functional tests
on:
schedule:
# At 00:00 on Tuesday.
- cron: '0 0 * * 2'
workflow_dispatch:
jobs:
sqlness-test:
name: Run sqlness test
runs-on: ubuntu-22.04
steps:
- name: Checkout
uses: actions/checkout@v3
with:
fetch-depth: 0
- name: Run sqlness test
uses: ./.github/actions/sqlness-test
with:
data-root: sqlness-test
aws-ci-test-bucket: ${{ vars.AWS_CI_TEST_BUCKET }}
aws-region: ${{ vars.AWS_CI_TEST_BUCKET_REGION }}
aws-access-key-id: ${{ secrets.AWS_CI_TEST_ACCESS_KEY_ID }}
aws-secret-access-key: ${{ secrets.AWS_CI_TEST_SECRET_ACCESS_KEY }}

29
.github/workflows/pr-title-checker.yml vendored Normal file
View File

@@ -0,0 +1,29 @@
name: "PR Title Checker"
on:
pull_request_target:
types:
- opened
- edited
- synchronize
- labeled
- unlabeled
jobs:
check:
runs-on: ubuntu-20.04
timeout-minutes: 10
steps:
- uses: thehanimo/pr-title-checker@v1.3.4
with:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
pass_on_octokit_error: false
configuration_path: ".github/pr-title-checker-config.json"
breaking:
runs-on: ubuntu-20.04
timeout-minutes: 10
steps:
- uses: thehanimo/pr-title-checker@v1.3.4
with:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
pass_on_octokit_error: false
configuration_path: ".github/pr-title-breaking-change-label-config.json"

View File

@@ -1,14 +1,12 @@
name: Release dev-builder images name: Release dev-builder images
on: on:
push:
branches:
- main
paths:
- rust-toolchain.toml
- 'docker/dev-builder/**'
workflow_dispatch: # Allows you to run this workflow manually. workflow_dispatch: # Allows you to run this workflow manually.
inputs: inputs:
version:
description: Version of the dev-builder
required: false
default: latest
release_dev_builder_ubuntu_image: release_dev_builder_ubuntu_image:
type: boolean type: boolean
description: Release dev-builder-ubuntu image description: Release dev-builder-ubuntu image
@@ -30,103 +28,22 @@ jobs:
name: Release dev builder images name: Release dev builder images
if: ${{ inputs.release_dev_builder_ubuntu_image || inputs.release_dev_builder_centos_image || inputs.release_dev_builder_android_image }} # Only manually trigger this job. if: ${{ inputs.release_dev_builder_ubuntu_image || inputs.release_dev_builder_centos_image || inputs.release_dev_builder_android_image }} # Only manually trigger this job.
runs-on: ubuntu-20.04-16-cores runs-on: ubuntu-20.04-16-cores
outputs:
version: ${{ steps.set-version.outputs.version }}
steps: steps:
- name: Checkout - name: Checkout
uses: actions/checkout@v4 uses: actions/checkout@v3
with: with:
fetch-depth: 0 fetch-depth: 0
- name: Configure build image version
id: set-version
shell: bash
run: |
commitShortSHA=`echo ${{ github.sha }} | cut -c1-8`
buildTime=`date +%Y%m%d%H%M%S`
BUILD_VERSION="$commitShortSHA-$buildTime"
RUST_TOOLCHAIN_VERSION=$(cat rust-toolchain.toml | grep -Eo '[0-9]{4}-[0-9]{2}-[0-9]{2}')
IMAGE_VERSION="${RUST_TOOLCHAIN_VERSION}-${BUILD_VERSION}"
echo "VERSION=${IMAGE_VERSION}" >> $GITHUB_ENV
echo "version=$IMAGE_VERSION" >> $GITHUB_OUTPUT
- name: Build and push dev builder images - name: Build and push dev builder images
uses: ./.github/actions/build-dev-builder-images uses: ./.github/actions/build-dev-builder-images
with: with:
version: ${{ env.VERSION }} version: ${{ inputs.version }}
dockerhub-image-registry-username: ${{ secrets.DOCKERHUB_USERNAME }} dockerhub-image-registry-username: ${{ secrets.DOCKERHUB_USERNAME }}
dockerhub-image-registry-token: ${{ secrets.DOCKERHUB_TOKEN }} dockerhub-image-registry-token: ${{ secrets.DOCKERHUB_TOKEN }}
build-dev-builder-ubuntu: ${{ inputs.release_dev_builder_ubuntu_image }} build-dev-builder-ubuntu: ${{ inputs.release_dev_builder_ubuntu_image }}
build-dev-builder-centos: ${{ inputs.release_dev_builder_centos_image }} build-dev-builder-centos: ${{ inputs.release_dev_builder_centos_image }}
build-dev-builder-android: ${{ inputs.release_dev_builder_android_image }} build-dev-builder-android: ${{ inputs.release_dev_builder_android_image }}
release-dev-builder-images-ecr:
name: Release dev builder images to AWS ECR
runs-on: ubuntu-20.04
needs: [
release-dev-builder-images
]
steps:
- name: Configure AWS credentials
uses: aws-actions/configure-aws-credentials@v4
with:
aws-access-key-id: ${{ secrets.AWS_ECR_ACCESS_KEY_ID }}
aws-secret-access-key: ${{ secrets.AWS_ECR_SECRET_ACCESS_KEY }}
aws-region: ${{ vars.ECR_REGION }}
- name: Login to Amazon ECR
id: login-ecr-public
uses: aws-actions/amazon-ecr-login@v2
env:
AWS_REGION: ${{ vars.ECR_REGION }}
with:
registry-type: public
- name: Push dev-builder-ubuntu image
shell: bash
if: ${{ inputs.release_dev_builder_ubuntu_image }}
run: |
docker run -v "${DOCKER_CONFIG:-$HOME/.docker}:/root/.docker:ro" \
-e "REGISTRY_AUTH_FILE=/root/.docker/config.json" \
quay.io/skopeo/stable:latest \
copy -a docker://docker.io/${{ vars.IMAGE_NAMESPACE }}/dev-builder-ubuntu:${{ needs.release-dev-builder-images.outputs.version }} \
docker://${{ vars.ECR_IMAGE_REGISTRY }}/${{ vars.ECR_IMAGE_NAMESPACE }}/dev-builder-ubuntu:${{ needs.release-dev-builder-images.outputs.version }}
docker run -v "${DOCKER_CONFIG:-$HOME/.docker}:/root/.docker:ro" \
-e "REGISTRY_AUTH_FILE=/root/.docker/config.json" \
quay.io/skopeo/stable:latest \
copy -a docker://docker.io/${{ vars.IMAGE_NAMESPACE }}/dev-builder-ubuntu:latest \
docker://${{ vars.ECR_IMAGE_REGISTRY }}/${{ vars.ECR_IMAGE_NAMESPACE }}/dev-builder-ubuntu:latest
- name: Push dev-builder-centos image
shell: bash
if: ${{ inputs.release_dev_builder_centos_image }}
run: |
docker run -v "${DOCKER_CONFIG:-$HOME/.docker}:/root/.docker:ro" \
-e "REGISTRY_AUTH_FILE=/root/.docker/config.json" \
quay.io/skopeo/stable:latest \
copy -a docker://docker.io/${{ vars.IMAGE_NAMESPACE }}/dev-builder-centos:${{ needs.release-dev-builder-images.outputs.version }} \
docker://${{ vars.ECR_IMAGE_REGISTRY }}/${{ vars.ECR_IMAGE_NAMESPACE }}/dev-builder-centos:${{ needs.release-dev-builder-images.outputs.version }}
docker run -v "${DOCKER_CONFIG:-$HOME/.docker}:/root/.docker:ro" \
-e "REGISTRY_AUTH_FILE=/root/.docker/config.json" \
quay.io/skopeo/stable:latest \
copy -a docker://docker.io/${{ vars.IMAGE_NAMESPACE }}/dev-builder-centos:latest \
docker://${{ vars.ECR_IMAGE_REGISTRY }}/${{ vars.ECR_IMAGE_NAMESPACE }}/dev-builder-centos:latest
- name: Push dev-builder-android image
shell: bash
if: ${{ inputs.release_dev_builder_android_image }}
run: |
docker run -v "${DOCKER_CONFIG:-$HOME/.docker}:/root/.docker:ro" \
-e "REGISTRY_AUTH_FILE=/root/.docker/config.json" \
quay.io/skopeo/stable:latest \
copy -a docker://docker.io/${{ vars.IMAGE_NAMESPACE }}/dev-builder-android:${{ needs.release-dev-builder-images.outputs.version }} \
docker://${{ vars.ECR_IMAGE_REGISTRY }}/${{ vars.ECR_IMAGE_NAMESPACE }}/dev-builder-android:${{ needs.release-dev-builder-images.outputs.version }}
docker run -v "${DOCKER_CONFIG:-$HOME/.docker}:/root/.docker:ro" \
-e "REGISTRY_AUTH_FILE=/root/.docker/config.json" \
quay.io/skopeo/stable:latest \
copy -a docker://docker.io/${{ vars.IMAGE_NAMESPACE }}/dev-builder-android:latest \
docker://${{ vars.ECR_IMAGE_REGISTRY }}/${{ vars.ECR_IMAGE_NAMESPACE }}/dev-builder-android:latest
release-dev-builder-images-cn: # Note: Be careful issue: https://github.com/containers/skopeo/issues/1874 and we decide to use the latest stable skopeo container. release-dev-builder-images-cn: # Note: Be careful issue: https://github.com/containers/skopeo/issues/1874 and we decide to use the latest stable skopeo container.
name: Release dev builder images to CN region name: Release dev builder images to CN region
runs-on: ubuntu-20.04 runs-on: ubuntu-20.04
@@ -134,39 +51,35 @@ jobs:
release-dev-builder-images release-dev-builder-images
] ]
steps: steps:
- name: Login to AliCloud Container Registry
uses: docker/login-action@v3
with:
registry: ${{ vars.ACR_IMAGE_REGISTRY }}
username: ${{ secrets.ALICLOUD_USERNAME }}
password: ${{ secrets.ALICLOUD_PASSWORD }}
- name: Push dev-builder-ubuntu image - name: Push dev-builder-ubuntu image
shell: bash shell: bash
if: ${{ inputs.release_dev_builder_ubuntu_image }} if: ${{ inputs.release_dev_builder_ubuntu_image }}
env:
DST_REGISTRY_USERNAME: ${{ secrets.ALICLOUD_USERNAME }}
DST_REGISTRY_PASSWORD: ${{ secrets.ALICLOUD_PASSWORD }}
run: | run: |
docker run -v "${DOCKER_CONFIG:-$HOME/.docker}:/root/.docker:ro" \ docker run quay.io/skopeo/stable:latest copy -a docker://docker.io/${{ vars.IMAGE_NAMESPACE }}/dev-builder-ubuntu:${{ inputs.version }} \
-e "REGISTRY_AUTH_FILE=/root/.docker/config.json" \ --dest-creds "$DST_REGISTRY_USERNAME":"$DST_REGISTRY_PASSWORD" \
quay.io/skopeo/stable:latest \ docker://${{ vars.ACR_IMAGE_REGISTRY }}/${{ vars.IMAGE_NAMESPACE }}/dev-builder-ubuntu:${{ inputs.version }}
copy -a docker://docker.io/${{ vars.IMAGE_NAMESPACE }}/dev-builder-ubuntu:${{ needs.release-dev-builder-images.outputs.version }} \
docker://${{ vars.ACR_IMAGE_REGISTRY }}/${{ vars.IMAGE_NAMESPACE }}/dev-builder-ubuntu:${{ needs.release-dev-builder-images.outputs.version }}
- name: Push dev-builder-centos image - name: Push dev-builder-centos image
shell: bash shell: bash
if: ${{ inputs.release_dev_builder_centos_image }} if: ${{ inputs.release_dev_builder_centos_image }}
env:
DST_REGISTRY_USERNAME: ${{ secrets.ALICLOUD_USERNAME }}
DST_REGISTRY_PASSWORD: ${{ secrets.ALICLOUD_PASSWORD }}
run: | run: |
docker run -v "${DOCKER_CONFIG:-$HOME/.docker}:/root/.docker:ro" \ docker run quay.io/skopeo/stable:latest copy -a docker://docker.io/${{ vars.IMAGE_NAMESPACE }}/dev-builder-centos:${{ inputs.version }} \
-e "REGISTRY_AUTH_FILE=/root/.docker/config.json" \ --dest-creds "$DST_REGISTRY_USERNAME":"$DST_REGISTRY_PASSWORD" \
quay.io/skopeo/stable:latest \ docker://${{ vars.ACR_IMAGE_REGISTRY }}/${{ vars.IMAGE_NAMESPACE }}/dev-builder-centos:${{ inputs.version }}
copy -a docker://docker.io/${{ vars.IMAGE_NAMESPACE }}/dev-builder-centos:${{ needs.release-dev-builder-images.outputs.version }} \
docker://${{ vars.ACR_IMAGE_REGISTRY }}/${{ vars.IMAGE_NAMESPACE }}/dev-builder-centos:${{ needs.release-dev-builder-images.outputs.version }}
- name: Push dev-builder-android image - name: Push dev-builder-android image
shell: bash shell: bash
if: ${{ inputs.release_dev_builder_android_image }} if: ${{ inputs.release_dev_builder_android_image }}
env:
DST_REGISTRY_USERNAME: ${{ secrets.ALICLOUD_USERNAME }}
DST_REGISTRY_PASSWORD: ${{ secrets.ALICLOUD_PASSWORD }}
run: | run: |
docker run -v "${DOCKER_CONFIG:-$HOME/.docker}:/root/.docker:ro" \ docker run quay.io/skopeo/stable:latest copy -a docker://docker.io/${{ vars.IMAGE_NAMESPACE }}/dev-builder-android:${{ inputs.version }} \
-e "REGISTRY_AUTH_FILE=/root/.docker/config.json" \ --dest-creds "$DST_REGISTRY_USERNAME":"$DST_REGISTRY_PASSWORD" \
quay.io/skopeo/stable:latest \ docker://${{ vars.ACR_IMAGE_REGISTRY }}/${{ vars.IMAGE_NAMESPACE }}/dev-builder-android:${{ inputs.version }}
copy -a docker://docker.io/${{ vars.IMAGE_NAMESPACE }}/dev-builder-android:${{ needs.release-dev-builder-images.outputs.version }} \
docker://${{ vars.ACR_IMAGE_REGISTRY }}/${{ vars.IMAGE_NAMESPACE }}/dev-builder-android:${{ needs.release-dev-builder-images.outputs.version }}

View File

@@ -33,7 +33,6 @@ on:
description: The runner uses to build linux-arm64 artifacts description: The runner uses to build linux-arm64 artifacts
default: ec2-c6g.4xlarge-arm64 default: ec2-c6g.4xlarge-arm64
options: options:
- ubuntu-2204-32-cores-arm
- ec2-c6g.xlarge-arm64 # 4C8G - ec2-c6g.xlarge-arm64 # 4C8G
- ec2-c6g.2xlarge-arm64 # 8C16G - ec2-c6g.2xlarge-arm64 # 8C16G
- ec2-c6g.4xlarge-arm64 # 16C32G - ec2-c6g.4xlarge-arm64 # 16C32G
@@ -83,6 +82,7 @@ on:
# Use env variables to control all the release process. # Use env variables to control all the release process.
env: env:
# The arguments of building greptime. # The arguments of building greptime.
RUST_TOOLCHAIN: nightly-2023-10-21
CARGO_PROFILE: nightly CARGO_PROFILE: nightly
# Controls whether to run tests, include unit-test, integration-test and sqlness. # Controls whether to run tests, include unit-test, integration-test and sqlness.
@@ -91,12 +91,7 @@ env:
  # The scheduled version is '${{ env.NEXT_RELEASE_VERSION }}-nightly-YYYYMMDD', like v0.2.0-nightly-20230313; # The scheduled version is '${{ env.NEXT_RELEASE_VERSION }}-nightly-YYYYMMDD', like v0.2.0-nightly-20230313;
NIGHTLY_RELEASE_PREFIX: nightly NIGHTLY_RELEASE_PREFIX: nightly
# Note: The NEXT_RELEASE_VERSION should be modified manually by every formal release. # Note: The NEXT_RELEASE_VERSION should be modified manually by every formal release.
NEXT_RELEASE_VERSION: v0.10.0 NEXT_RELEASE_VERSION: v0.5.0
# Permission reference: https://docs.github.com/en/actions/using-jobs/assigning-permissions-to-jobs
permissions:
issues: write # Allows the action to create issues for cyborg.
contents: write # Allows the action to create a release.
jobs: jobs:
allocate-runners: allocate-runners:
@@ -107,7 +102,7 @@ jobs:
linux-amd64-runner: ${{ steps.start-linux-amd64-runner.outputs.label }} linux-amd64-runner: ${{ steps.start-linux-amd64-runner.outputs.label }}
linux-arm64-runner: ${{ steps.start-linux-arm64-runner.outputs.label }} linux-arm64-runner: ${{ steps.start-linux-arm64-runner.outputs.label }}
macos-runner: ${{ inputs.macos_runner || vars.DEFAULT_MACOS_RUNNER }} macos-runner: ${{ inputs.macos_runner || vars.DEFAULT_MACOS_RUNNER }}
windows-runner: windows-2022-8-cores windows-runner: windows-latest-8-cores
# The following EC2 resource id will be used for resource releasing. # The following EC2 resource id will be used for resource releasing.
linux-amd64-ec2-runner-label: ${{ steps.start-linux-amd64-runner.outputs.label }} linux-amd64-ec2-runner-label: ${{ steps.start-linux-amd64-runner.outputs.label }}
@@ -119,15 +114,10 @@ jobs:
version: ${{ steps.create-version.outputs.version }} version: ${{ steps.create-version.outputs.version }}
steps: steps:
- name: Checkout - name: Checkout
uses: actions/checkout@v4 uses: actions/checkout@v3
with: with:
fetch-depth: 0 fetch-depth: 0
- name: Check Rust toolchain version
shell: bash
run: |
./scripts/check-builder-rust-version.sh
# The create-version will create a global variable named 'version' in the global workflows. # The create-version will create a global variable named 'version' in the global workflows.
# - If it's a tag push release, the version is the tag name(${{ github.ref_name }}); # - If it's a tag push release, the version is the tag name(${{ github.ref_name }});
  # - If it's a scheduled release, the version is '${{ env.NEXT_RELEASE_VERSION }}-nightly-$buildTime', like v0.2.0-nightly-20230313; # - If it's a scheduled release, the version is '${{ env.NEXT_RELEASE_VERSION }}-nightly-$buildTime', like v0.2.0-nightly-20230313;
@@ -178,7 +168,7 @@ jobs:
] ]
runs-on: ${{ needs.allocate-runners.outputs.linux-amd64-runner }} runs-on: ${{ needs.allocate-runners.outputs.linux-amd64-runner }}
steps: steps:
- uses: actions/checkout@v4 - uses: actions/checkout@v3
with: with:
fetch-depth: 0 fetch-depth: 0
@@ -188,8 +178,6 @@ jobs:
cargo-profile: ${{ env.CARGO_PROFILE }} cargo-profile: ${{ env.CARGO_PROFILE }}
version: ${{ needs.allocate-runners.outputs.version }} version: ${{ needs.allocate-runners.outputs.version }}
disable-run-tests: ${{ env.DISABLE_RUN_TESTS }} disable-run-tests: ${{ env.DISABLE_RUN_TESTS }}
image-registry: ${{ vars.ECR_IMAGE_REGISTRY }}
image-namespace: ${{ vars.ECR_IMAGE_NAMESPACE }}
build-linux-arm64-artifacts: build-linux-arm64-artifacts:
name: Build linux-arm64 artifacts name: Build linux-arm64 artifacts
@@ -199,7 +187,7 @@ jobs:
] ]
runs-on: ${{ needs.allocate-runners.outputs.linux-arm64-runner }} runs-on: ${{ needs.allocate-runners.outputs.linux-arm64-runner }}
steps: steps:
- uses: actions/checkout@v4 - uses: actions/checkout@v3
with: with:
fetch-depth: 0 fetch-depth: 0
@@ -209,8 +197,6 @@ jobs:
cargo-profile: ${{ env.CARGO_PROFILE }} cargo-profile: ${{ env.CARGO_PROFILE }}
version: ${{ needs.allocate-runners.outputs.version }} version: ${{ needs.allocate-runners.outputs.version }}
disable-run-tests: ${{ env.DISABLE_RUN_TESTS }} disable-run-tests: ${{ env.DISABLE_RUN_TESTS }}
image-registry: ${{ vars.ECR_IMAGE_REGISTRY }}
image-namespace: ${{ vars.ECR_IMAGE_NAMESPACE }}
build-macos-artifacts: build-macos-artifacts:
name: Build macOS artifacts name: Build macOS artifacts
@@ -235,32 +221,25 @@ jobs:
arch: x86_64-apple-darwin arch: x86_64-apple-darwin
artifacts-dir-prefix: greptime-darwin-amd64-pyo3 artifacts-dir-prefix: greptime-darwin-amd64-pyo3
runs-on: ${{ matrix.os }} runs-on: ${{ matrix.os }}
outputs:
build-macos-result: ${{ steps.set-build-macos-result.outputs.build-macos-result }}
needs: [ needs: [
allocate-runners, allocate-runners,
] ]
if: ${{ inputs.build_macos_artifacts || github.event_name == 'push' || github.event_name == 'schedule' }} if: ${{ inputs.build_macos_artifacts || github.event_name == 'push' || github.event_name == 'schedule' }}
steps: steps:
- uses: actions/checkout@v4 - uses: actions/checkout@v3
with: with:
fetch-depth: 0 fetch-depth: 0
- uses: ./.github/actions/build-macos-artifacts - uses: ./.github/actions/build-macos-artifacts
with: with:
arch: ${{ matrix.arch }} arch: ${{ matrix.arch }}
rust-toolchain: ${{ env.RUST_TOOLCHAIN }}
cargo-profile: ${{ env.CARGO_PROFILE }} cargo-profile: ${{ env.CARGO_PROFILE }}
features: ${{ matrix.features }} features: ${{ matrix.features }}
version: ${{ needs.allocate-runners.outputs.version }} version: ${{ needs.allocate-runners.outputs.version }}
# We decide to disable the integration tests on macOS because it's unnecessary and time-consuming. disable-run-tests: ${{ env.DISABLE_RUN_TESTS }}
disable-run-tests: true
artifacts-dir: ${{ matrix.artifacts-dir-prefix }}-${{ needs.allocate-runners.outputs.version }} artifacts-dir: ${{ matrix.artifacts-dir-prefix }}-${{ needs.allocate-runners.outputs.version }}
- name: Set build macos result
id: set-build-macos-result
run: |
echo "build-macos-result=success" >> $GITHUB_OUTPUT
build-windows-artifacts: build-windows-artifacts:
name: Build Windows artifacts name: Build Windows artifacts
strategy: strategy:
@@ -276,8 +255,6 @@ jobs:
features: pyo3_backend,servers/dashboard features: pyo3_backend,servers/dashboard
artifacts-dir-prefix: greptime-windows-amd64-pyo3 artifacts-dir-prefix: greptime-windows-amd64-pyo3
runs-on: ${{ matrix.os }} runs-on: ${{ matrix.os }}
outputs:
build-windows-result: ${{ steps.set-build-windows-result.outputs.build-windows-result }}
needs: [ needs: [
allocate-runners, allocate-runners,
] ]
@@ -285,24 +262,20 @@ jobs:
steps: steps:
- run: git config --global core.autocrlf false - run: git config --global core.autocrlf false
- uses: actions/checkout@v4 - uses: actions/checkout@v3
with: with:
fetch-depth: 0 fetch-depth: 0
- uses: ./.github/actions/build-windows-artifacts - uses: ./.github/actions/build-windows-artifacts
with: with:
arch: ${{ matrix.arch }} arch: ${{ matrix.arch }}
rust-toolchain: ${{ env.RUST_TOOLCHAIN }}
cargo-profile: ${{ env.CARGO_PROFILE }} cargo-profile: ${{ env.CARGO_PROFILE }}
features: ${{ matrix.features }} features: ${{ matrix.features }}
version: ${{ needs.allocate-runners.outputs.version }} version: ${{ needs.allocate-runners.outputs.version }}
disable-run-tests: ${{ env.DISABLE_RUN_TESTS }} disable-run-tests: ${{ env.DISABLE_RUN_TESTS }}
artifacts-dir: ${{ matrix.artifacts-dir-prefix }}-${{ needs.allocate-runners.outputs.version }} artifacts-dir: ${{ matrix.artifacts-dir-prefix }}-${{ needs.allocate-runners.outputs.version }}
- name: Set build windows result
id: set-build-windows-result
run: |
echo "build-windows-result=success" >> $Env:GITHUB_OUTPUT
release-images-to-dockerhub: release-images-to-dockerhub:
name: Build and push images to DockerHub name: Build and push images to DockerHub
if: ${{ inputs.release_images || github.event_name == 'push' || github.event_name == 'schedule' }} if: ${{ inputs.release_images || github.event_name == 'push' || github.event_name == 'schedule' }}
@@ -312,10 +285,8 @@ jobs:
build-linux-arm64-artifacts, build-linux-arm64-artifacts,
] ]
runs-on: ubuntu-2004-16-cores runs-on: ubuntu-2004-16-cores
outputs:
build-image-result: ${{ steps.set-build-image-result.outputs.build-image-result }}
steps: steps:
- uses: actions/checkout@v4 - uses: actions/checkout@v3
with: with:
fetch-depth: 0 fetch-depth: 0
@@ -328,11 +299,6 @@ jobs:
image-registry-password: ${{ secrets.DOCKERHUB_TOKEN }} image-registry-password: ${{ secrets.DOCKERHUB_TOKEN }}
version: ${{ needs.allocate-runners.outputs.version }} version: ${{ needs.allocate-runners.outputs.version }}
- name: Set build image result
id: set-build-image-result
run: |
echo "build-image-result=success" >> $GITHUB_OUTPUT
release-cn-artifacts: release-cn-artifacts:
name: Release artifacts to CN region name: Release artifacts to CN region
if: ${{ inputs.release_images || github.event_name == 'push' || github.event_name == 'schedule' }} if: ${{ inputs.release_images || github.event_name == 'push' || github.event_name == 'schedule' }}
@@ -350,7 +316,7 @@ jobs:
# The ACR have daily sync with DockerHub, so don't worry about the image not being updated. # The ACR have daily sync with DockerHub, so don't worry about the image not being updated.
continue-on-error: true continue-on-error: true
steps: steps:
- uses: actions/checkout@v4 - uses: actions/checkout@v3
with: with:
fetch-depth: 0 fetch-depth: 0
@@ -386,7 +352,7 @@ jobs:
] ]
runs-on: ubuntu-20.04 runs-on: ubuntu-20.04
steps: steps:
- uses: actions/checkout@v4 - uses: actions/checkout@v3
with: with:
fetch-depth: 0 fetch-depth: 0
@@ -409,7 +375,7 @@ jobs:
] ]
steps: steps:
- name: Checkout - name: Checkout
uses: actions/checkout@v4 uses: actions/checkout@v3
with: with:
fetch-depth: 0 fetch-depth: 0
@@ -434,7 +400,7 @@ jobs:
] ]
steps: steps:
- name: Checkout - name: Checkout
uses: actions/checkout@v4 uses: actions/checkout@v3
with: with:
fetch-depth: 0 fetch-depth: 0
@@ -447,38 +413,3 @@ jobs:
aws-secret-access-key: ${{ secrets.AWS_SECRET_ACCESS_KEY }} aws-secret-access-key: ${{ secrets.AWS_SECRET_ACCESS_KEY }}
aws-region: ${{ vars.EC2_RUNNER_REGION }} aws-region: ${{ vars.EC2_RUNNER_REGION }}
github-token: ${{ secrets.GH_PERSONAL_ACCESS_TOKEN }} github-token: ${{ secrets.GH_PERSONAL_ACCESS_TOKEN }}
notification:
if: ${{ github.repository == 'GreptimeTeam/greptimedb' && (github.event_name == 'push' || github.event_name == 'schedule') && always() }}
name: Send notification to Greptime team
needs: [
release-images-to-dockerhub,
build-macos-artifacts,
build-windows-artifacts,
]
runs-on: ubuntu-20.04
env:
SLACK_WEBHOOK_URL: ${{ secrets.SLACK_WEBHOOK_URL_DEVELOP_CHANNEL }}
steps:
- uses: actions/checkout@v4
- uses: ./.github/actions/setup-cyborg
- name: Report CI status
id: report-ci-status
working-directory: cyborg
run: pnpm tsx bin/report-ci-failure.ts
env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
CI_REPORT_STATUS: ${{ needs.release-images-to-dockerhub.outputs.build-image-result == 'success' && needs.build-windows-artifacts.outputs.build-windows-result == 'success' && needs.build-macos-artifacts.outputs.build-macos-result == 'success' }}
- name: Notify release successful result
uses: slackapi/slack-github-action@v1.25.0
if: ${{ needs.release-images-to-dockerhub.outputs.build-image-result == 'success' && needs.build-windows-artifacts.outputs.build-windows-result == 'success' && needs.build-macos-artifacts.outputs.build-macos-result == 'success' }}
with:
payload: |
{"text": "GreptimeDB's release version has completed successfully."}
- name: Notify release failed result
uses: slackapi/slack-github-action@v1.25.0
if: ${{ needs.release-images-to-dockerhub.outputs.build-image-result != 'success' || needs.build-windows-artifacts.outputs.build-windows-result != 'success' || needs.build-macos-artifacts.outputs.build-macos-result != 'success' }}
with:
payload: |
{"text": "GreptimeDB's release version has failed, please check ${{ steps.report-ci-status.outputs.html_url }}."}


@@ -1,24 +0,0 @@
name: Schedule Management
on:
schedule:
- cron: '4 2 * * *'
workflow_dispatch:
permissions:
contents: read
issues: write
pull-requests: write
jobs:
maintenance:
name: Periodic Maintenance
runs-on: ubuntu-latest
if: ${{ github.repository == 'GreptimeTeam/greptimedb' }}
steps:
- uses: actions/checkout@v4
- uses: ./.github/actions/setup-cyborg
- name: Do Maintenance
working-directory: cyborg
run: pnpm tsx bin/schedule.ts
env:
GITHUB_TOKEN: ${{ secrets.GH_PERSONAL_ACCESS_TOKEN }}


@@ -1,21 +0,0 @@
name: "Semantic Pull Request"
on:
pull_request_target:
types:
- opened
- reopened
- edited
jobs:
check:
runs-on: ubuntu-20.04
timeout-minutes: 10
steps:
- uses: actions/checkout@v4
- uses: ./.github/actions/setup-cyborg
- name: Check Pull Request
working-directory: cyborg
run: pnpm tsx bin/check-pull-request.ts
env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}

.github/workflows/size-label.yml

@@ -0,0 +1,26 @@
name: size-labeler
on: [pull_request]
jobs:
labeler:
runs-on: ubuntu-latest
name: Label the PR size
steps:
- uses: codelytv/pr-size-labeler@v1
with:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
s_label: 'Size: S'
s_max_size: '100'
m_label: 'Size: M'
m_max_size: '500'
l_label: 'Size: L'
l_max_size: '1000'
xl_label: 'Size: XL'
fail_if_xl: 'false'
message_if_xl: >
This PR exceeds the recommended size of 1000 lines.
Please make sure you are NOT addressing multiple issues with one PR.
Note this PR might be rejected due to its size.
github_api_url: 'api.github.com'
files_to_ignore: 'Cargo.lock'
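To estimate which size label a pull request will receive before opening it, the changed-line count can be approximated locally. A rough sketch, assuming the PR targets `main` and ignoring `Cargo.lock` like the `files_to_ignore` setting above (the action's exact counting rules may differ):

```shell
# Count insertions/deletions against the base branch, excluding Cargo.lock.
git diff --shortstat main...HEAD -- . ':(exclude)Cargo.lock'
```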

.gitignore

@@ -46,7 +46,3 @@ benchmarks/data
*.code-workspace *.code-workspace
venv/ venv/
# Fuzz tests
tests-fuzz/artifacts/
tests-fuzz/corpus/


@@ -16,7 +16,6 @@ repos:
hooks: hooks:
- id: fmt - id: fmt
- id: clippy - id: clippy
args: ["--workspace", "--all-targets", "--all-features", "--", "-D", "warnings"] args: ["--workspace", "--all-targets", "--", "-D", "warnings", "-D", "clippy::print_stdout", "-D", "clippy::print_stderr"]
stages: [push] stages: [push]
- id: cargo-check - id: cargo-check
args: ["--workspace", "--all-targets", "--all-features"]


@@ -1,43 +0,0 @@
# GreptimeDB Authors
## Individual Committers (in alphabetical order)
* [CookiePieWw](https://github.com/CookiePieWw)
* [KKould](https://github.com/KKould)
* [NiwakaDev](https://github.com/NiwakaDev)
* [etolbakov](https://github.com/etolbakov)
* [irenjj](https://github.com/irenjj)
## Team Members (in alphabetical order)
* [Breeze-P](https://github.com/Breeze-P)
* [GrepTime](https://github.com/GrepTime)
* [MichaelScofield](https://github.com/MichaelScofield)
* [Wenjie0329](https://github.com/Wenjie0329)
* [WenyXu](https://github.com/WenyXu)
* [ZonaHex](https://github.com/ZonaHex)
* [apdong2022](https://github.com/apdong2022)
* [beryl678](https://github.com/beryl678)
* [daviderli614](https://github.com/daviderli614)
* [discord9](https://github.com/discord9)
* [evenyag](https://github.com/evenyag)
* [fengjiachun](https://github.com/fengjiachun)
* [fengys1996](https://github.com/fengys1996)
* [holalengyu](https://github.com/holalengyu)
* [killme2008](https://github.com/killme2008)
* [nicecui](https://github.com/nicecui)
* [paomian](https://github.com/paomian)
* [shuiyisong](https://github.com/shuiyisong)
* [sunchanglong](https://github.com/sunchanglong)
* [sunng87](https://github.com/sunng87)
* [tisonkun](https://github.com/tisonkun)
* [v0y4g3r](https://github.com/v0y4g3r)
* [waynexia](https://github.com/waynexia)
* [xtang](https://github.com/xtang)
* [zhaoyingnan01](https://github.com/zhaoyingnan01)
* [zhongzc](https://github.com/zhongzc)
* [zyy17](https://github.com/zyy17)
## All Contributors
[![All Contributors](https://contrib.rocks/image?repo=GreptimeTeam/greptimedb)](https://github.com/GreptimeTeam/greptimedb/graphs/contributors)

CODE_OF_CONDUCT.md

@@ -0,0 +1,132 @@
# Contributor Covenant Code of Conduct
## Our Pledge
We as members, contributors, and leaders pledge to make participation in our
community a harassment-free experience for everyone, regardless of age, body
size, visible or invisible disability, ethnicity, sex characteristics, gender
identity and expression, level of experience, education, socio-economic status,
nationality, personal appearance, race, caste, color, religion, or sexual
identity and orientation.
We pledge to act and interact in ways that contribute to an open, welcoming,
diverse, inclusive, and healthy community.
## Our Standards
Examples of behavior that contributes to a positive environment for our
community include:
* Demonstrating empathy and kindness toward other people
* Being respectful of differing opinions, viewpoints, and experiences
* Giving and gracefully accepting constructive feedback
* Accepting responsibility and apologizing to those affected by our mistakes,
and learning from the experience
* Focusing on what is best not just for us as individuals, but for the overall
community
Examples of unacceptable behavior include:
* The use of sexualized language or imagery, and sexual attention or advances of
any kind
* Trolling, insulting or derogatory comments, and personal or political attacks
* Public or private harassment
* Publishing others' private information, such as a physical or email address,
without their explicit permission
* Other conduct which could reasonably be considered inappropriate in a
professional setting
## Enforcement Responsibilities
Community leaders are responsible for clarifying and enforcing our standards of
acceptable behavior and will take appropriate and fair corrective action in
response to any behavior that they deem inappropriate, threatening, offensive,
or harmful.
Community leaders have the right and responsibility to remove, edit, or reject
comments, commits, code, wiki edits, issues, and other contributions that are
not aligned to this Code of Conduct, and will communicate reasons for moderation
decisions when appropriate.
## Scope
This Code of Conduct applies within all community spaces, and also applies when
an individual is officially representing the community in public spaces.
Examples of representing our community include using an official e-mail address,
posting via an official social media account, or acting as an appointed
representative at an online or offline event.
## Enforcement
Instances of abusive, harassing, or otherwise unacceptable behavior may be
reported to the community leaders responsible for enforcement at
info@greptime.com.
All complaints will be reviewed and investigated promptly and fairly.
All community leaders are obligated to respect the privacy and security of the
reporter of any incident.
## Enforcement Guidelines
Community leaders will follow these Community Impact Guidelines in determining
the consequences for any action they deem in violation of this Code of Conduct:
### 1. Correction
**Community Impact**: Use of inappropriate language or other behavior deemed
unprofessional or unwelcome in the community.
**Consequence**: A private, written warning from community leaders, providing
clarity around the nature of the violation and an explanation of why the
behavior was inappropriate. A public apology may be requested.
### 2. Warning
**Community Impact**: A violation through a single incident or series of
actions.
**Consequence**: A warning with consequences for continued behavior. No
interaction with the people involved, including unsolicited interaction with
those enforcing the Code of Conduct, for a specified period of time. This
includes avoiding interactions in community spaces as well as external channels
like social media. Violating these terms may lead to a temporary or permanent
ban.
### 3. Temporary Ban
**Community Impact**: A serious violation of community standards, including
sustained inappropriate behavior.
**Consequence**: A temporary ban from any sort of interaction or public
communication with the community for a specified period of time. No public or
private interaction with the people involved, including unsolicited interaction
with those enforcing the Code of Conduct, is allowed during this period.
Violating these terms may lead to a permanent ban.
### 4. Permanent Ban
**Community Impact**: Demonstrating a pattern of violation of community
standards, including sustained inappropriate behavior, harassment of an
individual, or aggression toward or disparagement of classes of individuals.
**Consequence**: A permanent ban from any sort of public interaction within the
community.
## Attribution
This Code of Conduct is adapted from the [Contributor Covenant][homepage],
version 2.1, available at
[https://www.contributor-covenant.org/version/2/1/code_of_conduct.html][v2.1].
Community Impact Guidelines were inspired by
[Mozilla's code of conduct enforcement ladder][Mozilla CoC].
For answers to common questions about this code of conduct, see the FAQ at
[https://www.contributor-covenant.org/faq][FAQ]. Translations are available at
[https://www.contributor-covenant.org/translations][translations].
[homepage]: https://www.contributor-covenant.org
[v2.1]: https://www.contributor-covenant.org/version/2/1/code_of_conduct.html
[Mozilla CoC]: https://github.com/mozilla/diversity
[FAQ]: https://www.contributor-covenant.org/faq
[translations]: https://www.contributor-covenant.org/translations


@@ -2,11 +2,7 @@
Thanks a lot for considering contributing to GreptimeDB. We believe people like you would make GreptimeDB a great product. We intend to build a community where individuals can have open talks, show respect for one another, and speak with true ❤️. Meanwhile, we are to keep transparency and make your effort count here. Thanks a lot for considering contributing to GreptimeDB. We believe people like you would make GreptimeDB a great product. We intend to build a community where individuals can have open talks, show respect for one another, and speak with true ❤️. Meanwhile, we are to keep transparency and make your effort count here.
You can find our contributors at https://github.com/GreptimeTeam/greptimedb/graphs/contributors. When you dedicate a few months to GreptimeDB and keep bringing high-quality contributions (code, docs, advocacy, etc.), you become a candidate for committer. Please read the guidelines, and they can help you get started. Communicate with respect to developers maintaining and developing the project.
A committer will be granted both read & write access to GreptimeDB repos. Check the [AUTHOR.md](AUTHOR.md) file for all current individual committers.
Please read the guidelines, and they can help you get started. Communicate respectfully with the developers maintaining and developing the project. In return, they should reciprocate that respect by addressing your issue, reviewing changes, as well as helping finalize and merge your pull requests.
Follow our [README](https://github.com/GreptimeTeam/greptimedb#readme) to get the whole picture of the project. To learn about the design of GreptimeDB, please refer to the [design docs](https://github.com/GrepTimeTeam/docs). Follow our [README](https://github.com/GreptimeTeam/greptimedb#readme) to get the whole picture of the project. To learn about the design of GreptimeDB, please refer to the [design docs](https://github.com/GrepTimeTeam/docs).
@@ -14,7 +10,7 @@ Follow our [README](https://github.com/GreptimeTeam/greptimedb#readme) to get th
It can feel intimidating to contribute to a complex project, but it can also be exciting and fun. These general notes will help everyone participate in this communal activity. It can feel intimidating to contribute to a complex project, but it can also be exciting and fun. These general notes will help everyone participate in this communal activity.
- Follow the [Code of Conduct](https://github.com/GreptimeTeam/.github/blob/main/.github/CODE_OF_CONDUCT.md) - Follow the [Code of Conduct](https://github.com/GreptimeTeam/greptimedb/blob/develop/CODE_OF_CONDUCT.md)
- Small changes make huge differences. We will happily accept a PR making a single character change if it helps move forward. Don't wait to have everything working. - Small changes make huge differences. We will happily accept a PR making a single character change if it helps move forward. Don't wait to have everything working.
- Check the closed issues before opening your issue. - Check the closed issues before opening your issue.
- Try to follow the existing style of the code. - Try to follow the existing style of the code.
@@ -30,7 +26,7 @@ Pull requests are great, but we accept all kinds of other help if you like. Such
## Code of Conduct ## Code of Conduct
Also, there are things that we are not looking for because they don't match the goals of the product or benefit the community. Please read [Code of Conduct](https://github.com/GreptimeTeam/.github/blob/main/.github/CODE_OF_CONDUCT.md); we hope everyone can keep good manners and become an honored member. Also, there are things that we are not looking for because they don't match the goals of the product or benefit the community. Please read [Code of Conduct](https://github.com/GreptimeTeam/greptimedb/blob/develop/CODE_OF_CONDUCT.md); we hope everyone can keep good manners and become an honored member.
## License ## License
@@ -54,8 +50,8 @@ GreptimeDB uses the [Apache 2.0 license](https://github.com/GreptimeTeam/greptim
- To ensure that community is free and confident in its ability to use your contributions, please sign the Contributor License Agreement (CLA) which will be incorporated in the pull request process. - To ensure that community is free and confident in its ability to use your contributions, please sign the Contributor License Agreement (CLA) which will be incorporated in the pull request process.
- Make sure all files have proper license header (running `docker run --rm -v $(pwd):/github/workspace ghcr.io/korandoru/hawkeye-native:v3 format` from the project root). - Make sure all files have proper license header (running `docker run --rm -v $(pwd):/github/workspace ghcr.io/korandoru/hawkeye-native:v3 format` from the project root).
- Make sure all your codes are formatted and follow the [coding style](https://pingcap.github.io/style-guide/rust/) and [style guide](docs/style-guide.md). - Make sure all your codes are formatted and follow the [coding style](https://pingcap.github.io/style-guide/rust/).
- Make sure all unit tests are passed using [nextest](https://nexte.st/index.html) `cargo nextest run`. - Make sure all unit tests are passed (using `cargo test --workspace` or [nextest](https://nexte.st/index.html) `cargo nextest run`).
- Make sure all clippy warnings are fixed (you can check it locally by running `cargo clippy --workspace --all-targets -- -D warnings`). - Make sure all clippy warnings are fixed (you can check it locally by running `cargo clippy --workspace --all-targets -- -D warnings`).
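Putting the checklist together, a typical local run before opening a pull request might look like the following sketch (the commands are the ones listed above; the hawkeye image tag is the one mentioned there):

```shell
# License headers
docker run --rm -v $(pwd):/github/workspace ghcr.io/korandoru/hawkeye-native:v3 format
# Formatting and lints
cargo fmt --all -- --check
cargo clippy --workspace --all-targets -- -D warnings
# Unit tests via nextest
cargo nextest run
```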
#### `pre-commit` Hooks #### `pre-commit` Hooks

Cargo.lock (generated): diff suppressed because it is too large.

@@ -1,9 +1,9 @@
[workspace] [workspace]
members = [ members = [
"benchmarks",
"src/api", "src/api",
"src/auth", "src/auth",
"src/catalog", "src/catalog",
"src/cache",
"src/client", "src/client",
"src/cmd", "src/cmd",
"src/common/base", "src/common/base",
@@ -11,7 +11,6 @@ members = [
"src/common/config", "src/common/config",
"src/common/datasource", "src/common/datasource",
"src/common/error", "src/common/error",
"src/common/frontend",
"src/common/function", "src/common/function",
"src/common/macro", "src/common/macro",
"src/common/greptimedb-telemetry", "src/common/greptimedb-telemetry",
@@ -19,8 +18,6 @@ members = [
"src/common/grpc-expr", "src/common/grpc-expr",
"src/common/mem-prof", "src/common/mem-prof",
"src/common/meta", "src/common/meta",
"src/common/plugins",
"src/common/pprof",
"src/common/procedure", "src/common/procedure",
"src/common/procedure-test", "src/common/procedure-test",
"src/common/query", "src/common/query",
@@ -32,11 +29,9 @@ members = [
"src/common/time", "src/common/time",
"src/common/decimal", "src/common/decimal",
"src/common/version", "src/common/version",
"src/common/wal",
"src/datanode", "src/datanode",
"src/datatypes", "src/datatypes",
"src/file-engine", "src/file-engine",
"src/flow",
"src/frontend", "src/frontend",
"src/log-store", "src/log-store",
"src/meta-client", "src/meta-client",
@@ -46,7 +41,6 @@ members = [
"src/object-store", "src/object-store",
"src/operator", "src/operator",
"src/partition", "src/partition",
"src/pipeline",
"src/plugins", "src/plugins",
"src/promql", "src/promql",
"src/puffin", "src/puffin",
@@ -58,155 +52,98 @@ members = [
"src/store-api", "src/store-api",
"src/table", "src/table",
"src/index", "src/index",
"tests-fuzz",
"tests-integration", "tests-integration",
"tests/runner", "tests/runner",
] ]
resolver = "2" resolver = "2"
[workspace.package] [workspace.package]
version = "0.9.5" version = "0.4.4"
edition = "2021" edition = "2021"
license = "Apache-2.0" license = "Apache-2.0"
[workspace.lints]
clippy.print_stdout = "warn"
clippy.print_stderr = "warn"
clippy.dbg_macro = "warn"
clippy.implicit_clone = "warn"
clippy.readonly_write_lock = "allow"
rust.unknown_lints = "deny"
# Remove this after https://github.com/PyO3/pyo3/issues/4094
rust.non_local_definitions = "allow"
rust.unexpected_cfgs = { level = "warn", check-cfg = ['cfg(tokio_unstable)'] }
[workspace.dependencies] [workspace.dependencies]
# We turn off default-features for some dependencies here so the workspaces which inherit them can
# selectively turn them on if needed, since we can override default-features = true (from false)
# for the inherited dependency but cannot do the reverse (override from true to false).
#
# See for more detaiils: https://github.com/rust-lang/cargo/issues/11329
ahash = { version = "0.8", features = ["compile-time-rng"] } ahash = { version = "0.8", features = ["compile-time-rng"] }
aquamarine = "0.3" aquamarine = "0.3"
arrow = { version = "51.0.0", features = ["prettyprint"] } arrow = { version = "47.0" }
arrow-array = { version = "51.0.0", default-features = false, features = ["chrono-tz"] } arrow-array = "47.0"
arrow-flight = "51.0" arrow-flight = "47.0"
arrow-ipc = { version = "51.0.0", default-features = false, features = ["lz4", "zstd"] } arrow-schema = { version = "47.0", features = ["serde"] }
arrow-schema = { version = "51.0", features = ["serde"] }
async-stream = "0.3" async-stream = "0.3"
async-trait = "0.1" async-trait = "0.1"
axum = { version = "0.6", features = ["headers"] }
base64 = "0.21" base64 = "0.21"
bigdecimal = "0.4.2" bigdecimal = "0.4.2"
bitflags = "2.4.1" bitflags = "2.4.1"
bytemuck = "1.12" bytemuck = "1.12"
bytes = { version = "1.7", features = ["serde"] }
chrono = { version = "0.4", features = ["serde"] } chrono = { version = "0.4", features = ["serde"] }
clap = { version = "4.4", features = ["derive"] } datafusion = { git = "https://github.com/apache/arrow-datafusion.git", rev = "26e43acac3a96cec8dd4c8365f22dfb1a84306e9" }
config = "0.13.0" datafusion-common = { git = "https://github.com/apache/arrow-datafusion.git", rev = "26e43acac3a96cec8dd4c8365f22dfb1a84306e9" }
crossbeam-utils = "0.8" datafusion-expr = { git = "https://github.com/apache/arrow-datafusion.git", rev = "26e43acac3a96cec8dd4c8365f22dfb1a84306e9" }
dashmap = "5.4" datafusion-optimizer = { git = "https://github.com/apache/arrow-datafusion.git", rev = "26e43acac3a96cec8dd4c8365f22dfb1a84306e9" }
datafusion = { git = "https://github.com/waynexia/arrow-datafusion.git", rev = "7823ef2f63663907edab46af0d51359900f608d6" } datafusion-physical-expr = { git = "https://github.com/apache/arrow-datafusion.git", rev = "26e43acac3a96cec8dd4c8365f22dfb1a84306e9" }
datafusion-common = { git = "https://github.com/waynexia/arrow-datafusion.git", rev = "7823ef2f63663907edab46af0d51359900f608d6" } datafusion-sql = { git = "https://github.com/apache/arrow-datafusion.git", rev = "26e43acac3a96cec8dd4c8365f22dfb1a84306e9" }
datafusion-expr = { git = "https://github.com/waynexia/arrow-datafusion.git", rev = "7823ef2f63663907edab46af0d51359900f608d6" } datafusion-substrait = { git = "https://github.com/apache/arrow-datafusion.git", rev = "26e43acac3a96cec8dd4c8365f22dfb1a84306e9" }
datafusion-functions = { git = "https://github.com/waynexia/arrow-datafusion.git", rev = "7823ef2f63663907edab46af0d51359900f608d6" }
datafusion-optimizer = { git = "https://github.com/waynexia/arrow-datafusion.git", rev = "7823ef2f63663907edab46af0d51359900f608d6" }
datafusion-physical-expr = { git = "https://github.com/waynexia/arrow-datafusion.git", rev = "7823ef2f63663907edab46af0d51359900f608d6" }
datafusion-physical-plan = { git = "https://github.com/waynexia/arrow-datafusion.git", rev = "7823ef2f63663907edab46af0d51359900f608d6" }
datafusion-sql = { git = "https://github.com/waynexia/arrow-datafusion.git", rev = "7823ef2f63663907edab46af0d51359900f608d6" }
datafusion-substrait = { git = "https://github.com/waynexia/arrow-datafusion.git", rev = "7823ef2f63663907edab46af0d51359900f608d6" }
derive_builder = "0.12" derive_builder = "0.12"
dotenv = "0.15" etcd-client = "0.12"
etcd-client = { version = "0.13" }
fst = "0.4.7" fst = "0.4.7"
futures = "0.3" futures = "0.3"
futures-util = "0.3" futures-util = "0.3"
greptime-proto = { git = "https://github.com/GreptimeTeam/greptime-proto.git", rev = "255f87a3318ace3f88a67f76995a0e14910983f4" } greptime-proto = { git = "https://github.com/GreptimeTeam/greptime-proto.git", rev = "b1d403088f02136bcebde53d604f491c260ca8e2" }
humantime = "2.1"
humantime-serde = "1.1" humantime-serde = "1.1"
itertools = "0.10" itertools = "0.10"
jsonb = { git = "https://github.com/databendlabs/jsonb.git", rev = "46ad50fc71cf75afbf98eec455f7892a6387c1fc", default-features = false }
lazy_static = "1.4" lazy_static = "1.4"
meter-core = { git = "https://github.com/GreptimeTeam/greptime-meter.git", rev = "a10facb353b41460eeb98578868ebf19c2084fac" } meter-core = { git = "https://github.com/GreptimeTeam/greptime-meter.git", rev = "abbd357c1e193cd270ea65ee7652334a150b628f" }
mockall = "0.11.4" mockall = "0.11.4"
moka = "0.12" moka = "0.12"
notify = "6.1"
num_cpus = "1.16"
once_cell = "1.18" once_cell = "1.18"
opentelemetry-proto = { version = "0.5", features = [ opentelemetry-proto = { git = "https://github.com/waynexia/opentelemetry-rust.git", rev = "33841b38dda79b15f2024952be5f32533325ca02", features = [
"gen-tonic", "gen-tonic",
"metrics", "metrics",
"trace", "trace",
"with-serde",
"logs",
] } ] }
parking_lot = "0.12" parquet = "47.0"
parquet = { version = "51.0.0", default-features = false, features = ["arrow", "async", "object_store"] }
paste = "1.0" paste = "1.0"
pin-project = "1.0" pin-project = "1.0"
prometheus = { version = "0.13.3", features = ["process"] } prometheus = { version = "0.13.3", features = ["process"] }
promql-parser = { version = "0.4.3", features = ["ser"] }
prost = "0.12" prost = "0.12"
raft-engine = { version = "0.4.1", default-features = false } raft-engine = { git = "https://github.com/tikv/raft-engine.git", rev = "22dfb426cd994602b57725ef080287d3e53db479" }
rand = "0.8" rand = "0.8"
ratelimit = "0.9"
regex = "1.8" regex = "1.8"
regex-automata = { version = "0.4" } regex-automata = { version = "0.1", features = ["transducer"] }
reqwest = { version = "0.12", default-features = false, features = [ reqwest = { version = "0.11", default-features = false, features = [
"json", "json",
"rustls-tls-native-roots", "rustls-tls-native-roots",
"stream", "stream",
"multipart",
] } ] }
rskafka = { git = "https://github.com/influxdata/rskafka.git", rev = "75535b5ad9bae4a5dbb582c82e44dfd81ec10105", features = [
"transport-tls",
] }
rstest = "0.21"
rstest_reuse = "0.7"
rust_decimal = "1.33" rust_decimal = "1.33"
rustc-hash = "2.0"
schemars = "0.8"
serde = { version = "1.0", features = ["derive"] } serde = { version = "1.0", features = ["derive"] }
serde_json = { version = "1.0", features = ["float_roundtrip"] } serde_json = "1.0"
serde_with = "3" smallvec = "1"
shadow-rs = "0.35" snafu = "0.7"
similar-asserts = "1.6.0" # on branch v0.38.x
smallvec = { version = "1", features = ["serde"] } sqlparser = { git = "https://github.com/GreptimeTeam/sqlparser-rs.git", rev = "6a93567ae38d42be5c8d08b13c8ff4dde26502ef", features = [
snafu = "0.8"
sysinfo = "0.30"
# on branch v0.44.x
sqlparser = { git = "https://github.com/GreptimeTeam/sqlparser-rs.git", rev = "54a267ac89c09b11c0c88934690530807185d3e7", features = [
"visitor", "visitor",
] } ] }
strum = { version = "0.25", features = ["derive"] } strum = { version = "0.25", features = ["derive"] }
tempfile = "3" tempfile = "3"
tokio = { version = "1.40", features = ["full"] } tokio = { version = "1.28", features = ["full"] }
tokio-postgres = "0.7"
tokio-stream = { version = "0.1" }
tokio-util = { version = "0.7", features = ["io-util", "compat"] } tokio-util = { version = "0.7", features = ["io-util", "compat"] }
toml = "0.8.8" toml = "0.7"
tonic = { version = "0.11", features = ["tls", "gzip", "zstd"] } tonic = { version = "0.10", features = ["tls"] }
tower = { version = "0.4" } uuid = { version = "1", features = ["serde", "v4", "fast-rng"] }
tracing-appender = "0.2"
tracing-subscriber = { version = "0.3", features = ["env-filter", "json", "fmt"] }
typetag = "0.2"
uuid = { version = "1.7", features = ["serde", "v4", "fast-rng"] }
zstd = "0.13"
## workspaces members ## workspaces members
api = { path = "src/api" } api = { path = "src/api" }
auth = { path = "src/auth" } auth = { path = "src/auth" }
cache = { path = "src/cache" }
catalog = { path = "src/catalog" } catalog = { path = "src/catalog" }
client = { path = "src/client" } client = { path = "src/client" }
cmd = { path = "src/cmd", default-features = false } cmd = { path = "src/cmd" }
common-base = { path = "src/common/base" } common-base = { path = "src/common/base" }
common-catalog = { path = "src/common/catalog" } common-catalog = { path = "src/common/catalog" }
common-config = { path = "src/common/config" } common-config = { path = "src/common/config" }
common-datasource = { path = "src/common/datasource" } common-datasource = { path = "src/common/datasource" }
common-decimal = { path = "src/common/decimal" } common-decimal = { path = "src/common/decimal" }
common-error = { path = "src/common/error" } common-error = { path = "src/common/error" }
common-frontend = { path = "src/common/frontend" }
common-function = { path = "src/common/function" } common-function = { path = "src/common/function" }
common-greptimedb-telemetry = { path = "src/common/greptimedb-telemetry" } common-greptimedb-telemetry = { path = "src/common/greptimedb-telemetry" }
common-grpc = { path = "src/common/grpc" } common-grpc = { path = "src/common/grpc" }
@@ -214,7 +151,6 @@ common-grpc-expr = { path = "src/common/grpc-expr" }
common-macro = { path = "src/common/macro" } common-macro = { path = "src/common/macro" }
common-mem-prof = { path = "src/common/mem-prof" } common-mem-prof = { path = "src/common/mem-prof" }
common-meta = { path = "src/common/meta" } common-meta = { path = "src/common/meta" }
common-plugins = { path = "src/common/plugins" }
common-pprof = { path = "src/common/pprof" } common-pprof = { path = "src/common/pprof" }
common-procedure = { path = "src/common/procedure" } common-procedure = { path = "src/common/procedure" }
common-procedure-test = { path = "src/common/procedure-test" } common-procedure-test = { path = "src/common/procedure-test" }
@@ -225,25 +161,20 @@ common-telemetry = { path = "src/common/telemetry" }
common-test-util = { path = "src/common/test-util" } common-test-util = { path = "src/common/test-util" }
common-time = { path = "src/common/time" } common-time = { path = "src/common/time" }
common-version = { path = "src/common/version" } common-version = { path = "src/common/version" }
common-wal = { path = "src/common/wal" }
datanode = { path = "src/datanode" } datanode = { path = "src/datanode" }
datatypes = { path = "src/datatypes" } datatypes = { path = "src/datatypes" }
file-engine = { path = "src/file-engine" } file-engine = { path = "src/file-engine" }
flow = { path = "src/flow" } frontend = { path = "src/frontend" }
frontend = { path = "src/frontend", default-features = false }
index = { path = "src/index" }
log-store = { path = "src/log-store" } log-store = { path = "src/log-store" }
meta-client = { path = "src/meta-client" } meta-client = { path = "src/meta-client" }
meta-srv = { path = "src/meta-srv" } meta-srv = { path = "src/meta-srv" }
metric-engine = { path = "src/metric-engine" } mito = { path = "src/mito" }
mito2 = { path = "src/mito2" } mito2 = { path = "src/mito2" }
object-store = { path = "src/object-store" } object-store = { path = "src/object-store" }
operator = { path = "src/operator" } operator = { path = "src/operator" }
partition = { path = "src/partition" } partition = { path = "src/partition" }
pipeline = { path = "src/pipeline" }
plugins = { path = "src/plugins" } plugins = { path = "src/plugins" }
promql = { path = "src/promql" } promql = { path = "src/promql" }
puffin = { path = "src/puffin" }
query = { path = "src/query" } query = { path = "src/query" }
script = { path = "src/script" } script = { path = "src/script" }
servers = { path = "src/servers" } servers = { path = "src/servers" }
@@ -253,37 +184,16 @@ store-api = { path = "src/store-api" }
substrait = { path = "src/common/substrait" } substrait = { path = "src/common/substrait" }
table = { path = "src/table" } table = { path = "src/table" }
[patch.crates-io]
# change all rustls dependencies to use our fork to default to `ring` to make it "just work"
hyper-rustls = { git = "https://github.com/GreptimeTeam/hyper-rustls" }
rustls = { git = "https://github.com/GreptimeTeam/rustls" }
tokio-rustls = { git = "https://github.com/GreptimeTeam/tokio-rustls" }
# This is commented, since we are not using aws-lc-sys, if we need to use it, we need to uncomment this line or use a release after this commit, or it wouldn't compile with gcc < 8.1
# see https://github.com/aws/aws-lc-rs/pull/526
# aws-lc-sys = { git ="https://github.com/aws/aws-lc-rs", rev = "556558441e3494af4b156ae95ebc07ebc2fd38aa" }
[workspace.dependencies.meter-macros] [workspace.dependencies.meter-macros]
git = "https://github.com/GreptimeTeam/greptime-meter.git" git = "https://github.com/GreptimeTeam/greptime-meter.git"
rev = "a10facb353b41460eeb98578868ebf19c2084fac" rev = "abbd357c1e193cd270ea65ee7652334a150b628f"
[profile.release] [profile.release]
debug = 1 debug = true
[profile.nightly] [profile.nightly]
inherits = "release" inherits = "release"
strip = "debuginfo" strip = true
lto = "thin" lto = "thin"
debug = false debug = false
incremental = false incremental = false
[profile.ci]
inherits = "dev"
strip = true
[profile.dev.package.sqlness-runner]
debug = false
strip = true
[profile.dev.package.tests-fuzz]
debug = false
strip = true
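The new `[profile.ci]` above is an ordinary Cargo profile, so it can also be selected explicitly for local builds. A small usage sketch:

```shell
# Build and test with the ci profile defined in [profile.ci] above.
cargo build --workspace --profile ci
cargo test --workspace --profile ci
```

For the `[workspace.lints]` table earlier in the file, member crates typically opt in with `[lints] workspace = true` in their own Cargo.toml; that is how the `print_stdout`, `print_stderr`, and related rules become workspace-wide.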


@@ -3,12 +3,10 @@ CARGO_PROFILE ?=
FEATURES ?= FEATURES ?=
TARGET_DIR ?= TARGET_DIR ?=
TARGET ?= TARGET ?=
BUILD_BIN ?= greptime
CARGO_BUILD_OPTS := --locked CARGO_BUILD_OPTS := --locked
IMAGE_REGISTRY ?= docker.io IMAGE_REGISTRY ?= docker.io
IMAGE_NAMESPACE ?= greptime IMAGE_NAMESPACE ?= greptime
IMAGE_TAG ?= latest IMAGE_TAG ?= latest
DEV_BUILDER_IMAGE_TAG ?= 2024-10-19-a5c00e85-20241024184445
BUILDX_MULTI_PLATFORM_BUILD ?= false BUILDX_MULTI_PLATFORM_BUILD ?= false
BUILDX_BUILDER_NAME ?= gtbuilder BUILDX_BUILDER_NAME ?= gtbuilder
BASE_IMAGE ?= ubuntu BASE_IMAGE ?= ubuntu
@@ -16,7 +14,6 @@ RUST_TOOLCHAIN ?= $(shell cat rust-toolchain.toml | grep channel | cut -d'"' -f2
CARGO_REGISTRY_CACHE ?= ${HOME}/.cargo/registry CARGO_REGISTRY_CACHE ?= ${HOME}/.cargo/registry
ARCH := $(shell uname -m | sed 's/x86_64/amd64/' | sed 's/aarch64/arm64/') ARCH := $(shell uname -m | sed 's/x86_64/amd64/' | sed 's/aarch64/arm64/')
OUTPUT_DIR := $(shell if [ "$(RELEASE)" = "true" ]; then echo "release"; elif [ ! -z "$(CARGO_PROFILE)" ]; then echo "$(CARGO_PROFILE)" ; else echo "debug"; fi) OUTPUT_DIR := $(shell if [ "$(RELEASE)" = "true" ]; then echo "release"; elif [ ! -z "$(CARGO_PROFILE)" ]; then echo "$(CARGO_PROFILE)" ; else echo "debug"; fi)
SQLNESS_OPTS ?=
# The arguments for running integration tests. # The arguments for running integration tests.
ETCD_VERSION ?= v3.5.9 ETCD_VERSION ?= v3.5.9
@@ -48,18 +45,12 @@ ifneq ($(strip $(TARGET)),)
CARGO_BUILD_OPTS += --target ${TARGET} CARGO_BUILD_OPTS += --target ${TARGET}
endif endif
ifneq ($(strip $(BUILD_BIN)),)
CARGO_BUILD_OPTS += --bin ${BUILD_BIN}
endif
ifneq ($(strip $(RELEASE)),) ifneq ($(strip $(RELEASE)),)
CARGO_BUILD_OPTS += --release CARGO_BUILD_OPTS += --release
endif endif
ifeq ($(BUILDX_MULTI_PLATFORM_BUILD), all) ifeq ($(BUILDX_MULTI_PLATFORM_BUILD), true)
BUILDX_MULTI_PLATFORM_BUILD_OPTS := --platform linux/amd64,linux/arm64 --push BUILDX_MULTI_PLATFORM_BUILD_OPTS := --platform linux/amd64,linux/arm64 --push
else ifeq ($(BUILDX_MULTI_PLATFORM_BUILD), amd64)
BUILDX_MULTI_PLATFORM_BUILD_OPTS := --platform linux/amd64 --push
else else
BUILDX_MULTI_PLATFORM_BUILD_OPTS := -o type=docker BUILDX_MULTI_PLATFORM_BUILD_OPTS := -o type=docker
endif endif
@@ -74,11 +65,11 @@ endif
build: ## Build debug version greptime. build: ## Build debug version greptime.
cargo ${CARGO_EXTENSION} build ${CARGO_BUILD_OPTS} cargo ${CARGO_EXTENSION} build ${CARGO_BUILD_OPTS}
.PHONY: build-by-dev-builder .POHNY: build-by-dev-builder
build-by-dev-builder: ## Build greptime by dev-builder. build-by-dev-builder: ## Build greptime by dev-builder.
docker run --network=host \ docker run --network=host \
-v ${PWD}:/greptimedb -v ${CARGO_REGISTRY_CACHE}:/root/.cargo/registry \ -v ${PWD}:/greptimedb -v ${CARGO_REGISTRY_CACHE}:/root/.cargo/registry \
-w /greptimedb ${IMAGE_REGISTRY}/${IMAGE_NAMESPACE}/dev-builder-${BASE_IMAGE}:${DEV_BUILDER_IMAGE_TAG} \ -w /greptimedb ${IMAGE_REGISTRY}/${IMAGE_NAMESPACE}/dev-builder-${BASE_IMAGE}:latest \
make build \ make build \
CARGO_EXTENSION="${CARGO_EXTENSION}" \ CARGO_EXTENSION="${CARGO_EXTENSION}" \
CARGO_PROFILE=${CARGO_PROFILE} \ CARGO_PROFILE=${CARGO_PROFILE} \
@@ -92,7 +83,7 @@ build-by-dev-builder: ## Build greptime by dev-builder.
build-android-bin: ## Build greptime binary for android. build-android-bin: ## Build greptime binary for android.
docker run --network=host \ docker run --network=host \
-v ${PWD}:/greptimedb -v ${CARGO_REGISTRY_CACHE}:/root/.cargo/registry \ -v ${PWD}:/greptimedb -v ${CARGO_REGISTRY_CACHE}:/root/.cargo/registry \
-w /greptimedb ${IMAGE_REGISTRY}/${IMAGE_NAMESPACE}/dev-builder-android:${DEV_BUILDER_IMAGE_TAG} \ -w /greptimedb ${IMAGE_REGISTRY}/${IMAGE_NAMESPACE}/dev-builder-android:latest \
make build \ make build \
CARGO_EXTENSION="ndk --platform 23 -t aarch64-linux-android" \ CARGO_EXTENSION="ndk --platform 23 -t aarch64-linux-android" \
CARGO_PROFILE=release \ CARGO_PROFILE=release \
@@ -106,8 +97,8 @@ build-android-bin: ## Build greptime binary for android.
strip-android-bin: build-android-bin ## Strip greptime binary for android. strip-android-bin: build-android-bin ## Strip greptime binary for android.
docker run --network=host \ docker run --network=host \
-v ${PWD}:/greptimedb \ -v ${PWD}:/greptimedb \
-w /greptimedb ${IMAGE_REGISTRY}/${IMAGE_NAMESPACE}/dev-builder-android:${DEV_BUILDER_IMAGE_TAG} \ -w /greptimedb ${IMAGE_REGISTRY}/${IMAGE_NAMESPACE}/dev-builder-android:latest \
bash -c '$${NDK_ROOT}/toolchains/llvm/prebuilt/linux-x86_64/bin/llvm-strip --strip-debug /greptimedb/target/aarch64-linux-android/release/greptime' bash -c '$${NDK_ROOT}/toolchains/llvm/prebuilt/linux-x86_64/bin/llvm-strip /greptimedb/target/aarch64-linux-android/release/greptime'
.PHONY: clean .PHONY: clean
clean: ## Clean the project. clean: ## Clean the project.
@@ -146,35 +137,23 @@ dev-builder: multi-platform-buildx ## Build dev-builder image.
docker buildx build --builder ${BUILDX_BUILDER_NAME} \ docker buildx build --builder ${BUILDX_BUILDER_NAME} \
--build-arg="RUST_TOOLCHAIN=${RUST_TOOLCHAIN}" \ --build-arg="RUST_TOOLCHAIN=${RUST_TOOLCHAIN}" \
-f docker/dev-builder/${BASE_IMAGE}/Dockerfile \ -f docker/dev-builder/${BASE_IMAGE}/Dockerfile \
-t ${IMAGE_REGISTRY}/${IMAGE_NAMESPACE}/dev-builder-${BASE_IMAGE}:${DEV_BUILDER_IMAGE_TAG} ${BUILDX_MULTI_PLATFORM_BUILD_OPTS} . -t ${IMAGE_REGISTRY}/${IMAGE_NAMESPACE}/dev-builder-${BASE_IMAGE}:${IMAGE_TAG} ${BUILDX_MULTI_PLATFORM_BUILD_OPTS} .
.PHONY: multi-platform-buildx .PHONY: multi-platform-buildx
multi-platform-buildx: ## Create buildx multi-platform builder. multi-platform-buildx: ## Create buildx multi-platform builder.
docker buildx inspect ${BUILDX_BUILDER_NAME} || docker buildx create --name ${BUILDX_BUILDER_NAME} --driver docker-container --bootstrap --use docker buildx inspect ${BUILDX_BUILDER_NAME} || docker buildx create --name ${BUILDX_BUILDER_NAME} --driver docker-container --bootstrap --use
##@ Test ##@ Test
.PHONY: test
test: nextest ## Run unit and integration tests. test: nextest ## Run unit and integration tests.
cargo nextest run ${NEXTEST_OPTS} cargo nextest run ${NEXTEST_OPTS}
.PHONY: nextest .PHONY: nextest ## Install nextest tools.
nextest: ## Install nextest tools. nextest:
cargo --list | grep nextest || cargo install cargo-nextest --locked cargo --list | grep nextest || cargo install cargo-nextest --locked
.PHONY: sqlness-test .PHONY: sqlness-test
sqlness-test: ## Run sqlness test. sqlness-test: ## Run sqlness test.
cargo sqlness ${SQLNESS_OPTS} cargo sqlness
# Run fuzz test ${FUZZ_TARGET}.
RUNS ?= 1
FUZZ_TARGET ?= fuzz_alter_table
.PHONY: fuzz
fuzz:
cargo fuzz run ${FUZZ_TARGET} --fuzz-dir tests-fuzz -D -s none -- -runs=${RUNS}
.PHONY: fuzz-ls
fuzz-ls:
cargo fuzz list --fuzz-dir tests-fuzz
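Both targets wrap `cargo fuzz` with the defaults shown above (`FUZZ_TARGET=fuzz_alter_table`, `RUNS=1`). A usage sketch, assuming `cargo-fuzz` is installed (`cargo install cargo-fuzz`):

```shell
# List the available fuzz targets, then run one for a bounded number of runs.
make fuzz-ls
make fuzz FUZZ_TARGET=fuzz_alter_table RUNS=100
```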
.PHONY: check .PHONY: check
check: ## Cargo check all the targets. check: ## Cargo check all the targets.
@@ -184,14 +163,9 @@ check: ## Cargo check all the targets.
clippy: ## Check clippy rules. clippy: ## Check clippy rules.
cargo clippy --workspace --all-targets --all-features -- -D warnings cargo clippy --workspace --all-targets --all-features -- -D warnings
.PHONY: fix-clippy
fix-clippy: ## Fix clippy violations.
cargo clippy --workspace --all-targets --all-features --fix
.PHONY: fmt-check .PHONY: fmt-check
fmt-check: ## Check code format. fmt-check: ## Check code format.
cargo fmt --all -- --check cargo fmt --all -- --check
python3 scripts/check-snafu.py
.PHONY: start-etcd .PHONY: start-etcd
start-etcd: ## Start single node etcd for testing purpose. start-etcd: ## Start single node etcd for testing purpose.
@@ -205,27 +179,9 @@ stop-etcd: ## Stop single node etcd for testing purpose.
run-it-in-container: start-etcd ## Run integration tests in dev-builder. run-it-in-container: start-etcd ## Run integration tests in dev-builder.
docker run --network=host \ docker run --network=host \
-v ${PWD}:/greptimedb -v ${CARGO_REGISTRY_CACHE}:/root/.cargo/registry -v /tmp:/tmp \ -v ${PWD}:/greptimedb -v ${CARGO_REGISTRY_CACHE}:/root/.cargo/registry -v /tmp:/tmp \
-w /greptimedb ${IMAGE_REGISTRY}/${IMAGE_NAMESPACE}/dev-builder-${BASE_IMAGE}:${DEV_BUILDER_IMAGE_TAG} \ -w /greptimedb ${IMAGE_REGISTRY}/${IMAGE_NAMESPACE}/dev-builder-${BASE_IMAGE}:latest \
make test sqlness-test BUILD_JOBS=${BUILD_JOBS} make test sqlness-test BUILD_JOBS=${BUILD_JOBS}
.PHONY: start-cluster
start-cluster: ## Start the greptimedb cluster with etcd by using docker compose.
docker compose -f ./docker/docker-compose/cluster-with-etcd.yaml up
.PHONY: stop-cluster
stop-cluster: ## Stop the greptimedb cluster that created by docker compose.
docker compose -f ./docker/docker-compose/cluster-with-etcd.yaml stop
##@ Docs
config-docs: ## Generate configuration documentation from toml files.
docker run --rm \
-v ${PWD}:/greptimedb \
-w /greptimedb/config \
toml2docs/toml2docs:v0.1.3 \
-p '##' \
-t ./config-docs-template.md \
-o ./config.md
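The cluster and docs targets above are thin wrappers around `docker compose` and the `toml2docs` image, so day-to-day usage is just the make targets. A sketch, assuming Docker with the compose plugin is available:

```shell
# Bring up a local GreptimeDB cluster with etcd, regenerate config/config.md,
# then tear the cluster down again.
make start-cluster
make config-docs
make stop-cluster
```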
##@ General ##@ General
# The help target prints out all targets with their descriptions organized # The help target prints out all targets with their descriptions organized

README.md

@@ -1,159 +1,150 @@
<p align="center"> <p align="center">
<picture> <picture>
<source media="(prefers-color-scheme: light)" srcset="https://cdn.jsdelivr.net/gh/GreptimeTeam/greptimedb@main/docs/logo-text-padding.png"> <source media="(prefers-color-scheme: light)" srcset="https://cdn.jsdelivr.net/gh/GreptimeTeam/greptimedb@develop/docs/logo-text-padding.png">
<source media="(prefers-color-scheme: dark)" srcset="https://cdn.jsdelivr.net/gh/GreptimeTeam/greptimedb@main/docs/logo-text-padding-dark.png"> <source media="(prefers-color-scheme: dark)" srcset="https://cdn.jsdelivr.net/gh/GreptimeTeam/greptimedb@develop/docs/logo-text-padding-dark.png">
<img alt="GreptimeDB Logo" src="https://cdn.jsdelivr.net/gh/GreptimeTeam/greptimedb@main/docs/logo-text-padding.png" width="400px"> <img alt="GreptimeDB Logo" src="https://cdn.jsdelivr.net/gh/GreptimeTeam/greptimedb@develop/docs/logo-text-padding.png" width="400px">
</picture> </picture>
</p> </p>
<h2 align="center">Unified Time Series Database for Metrics, Logs, and Events</h2>
<div align="center">
<h3 align="center"> <h3 align="center">
<a href="https://greptime.com/product/cloud">GreptimeCloud</a> | The next-generation hybrid time-series/analytics processing database in the cloud
<a href="https://docs.greptime.com/">User Guide</a> | </h3>
<a href="https://greptimedb.rs/">API Docs</a> |
<a href="https://github.com/GreptimeTeam/greptimedb/issues/3412">Roadmap 2024</a>
</h4>
<a href="https://github.com/GreptimeTeam/greptimedb/releases/latest"> <p align="center">
<img src="https://img.shields.io/github/v/release/GreptimeTeam/greptimedb.svg" alt="Version"/> <a href="https://codecov.io/gh/GrepTimeTeam/greptimedb"><img src="https://codecov.io/gh/GrepTimeTeam/greptimedb/branch/develop/graph/badge.svg?token=FITFDI3J3C"></img></a>
</a> &nbsp;
<a href="https://github.com/GreptimeTeam/greptimedb/releases/latest"> <a href="https://github.com/GreptimeTeam/greptimedb/actions/workflows/develop.yml"><img src="https://github.com/GreptimeTeam/greptimedb/actions/workflows/develop.yml/badge.svg" alt="CI"></img></a>
<img src="https://img.shields.io/github/release-date/GreptimeTeam/greptimedb.svg" alt="Releases"/> &nbsp;
</a> <a href="https://github.com/greptimeTeam/greptimedb/blob/develop/LICENSE"><img src="https://img.shields.io/github/license/greptimeTeam/greptimedb"></a>
<a href="https://hub.docker.com/r/greptime/greptimedb/"> </p>
<img src="https://img.shields.io/docker/pulls/greptime/greptimedb.svg" alt="Docker Pulls"/>
</a>
<a href="https://github.com/GreptimeTeam/greptimedb/actions/workflows/develop.yml">
<img src="https://github.com/GreptimeTeam/greptimedb/actions/workflows/develop.yml/badge.svg" alt="GitHub Actions"/>
</a>
<a href="https://codecov.io/gh/GrepTimeTeam/greptimedb">
<img src="https://codecov.io/gh/GrepTimeTeam/greptimedb/branch/main/graph/badge.svg?token=FITFDI3J3C" alt="Codecov"/>
</a>
<a href="https://github.com/greptimeTeam/greptimedb/blob/main/LICENSE">
<img src="https://img.shields.io/github/license/greptimeTeam/greptimedb" alt="License"/>
</a>
<br/> <p align="center">
<a href="https://twitter.com/greptime"><img src="https://img.shields.io/badge/twitter-follow_us-1d9bf0.svg"></a>
&nbsp;
<a href="https://www.linkedin.com/company/greptime/"><img src="https://img.shields.io/badge/linkedin-connect_with_us-0a66c2.svg"></a>
&nbsp;
<a href="https://greptime.com/slack"><img src="https://img.shields.io/badge/slack-GreptimeDB-0abd59?logo=slack" alt="slack" /></a>
</p>
<a href="https://greptime.com/slack"> ## What is GreptimeDB
<img src="https://img.shields.io/badge/slack-GreptimeDB-0abd59?logo=slack&style=for-the-badge" alt="Slack"/>
</a>
<a href="https://twitter.com/greptime">
<img src="https://img.shields.io/badge/twitter-follow_us-1d9bf0.svg?style=for-the-badge" alt="Twitter"/>
</a>
<a href="https://www.linkedin.com/company/greptime/">
<img src="https://img.shields.io/badge/linkedin-connect_with_us-0a66c2.svg?style=for-the-badge" alt="LinkedIn"/>
</a>
</div>
## Introduction GreptimeDB is an open-source time-series database with a special focus on
scalability, analytical capabilities and efficiency. It's designed to work on
infrastructure of the cloud era, and users benefit from its elasticity and commodity
storage.
**GreptimeDB** is an open-source unified time-series database for **Metrics**, **Logs**, and **Events** (also **Traces** in plan). You can gain real-time insights from Edge to Cloud at any scale. Our core developers have been building time-series data platform
for years. Based on their best-practices, GreptimeDB is born to give you:
## Why GreptimeDB - A standalone binary that scales to highly-available distributed cluster, providing a transparent experience for cluster users
- Optimized columnar layout for handling time-series data; compacted, compressed, and stored on various storage backends
- Flexible indexes, tackling high cardinality issues down
- Distributed, parallel query execution, leveraging elastic computing resource
- Native SQL, and Python scripting for advanced analytical scenarios
- Widely adopted database protocols and APIs, native PromQL supports
- Extensible table engine architecture for extensive workloads
Our core developers have been building time-series data platforms for years. Based on our best-practices, GreptimeDB is born to give you: ## Quick Start
* **Unified all kinds of time series** ### [GreptimePlay](https://greptime.com/playground)
GreptimeDB treats all time series as contextual events with timestamp, and thus unifies the processing of metrics, logs, and events. It supports analyzing metrics, logs, and events with SQL and PromQL, and doing streaming with continuous aggregation.
* **Cloud-Edge collaboration**
GreptimeDB can be deployed on ARM architecture-compatible Android/Linux systems as well as cloud environments from various vendors. Both sides run the same software, providing identical APIs and control planes, so your application can run at the edge or on the cloud without modification, and data synchronization also becomes extremely easy and efficient.
* **Cloud-native distributed database**
By leveraging object storage (S3 and others), separating compute and storage, scaling stateless compute nodes arbitrarily, GreptimeDB implements seamless scalability. It also supports cross-cloud deployment with a built-in unified data access layer over different object storages.
* **Performance and Cost-effective**
Flexible indexing capabilities and distributed, parallel-processing query engine, tackling high cardinality issues down. Optimized columnar layout for handling time-series data; compacted, compressed, and stored on various storage backends, particularly cloud object storage with 50x cost efficiency.
* **Compatible with InfluxDB, Prometheus and more protocols**
Widely adopted database protocols and APIs, including MySQL, PostgreSQL, and Prometheus Remote Storage, etc. [Read more](https://docs.greptime.com/user-guide/protocols/overview).
## Try GreptimeDB
### 1. [GreptimePlay](https://greptime.com/playground)
Try out the features of GreptimeDB right from your browser. Try out the features of GreptimeDB right from your browser.
### 2. [GreptimeCloud](https://console.greptime.cloud/) ### Build
Start instantly with a free cluster. #### Build from Source
### 3. Docker Image To compile GreptimeDB from source, you'll need:
To install GreptimeDB locally, the recommended way is via Docker: - C/C++ Toolchain: provides basic tools for compiling and linking. This is
available as `build-essential` on ubuntu and similar name on other platforms.
- Rust: the easiest way to install Rust is to use
[`rustup`](https://rustup.rs/), which will check our `rust-toolchain` file and
install correct Rust version for you.
- Protobuf: `protoc` is required for compiling `.proto` files. `protobuf` is
available from major package manager on macos and linux distributions. You can
find an installation instructions [here](https://grpc.io/docs/protoc-installation/).
**Note that `protoc` version needs to be >= 3.15** because we have used the `optional`
keyword. You can check it with `protoc --version`.
- python3-dev or python3-devel(Optional feature, only needed if you want to run scripts
in CPython, and also need to enable `pyo3_backend` feature when compiling(by `cargo run -F pyo3_backend` or add `pyo3_backend` to src/script/Cargo.toml 's `features.default` like `default = ["python", "pyo3_backend]`)): this install a Python shared library required for running Python
scripting engine(In CPython Mode). This is available as `python3-dev` on
ubuntu, you can install it with `sudo apt install python3-dev`, or
`python3-devel` on RPM based distributions (e.g. Fedora, Red Hat, SuSE). Mac's
`Python3` package should have this shared library by default. More detail for compiling with PyO3 can be found in [PyO3](https://pyo3.rs/v0.18.1/building_and_distribution#configuring-the-python-version)'s documentation.
```shell #### Build with Docker
docker pull greptime/greptimedb
A docker image with necessary dependencies is provided:
```
docker build --network host -f docker/Dockerfile -t greptimedb .
``` ```
Start a GreptimeDB container with: ### Run
Start GreptimeDB from source code, in standalone mode:
```shell
docker run --rm --name greptime --net=host greptime/greptimedb standalone start
``` ```
Read more about [Installation](https://docs.greptime.com/getting-started/installation/overview) on docs.
## Getting Started
* [Quickstart](https://docs.greptime.com/getting-started/quick-start)
* [User Guide](https://docs.greptime.com/user-guide/overview)
* [Demos](https://github.com/GreptimeTeam/demo-scene)
* [FAQ](https://docs.greptime.com/faq-and-others/faq)
## Build
Check the prerequisite:
* [Rust toolchain](https://www.rust-lang.org/tools/install) (nightly)
* [Protobuf compiler](https://grpc.io/docs/protoc-installation/) (>= 3.15)
* Python toolchain (optional): Required only if built with PyO3 backend. More detail for compiling with PyO3 can be found in its [documentation](https://pyo3.rs/v0.18.1/building_and_distribution#configuring-the-python-version).
Build GreptimeDB binary:
```shell
make
```
Run a standalone server:
```shell
cargo run -- standalone start cargo run -- standalone start
``` ```
## Extension Or if you built from docker:
```
docker run -p 4002:4002 -v "$(pwd):/tmp/greptimedb" greptime/greptimedb standalone start
```
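Once a standalone instance is running (from source or via the Docker command above), it can be reached over the wire protocols mentioned earlier. For example, with a MySQL client; a sketch assuming the default ports (4002 is the MySQL-compatible port exposed in the Docker example above) and no authentication configured:

```shell
# Connect to the local GreptimeDB standalone instance over the MySQL protocol.
mysql -h 127.0.0.1 -P 4002
```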
Please see the online document site for more installation options and [operations info](https://docs.greptime.com/user-guide/operations/overview).
### Get started
Read the [complete getting started guide](https://docs.greptime.com/getting-started/overview) on our [official document site](https://docs.greptime.com/).
To write and query data, GreptimeDB is compatible with multiple [protocols and clients](https://docs.greptime.com/user-guide/clients/overview).
## Resources
### Installation
- [Pre-built Binaries](https://greptime.com/download):
For Linux and macOS, you can easily download pre-built binaries including official releases and nightly builds that are ready to use.
In most cases, downloading the version without PyO3 is sufficient. However, if you plan to run scripts in CPython (and use Python packages like NumPy and Pandas), you will need to download the version with PyO3 and install a Python with the same version as the Python in the PyO3 version.
We recommend using virtualenv for the installation process to manage multiple Python versions.
- [Docker Images](https://hub.docker.com/r/greptime/greptimedb)(**recommended**): pre-built
Docker images, this is the easiest way to try GreptimeDB. By default it runs CPython script with `pyo3_backend` enabled.
- [`gtctl`](https://github.com/GreptimeTeam/gtctl): the command-line tool for
Kubernetes deployment
### Documentation
- GreptimeDB [User Guide](https://docs.greptime.com/user-guide/concepts/overview)
- GreptimeDB [Developer
Guide](https://docs.greptime.com/developer-guide/overview.html)
- GreptimeDB [internal code document](https://greptimedb.rs)
### Dashboard ### Dashboard
- [The dashboard UI for GreptimeDB](https://github.com/GreptimeTeam/dashboard) - [The dashboard UI for GreptimeDB](https://github.com/GreptimeTeam/dashboard)
### SDK ### SDK
- [GreptimeDB Go Ingester](https://github.com/GreptimeTeam/greptimedb-ingester-go) - [GreptimeDB C++ Client](https://github.com/GreptimeTeam/greptimedb-client-cpp)
- [GreptimeDB Java Ingester](https://github.com/GreptimeTeam/greptimedb-ingester-java) - [GreptimeDB Erlang Client](https://github.com/GreptimeTeam/greptimedb-client-erl)
- [GreptimeDB C++ Ingester](https://github.com/GreptimeTeam/greptimedb-ingester-cpp) - [GreptimeDB Go Client](https://github.com/GreptimeTeam/greptimedb-client-go)
- [GreptimeDB Erlang Ingester](https://github.com/GreptimeTeam/greptimedb-ingester-erl) - [GreptimeDB Java Client](https://github.com/GreptimeTeam/greptimedb-client-java)
- [GreptimeDB Rust Ingester](https://github.com/GreptimeTeam/greptimedb-ingester-rust) - [GreptimeDB Python Client](https://github.com/GreptimeTeam/greptimedb-client-py) (WIP)
- [GreptimeDB JavaScript Ingester](https://github.com/GreptimeTeam/greptimedb-ingester-js) - [GreptimeDB Rust Client](https://github.com/GreptimeTeam/greptimedb-client-rust)
- [GreptimeDB JavaScript Client](https://github.com/GreptimeTeam/greptime-js-sdk)
### Grafana Dashboard
Our official Grafana dashboard is available at [grafana](grafana/README.md) directory.
## Project Status ## Project Status
The current version has not yet reached the standards for General Availability. This project is in its early stage and under heavy development. We move fast and
According to our Greptime 2024 Roadmap, we aim to achieve a production-level version with the release of v1.0 by the end of 2024. [Join Us](https://github.com/GreptimeTeam/greptimedb/issues/3412) break things. Benchmark on development branch may not represent its potential
performance. We release pre-built binaries constantly for functional
evaluation. Do not use it in production at the moment.
We welcome you to test and use GreptimeDB. Some users have already adopted it in their production environments. If you're interested in trying it out, please use the latest stable release available. For future plans, check out [GreptimeDB roadmap](https://github.com/GreptimeTeam/greptimedb/issues/669).
## Community ## Community
@@ -163,37 +154,29 @@ and what went wrong. If you have any questions or if you would like to get invol
community, please check out: community, please check out:
- GreptimeDB Community on [Slack](https://greptime.com/slack) - GreptimeDB Community on [Slack](https://greptime.com/slack)
- GreptimeDB [GitHub Discussions forum](https://github.com/GreptimeTeam/greptimedb/discussions) - GreptimeDB GitHub [Discussions](https://github.com/GreptimeTeam/greptimedb/discussions)
- Greptime official [website](https://greptime.com) - Greptime official [Website](https://greptime.com)
In addition, you may: In addition, you may:
- View our official [Blog](https://greptime.com/blogs/) - View our official [Blog](https://greptime.com/blogs/index)
- Connect us with [Linkedin](https://www.linkedin.com/company/greptime/) - Connect us with [Linkedin](https://www.linkedin.com/company/greptime/)
- Follow us on [Twitter](https://twitter.com/greptime) - Follow us on [Twitter](https://twitter.com/greptime)
## Commercial Support
If you are running GreptimeDB OSS in your organization, we offer additional
enterprise add-ons, installation services, training and consulting. [Contact
us](https://greptime.com/contactus) and we will reach out to you with more
details of our commercial license.
## License ## License
GreptimeDB uses the [Apache License 2.0](https://apache.org/licenses/LICENSE-2.0.txt) to strike a balance between GreptimeDB uses the [Apache 2.0 license][1] to strike a balance between
open contributions and allowing you to use the software however you want. open contributions and allowing you to use the software however you want.
[1]: <https://github.com/greptimeTeam/greptimedb/blob/develop/LICENSE>
## Contributing ## Contributing
Please refer to [contribution guidelines](CONTRIBUTING.md) and [internal concepts docs](https://docs.greptime.com/contributor-guide/overview.html) for more information. Please refer to [contribution guidelines](CONTRIBUTING.md) for more information.
## Acknowledgement ## Acknowledgement
- GreptimeDB uses [Apache Arrow](https://arrow.apache.org/) as the memory model and [Apache Parquet](https://parquet.apache.org/) as the persistent file format.
Special thanks to all the contributors who have propelled GreptimeDB forward. For a complete list of contributors, please refer to [AUTHOR.md](AUTHOR.md). - GreptimeDB's query engine is powered by [Apache Arrow DataFusion](https://github.com/apache/arrow-datafusion).
- [Apache OpenDAL (incubating)](https://opendal.apache.org) gives GreptimeDB a very general and elegant data access abstraction layer.
- GreptimeDB uses [Apache Arrow™](https://arrow.apache.org/) as the memory model and [Apache Parquet™](https://parquet.apache.org/) as the persistent file format.
- GreptimeDB's query engine is powered by [Apache Arrow DataFusion™](https://arrow.apache.org/datafusion/).
- [Apache OpenDAL™](https://opendal.apache.org) gives GreptimeDB a very general and elegant data access abstraction layer.
- GreptimeDB's meta service is based on [etcd](https://etcd.io/). - GreptimeDB's meta service is based on [etcd](https://etcd.io/).
- GreptimeDB uses [RustPython](https://github.com/RustPython/RustPython) for experimental embedded python scripting. - GreptimeDB uses [RustPython](https://github.com/RustPython/RustPython) for experimental embedded python scripting.

benchmarks/Cargo.toml Normal file
View File

@@ -0,0 +1,16 @@
[package]
name = "benchmarks"
version.workspace = true
edition.workspace = true
license.workspace = true
[dependencies]
arrow.workspace = true
chrono.workspace = true
clap = { version = "4.0", features = ["derive"] }
client.workspace = true
futures-util.workspace = true
indicatif = "0.17.1"
itertools.workspace = true
parquet.workspace = true
tokio.workspace = true
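The `*.workspace = true` entries above inherit their specifications from the repository's root `Cargo.toml`. A minimal sketch of that mechanism is shown below; the member list, versions, and paths are placeholders, not the repository's actual pins.

```toml
# Root Cargo.toml (sketch): member crates inherit these via `workspace = true`.
[workspace]
members = ["benchmarks"]    # placeholder member list

[workspace.package]
version = "0.1.0"           # placeholder
edition = "2021"
license = "Apache-2.0"

[workspace.dependencies]
arrow = "47"                                     # placeholder version
client = { path = "src/client" }                 # placeholder path
tokio = { version = "1", features = ["full"] }   # placeholder
# ...remaining dependencies (chrono, futures-util, itertools, parquet) are declared the same way.
```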

View File

@@ -0,0 +1,543 @@
// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//! Use the taxi trip records from New York City dataset to bench. You can download the dataset from
//! [here](https://www1.nyc.gov/site/tlc/about/tlc-trip-record-data.page).
#![allow(clippy::print_stdout)]
use std::collections::HashMap;
use std::path::{Path, PathBuf};
use std::time::Instant;
use arrow::array::{ArrayRef, PrimitiveArray, StringArray, TimestampMicrosecondArray};
use arrow::datatypes::{DataType, Float64Type, Int64Type};
use arrow::record_batch::RecordBatch;
use clap::Parser;
use client::api::v1::column::Values;
use client::api::v1::{
Column, ColumnDataType, ColumnDef, CreateTableExpr, InsertRequest, InsertRequests, SemanticType,
};
use client::{Client, Database, Output, DEFAULT_CATALOG_NAME, DEFAULT_SCHEMA_NAME};
use futures_util::TryStreamExt;
use indicatif::{MultiProgress, ProgressBar, ProgressStyle};
use parquet::arrow::arrow_reader::ParquetRecordBatchReaderBuilder;
use tokio::task::JoinSet;
const CATALOG_NAME: &str = "greptime";
const SCHEMA_NAME: &str = "public";
#[derive(Parser)]
#[command(name = "NYC benchmark runner")]
struct Args {
/// Path to the dataset
#[arg(short, long)]
path: Option<String>,
/// Batch size of insert request.
#[arg(short = 's', long = "batch-size", default_value_t = 4096)]
batch_size: usize,
/// Number of client threads on write (parallel on file level)
#[arg(short = 't', long = "thread-num", default_value_t = 4)]
thread_num: usize,
/// Number of query iteration
#[arg(short = 'i', long = "iter-num", default_value_t = 3)]
iter_num: usize,
#[arg(long = "skip-write")]
skip_write: bool,
#[arg(long = "skip-read")]
skip_read: bool,
#[arg(short, long, default_value_t = String::from("127.0.0.1:4001"))]
endpoint: String,
}
fn get_file_list<P: AsRef<Path>>(path: P) -> Vec<PathBuf> {
std::fs::read_dir(path)
.unwrap()
.map(|dir| dir.unwrap().path().canonicalize().unwrap())
.collect()
}
fn new_table_name() -> String {
format!("nyc_taxi_{}", chrono::Utc::now().timestamp())
}
async fn write_data(
table_name: &str,
batch_size: usize,
db: &Database,
path: PathBuf,
mpb: MultiProgress,
pb_style: ProgressStyle,
) -> u128 {
let file = std::fs::File::open(&path).unwrap();
let record_batch_reader_builder = ParquetRecordBatchReaderBuilder::try_new(file).unwrap();
let row_num = record_batch_reader_builder
.metadata()
.file_metadata()
.num_rows();
let record_batch_reader = record_batch_reader_builder
.with_batch_size(batch_size)
.build()
.unwrap();
let progress_bar = mpb.add(ProgressBar::new(row_num as _));
progress_bar.set_style(pb_style);
progress_bar.set_message(format!("{path:?}"));
let mut total_rpc_elapsed_ms = 0;
for record_batch in record_batch_reader {
let record_batch = record_batch.unwrap();
if !is_record_batch_full(&record_batch) {
continue;
}
let (columns, row_count) = convert_record_batch(record_batch);
let request = InsertRequest {
table_name: table_name.to_string(),
columns,
row_count,
};
let requests = InsertRequests {
inserts: vec![request],
};
let now = Instant::now();
db.insert(requests).await.unwrap();
let elapsed = now.elapsed();
total_rpc_elapsed_ms += elapsed.as_millis();
progress_bar.inc(row_count as _);
}
progress_bar.finish_with_message(format!("file {path:?} done in {total_rpc_elapsed_ms}ms",));
total_rpc_elapsed_ms
}
fn convert_record_batch(record_batch: RecordBatch) -> (Vec<Column>, u32) {
let schema = record_batch.schema();
let fields = schema.fields();
let row_count = record_batch.num_rows();
let mut columns = vec![];
for (array, field) in record_batch.columns().iter().zip(fields.iter()) {
let (values, datatype) = build_values(array);
let semantic_type = match field.name().as_str() {
"VendorID" => SemanticType::Tag,
"tpep_pickup_datetime" => SemanticType::Timestamp,
_ => SemanticType::Field,
};
let column = Column {
column_name: field.name().clone(),
values: Some(values),
null_mask: array
.to_data()
.nulls()
.map(|bitmap| bitmap.buffer().as_slice().to_vec())
.unwrap_or_default(),
datatype: datatype.into(),
semantic_type: semantic_type as i32,
..Default::default()
};
columns.push(column);
}
(columns, row_count as _)
}
fn build_values(column: &ArrayRef) -> (Values, ColumnDataType) {
match column.data_type() {
DataType::Int64 => {
let array = column
.as_any()
.downcast_ref::<PrimitiveArray<Int64Type>>()
.unwrap();
let values = array.values();
(
Values {
i64_values: values.to_vec(),
..Default::default()
},
ColumnDataType::Int64,
)
}
DataType::Float64 => {
let array = column
.as_any()
.downcast_ref::<PrimitiveArray<Float64Type>>()
.unwrap();
let values = array.values();
(
Values {
f64_values: values.to_vec(),
..Default::default()
},
ColumnDataType::Float64,
)
}
DataType::Timestamp(_, _) => {
let array = column
.as_any()
.downcast_ref::<TimestampMicrosecondArray>()
.unwrap();
let values = array.values();
(
Values {
timestamp_microsecond_values: values.to_vec(),
..Default::default()
},
ColumnDataType::TimestampMicrosecond,
)
}
DataType::Utf8 => {
let array = column.as_any().downcast_ref::<StringArray>().unwrap();
let values = array.iter().filter_map(|s| s.map(String::from)).collect();
(
Values {
string_values: values,
..Default::default()
},
ColumnDataType::String,
)
}
DataType::Null
| DataType::Boolean
| DataType::Int8
| DataType::Int16
| DataType::Int32
| DataType::UInt8
| DataType::UInt16
| DataType::UInt32
| DataType::UInt64
| DataType::Float16
| DataType::Float32
| DataType::Date32
| DataType::Date64
| DataType::Time32(_)
| DataType::Time64(_)
| DataType::Duration(_)
| DataType::Interval(_)
| DataType::Binary
| DataType::FixedSizeBinary(_)
| DataType::LargeBinary
| DataType::LargeUtf8
| DataType::List(_)
| DataType::FixedSizeList(_, _)
| DataType::LargeList(_)
| DataType::Struct(_)
| DataType::Union(_, _)
| DataType::Dictionary(_, _)
| DataType::Decimal128(_, _)
| DataType::Decimal256(_, _)
| DataType::RunEndEncoded(_, _)
| DataType::Map(_, _) => todo!(),
}
}
fn is_record_batch_full(batch: &RecordBatch) -> bool {
batch.columns().iter().all(|col| col.null_count() == 0)
}
fn create_table_expr(table_name: &str) -> CreateTableExpr {
CreateTableExpr {
catalog_name: CATALOG_NAME.to_string(),
schema_name: SCHEMA_NAME.to_string(),
table_name: table_name.to_string(),
desc: "".to_string(),
column_defs: vec![
ColumnDef {
name: "VendorID".to_string(),
data_type: ColumnDataType::Int64 as i32,
is_nullable: true,
default_constraint: vec![],
semantic_type: SemanticType::Tag as i32,
comment: String::new(),
..Default::default()
},
ColumnDef {
name: "tpep_pickup_datetime".to_string(),
data_type: ColumnDataType::TimestampMicrosecond as i32,
is_nullable: false,
default_constraint: vec![],
semantic_type: SemanticType::Timestamp as i32,
comment: String::new(),
..Default::default()
},
ColumnDef {
name: "tpep_dropoff_datetime".to_string(),
data_type: ColumnDataType::TimestampMicrosecond as i32,
is_nullable: true,
default_constraint: vec![],
semantic_type: SemanticType::Field as i32,
comment: String::new(),
..Default::default()
},
ColumnDef {
name: "passenger_count".to_string(),
data_type: ColumnDataType::Float64 as i32,
is_nullable: true,
default_constraint: vec![],
semantic_type: SemanticType::Field as i32,
comment: String::new(),
..Default::default()
},
ColumnDef {
name: "trip_distance".to_string(),
data_type: ColumnDataType::Float64 as i32,
is_nullable: true,
default_constraint: vec![],
semantic_type: SemanticType::Field as i32,
comment: String::new(),
..Default::default()
},
ColumnDef {
name: "RatecodeID".to_string(),
data_type: ColumnDataType::Float64 as i32,
is_nullable: true,
default_constraint: vec![],
semantic_type: SemanticType::Field as i32,
comment: String::new(),
..Default::default()
},
ColumnDef {
name: "store_and_fwd_flag".to_string(),
data_type: ColumnDataType::String as i32,
is_nullable: true,
default_constraint: vec![],
semantic_type: SemanticType::Field as i32,
comment: String::new(),
..Default::default()
},
ColumnDef {
name: "PULocationID".to_string(),
data_type: ColumnDataType::Int64 as i32,
is_nullable: true,
default_constraint: vec![],
semantic_type: SemanticType::Field as i32,
comment: String::new(),
..Default::default()
},
ColumnDef {
name: "DOLocationID".to_string(),
data_type: ColumnDataType::Int64 as i32,
is_nullable: true,
default_constraint: vec![],
semantic_type: SemanticType::Field as i32,
comment: String::new(),
..Default::default()
},
ColumnDef {
name: "payment_type".to_string(),
data_type: ColumnDataType::Int64 as i32,
is_nullable: true,
default_constraint: vec![],
semantic_type: SemanticType::Field as i32,
comment: String::new(),
..Default::default()
},
ColumnDef {
name: "fare_amount".to_string(),
data_type: ColumnDataType::Float64 as i32,
is_nullable: true,
default_constraint: vec![],
semantic_type: SemanticType::Field as i32,
comment: String::new(),
..Default::default()
},
ColumnDef {
name: "extra".to_string(),
data_type: ColumnDataType::Float64 as i32,
is_nullable: true,
default_constraint: vec![],
semantic_type: SemanticType::Field as i32,
comment: String::new(),
..Default::default()
},
ColumnDef {
name: "mta_tax".to_string(),
data_type: ColumnDataType::Float64 as i32,
is_nullable: true,
default_constraint: vec![],
semantic_type: SemanticType::Field as i32,
comment: String::new(),
..Default::default()
},
ColumnDef {
name: "tip_amount".to_string(),
data_type: ColumnDataType::Float64 as i32,
is_nullable: true,
default_constraint: vec![],
semantic_type: SemanticType::Field as i32,
comment: String::new(),
..Default::default()
},
ColumnDef {
name: "tolls_amount".to_string(),
data_type: ColumnDataType::Float64 as i32,
is_nullable: true,
default_constraint: vec![],
semantic_type: SemanticType::Field as i32,
comment: String::new(),
..Default::default()
},
ColumnDef {
name: "improvement_surcharge".to_string(),
data_type: ColumnDataType::Float64 as i32,
is_nullable: true,
default_constraint: vec![],
semantic_type: SemanticType::Field as i32,
comment: String::new(),
..Default::default()
},
ColumnDef {
name: "total_amount".to_string(),
data_type: ColumnDataType::Float64 as i32,
is_nullable: true,
default_constraint: vec![],
semantic_type: SemanticType::Field as i32,
comment: String::new(),
..Default::default()
},
ColumnDef {
name: "congestion_surcharge".to_string(),
data_type: ColumnDataType::Float64 as i32,
is_nullable: true,
default_constraint: vec![],
semantic_type: SemanticType::Field as i32,
comment: String::new(),
..Default::default()
},
ColumnDef {
name: "airport_fee".to_string(),
data_type: ColumnDataType::Float64 as i32,
is_nullable: true,
default_constraint: vec![],
semantic_type: SemanticType::Field as i32,
comment: String::new(),
..Default::default()
},
],
time_index: "tpep_pickup_datetime".to_string(),
primary_keys: vec!["VendorID".to_string()],
create_if_not_exists: true,
table_options: Default::default(),
table_id: None,
engine: "mito".to_string(),
}
}
fn query_set(table_name: &str) -> HashMap<String, String> {
HashMap::from([
(
"count_all".to_string(),
format!("SELECT COUNT(*) FROM {table_name};"),
),
(
"fare_amt_by_passenger".to_string(),
format!("SELECT passenger_count, MIN(fare_amount), MAX(fare_amount), SUM(fare_amount) FROM {table_name} GROUP BY passenger_count"),
)
])
}
async fn do_write(args: &Args, db: &Database, table_name: &str) {
let mut file_list = get_file_list(args.path.clone().expect("Specify data path in argument"));
let mut write_jobs = JoinSet::new();
let create_table_result = db.create(create_table_expr(table_name)).await;
println!("Create table result: {create_table_result:?}");
let progress_bar_style = ProgressStyle::with_template(
"[{elapsed_precise}] {bar:60.cyan/blue} {pos:>7}/{len:7} {msg}",
)
.unwrap()
.progress_chars("##-");
let multi_progress_bar = MultiProgress::new();
let file_progress = multi_progress_bar.add(ProgressBar::new(file_list.len() as _));
file_progress.inc(0);
let batch_size = args.batch_size;
for _ in 0..args.thread_num {
if let Some(path) = file_list.pop() {
let db = db.clone();
let mpb = multi_progress_bar.clone();
let pb_style = progress_bar_style.clone();
let table_name = table_name.to_string();
let _ = write_jobs.spawn(async move {
write_data(&table_name, batch_size, &db, path, mpb, pb_style).await
});
}
}
while write_jobs.join_next().await.is_some() {
file_progress.inc(1);
if let Some(path) = file_list.pop() {
let db = db.clone();
let mpb = multi_progress_bar.clone();
let pb_style = progress_bar_style.clone();
let table_name = table_name.to_string();
let _ = write_jobs.spawn(async move {
write_data(&table_name, batch_size, &db, path, mpb, pb_style).await
});
}
}
}
async fn do_query(num_iter: usize, db: &Database, table_name: &str) {
for (query_name, query) in query_set(table_name) {
println!("Running query: {query}");
for i in 0..num_iter {
let now = Instant::now();
let res = db.sql(&query).await.unwrap();
match res {
Output::AffectedRows(_) | Output::RecordBatches(_) => (),
Output::Stream(stream) => {
stream.try_collect::<Vec<_>>().await.unwrap();
}
}
let elapsed = now.elapsed();
println!(
"query {}, iteration {}: {}ms",
query_name,
i,
elapsed.as_millis(),
);
}
}
}
fn main() {
let args = Args::parse();
tokio::runtime::Builder::new_multi_thread()
.worker_threads(args.thread_num)
.enable_all()
.build()
.unwrap()
.block_on(async {
let client = Client::with_urls(vec![&args.endpoint]);
let db = Database::new(DEFAULT_CATALOG_NAME, DEFAULT_SCHEMA_NAME, client);
let table_name = new_table_name();
if !args.skip_write {
do_write(&args, &db, &table_name).await;
}
if !args.skip_read {
do_query(args.iter_num, &db, &table_name).await;
}
})
}

View File

@@ -1,127 +0,0 @@
# https://git-cliff.org/docs/configuration
[remote.github]
owner = "GreptimeTeam"
repo = "greptimedb"
[changelog]
header = ""
footer = ""
# template for the changelog body
# https://keats.github.io/tera/docs/#introduction
body = """
# {{ version }}
Release date: {{ timestamp | date(format="%B %d, %Y") }}
{%- set breakings = commits | filter(attribute="breaking", value=true) -%}
{%- if breakings | length > 0 %}
## Breaking changes
{% for commit in breakings %}
* {{ commit.github.pr_title }}\
{% if commit.github.username %} by \
{% set author = commit.github.username -%}
[@{{ author }}](https://github.com/{{ author }})
{%- endif -%}
{% if commit.github.pr_number %} in \
{% set number = commit.github.pr_number -%}
[#{{ number }}]({{ self::remote_url() }}/pull/{{ number }})
{%- endif %}
{%- endfor %}
{%- endif -%}
{%- set grouped_commits = commits | filter(attribute="breaking", value=false) | group_by(attribute="group") -%}
{% for group, commits in grouped_commits %}
### {{ group | striptags | trim | upper_first }}
{% for commit in commits %}
* {{ commit.github.pr_title }}\
{% if commit.github.username %} by \
{% set author = commit.github.username -%}
[@{{ author }}](https://github.com/{{ author }})
{%- endif -%}
{% if commit.github.pr_number %} in \
{% set number = commit.github.pr_number -%}
[#{{ number }}]({{ self::remote_url() }}/pull/{{ number }})
{%- endif %}
{%- endfor -%}
{% endfor %}
{%- if github.contributors | filter(attribute="is_first_time", value=true) | length != 0 %}
{% raw %}\n{% endraw -%}
## New Contributors
{% endif -%}
{% for contributor in github.contributors | filter(attribute="is_first_time", value=true) %}
* [@{{ contributor.username }}](https://github.com/{{ contributor.username }}) made their first contribution
{%- if contributor.pr_number %} in \
[#{{ contributor.pr_number }}]({{ self::remote_url() }}/pull/{{ contributor.pr_number }}) \
{%- endif %}
{%- endfor -%}
{% if github.contributors | length != 0 %}
{% raw %}\n{% endraw -%}
## All Contributors
We would like to thank the following contributors from the GreptimeDB community:
{%- set contributors = github.contributors | sort(attribute="username") | map(attribute="username") -%}
{%- set bots = ['dependabot[bot]'] %}
{% for contributor in contributors %}
{%- if bots is containing(contributor) -%}{% continue %}{%- endif -%}
{%- if loop.first -%}
[@{{ contributor }}](https://github.com/{{ contributor }})
{%- else -%}
, [@{{ contributor }}](https://github.com/{{ contributor }})
{%- endif -%}
{%- endfor %}
{%- endif %}
{% raw %}\n{% endraw %}
{%- macro remote_url() -%}
https://github.com/{{ remote.github.owner }}/{{ remote.github.repo }}
{%- endmacro -%}
"""
trim = true
[git]
# parse the commits based on https://www.conventionalcommits.org
conventional_commits = true
# filter out the commits that are not conventional
filter_unconventional = true
# process each line of a commit as an individual commit
split_commits = false
# regex for parsing and grouping commits
commit_parsers = [
{ message = "^feat", group = "<!-- 0 -->🚀 Features" },
{ message = "^fix", group = "<!-- 1 -->🐛 Bug Fixes" },
{ message = "^doc", group = "<!-- 3 -->📚 Documentation" },
{ message = "^perf", group = "<!-- 4 -->⚡ Performance" },
{ message = "^refactor", group = "<!-- 2 -->🚜 Refactor" },
{ message = "^style", group = "<!-- 5 -->🎨 Styling" },
{ message = "^test", group = "<!-- 6 -->🧪 Testing" },
{ message = "^chore\\(release\\): prepare for", skip = true },
{ message = "^chore\\(deps.*\\)", skip = true },
{ message = "^chore\\(pr\\)", skip = true },
{ message = "^chore\\(pull\\)", skip = true },
{ message = "^chore|^ci", group = "<!-- 7 -->⚙️ Miscellaneous Tasks" },
{ body = ".*security", group = "<!-- 8 -->🛡️ Security" },
{ message = "^revert", group = "<!-- 9 -->◀️ Revert" },
]
# protect breaking changes from being skipped due to matching a skipping commit_parser
protect_breaking_commits = false
# filter out the commits that are not matched by commit parsers
filter_commits = false
# regex for matching git tags
# tag_pattern = "v[0-9].*"
# regex for skipping tags
# skip_tags = ""
# regex for ignoring tags
ignore_tags = ".*-nightly-.*"
# sort the tags topologically
topo_order = false
# sort the commits inside sections by oldest/newest order
sort_commits = "oldest"
# limit the number of commits included in the changelog.
# limit_commits = 42

View File

@@ -8,6 +8,5 @@ coverage:
ignore: ignore:
- "**/error*.rs" # ignore all error.rs files - "**/error*.rs" # ignore all error.rs files
- "tests/runner/*.rs" # ignore integration test runner - "tests/runner/*.rs" # ignore integration test runner
- "tests-integration/**/*.rs" # ignore integration tests
comment: # this is a top-level key comment: # this is a top-level key
layout: "diff" layout: "diff"

View File

@@ -1,31 +0,0 @@
# Configurations
- [Configurations](#configurations)
- [Standalone Mode](#standalone-mode)
- [Distributed Mode](#distributed-mode)
- [Frontend](#frontend)
- [Metasrv](#metasrv)
- [Datanode](#datanode)
- [Flownode](#flownode)
## Standalone Mode
{{ toml2docs "./standalone.example.toml" }}
## Distributed Mode
### Frontend
{{ toml2docs "./frontend.example.toml" }}
### Metasrv
{{ toml2docs "./metasrv.example.toml" }}
### Datanode
{{ toml2docs "./datanode.example.toml" }}
### Flownode
{{ toml2docs "./flownode.example.toml"}}

View File

@@ -1,547 +0,0 @@
# Configurations
- [Configurations](#configurations)
- [Standalone Mode](#standalone-mode)
- [Distributed Mode](#distributed-mode)
- [Frontend](#frontend)
- [Metasrv](#metasrv)
- [Datanode](#datanode)
- [Flownode](#flownode)
## Standalone Mode
| Key | Type | Default | Descriptions |
| --- | -----| ------- | ----------- |
| `mode` | String | `standalone` | The running mode of the datanode. It can be `standalone` or `distributed`. |
| `enable_telemetry` | Bool | `true` | Enable telemetry to collect anonymous usage data. |
| `default_timezone` | String | Unset | The default timezone of the server. |
| `init_regions_in_background` | Bool | `false` | Initialize all regions in the background during the startup.<br/>By default, it provides services after all regions have been initialized. |
| `init_regions_parallelism` | Integer | `16` | Parallelism of initializing regions. |
| `max_concurrent_queries` | Integer | `0` | The maximum number of concurrent queries allowed to be executed. Zero means unlimited. |
| `runtime` | -- | -- | The runtime options. |
| `runtime.global_rt_size` | Integer | `8` | The number of threads to execute the runtime for global read operations. |
| `runtime.compact_rt_size` | Integer | `4` | The number of threads to execute the runtime for global write operations. |
| `http` | -- | -- | The HTTP server options. |
| `http.addr` | String | `127.0.0.1:4000` | The address to bind the HTTP server. |
| `http.timeout` | String | `30s` | HTTP request timeout. Set to 0 to disable timeout. |
| `http.body_limit` | String | `64MB` | HTTP request body limit.<br/>The following units are supported: `B`, `KB`, `KiB`, `MB`, `MiB`, `GB`, `GiB`, `TB`, `TiB`, `PB`, `PiB`.<br/>Set to 0 to disable limit. |
| `grpc` | -- | -- | The gRPC server options. |
| `grpc.addr` | String | `127.0.0.1:4001` | The address to bind the gRPC server. |
| `grpc.runtime_size` | Integer | `8` | The number of server worker threads. |
| `grpc.tls` | -- | -- | gRPC server TLS options, see `mysql.tls` section. |
| `grpc.tls.mode` | String | `disable` | TLS mode. |
| `grpc.tls.cert_path` | String | Unset | Certificate file path. |
| `grpc.tls.key_path` | String | Unset | Private key file path. |
| `grpc.tls.watch` | Bool | `false` | Watch for Certificate and key file change and auto reload.<br/>For now, gRPC tls config does not support auto reload. |
| `mysql` | -- | -- | MySQL server options. |
| `mysql.enable` | Bool | `true` | Whether to enable. |
| `mysql.addr` | String | `127.0.0.1:4002` | The addr to bind the MySQL server. |
| `mysql.runtime_size` | Integer | `2` | The number of server worker threads. |
| `mysql.tls` | -- | -- | -- |
| `mysql.tls.mode` | String | `disable` | TLS mode, refer to https://www.postgresql.org/docs/current/libpq-ssl.html<br/>- `disable` (default value)<br/>- `prefer`<br/>- `require`<br/>- `verify-ca`<br/>- `verify-full` |
| `mysql.tls.cert_path` | String | Unset | Certificate file path. |
| `mysql.tls.key_path` | String | Unset | Private key file path. |
| `mysql.tls.watch` | Bool | `false` | Watch for Certificate and key file change and auto reload |
| `postgres` | -- | -- | PostgreSQL server options. |
| `postgres.enable` | Bool | `true` | Whether to enable |
| `postgres.addr` | String | `127.0.0.1:4003` | The addr to bind the PostgresSQL server. |
| `postgres.runtime_size` | Integer | `2` | The number of server worker threads. |
| `postgres.tls` | -- | -- | PostgreSQL server TLS options, see `mysql.tls` section. |
| `postgres.tls.mode` | String | `disable` | TLS mode. |
| `postgres.tls.cert_path` | String | Unset | Certificate file path. |
| `postgres.tls.key_path` | String | Unset | Private key file path. |
| `postgres.tls.watch` | Bool | `false` | Watch for Certificate and key file change and auto reload |
| `opentsdb` | -- | -- | OpenTSDB protocol options. |
| `opentsdb.enable` | Bool | `true` | Whether to enable OpenTSDB put in HTTP API. |
| `influxdb` | -- | -- | InfluxDB protocol options. |
| `influxdb.enable` | Bool | `true` | Whether to enable InfluxDB protocol in HTTP API. |
| `prom_store` | -- | -- | Prometheus remote storage options |
| `prom_store.enable` | Bool | `true` | Whether to enable Prometheus remote write and read in HTTP API. |
| `prom_store.with_metric_engine` | Bool | `true` | Whether to store the data from Prometheus remote write in metric engine. |
| `wal` | -- | -- | The WAL options. |
| `wal.provider` | String | `raft_engine` | The provider of the WAL.<br/>- `raft_engine`: the wal is stored in the local file system by raft-engine.<br/>- `kafka`: it's remote wal that data is stored in Kafka. |
| `wal.dir` | String | Unset | The directory to store the WAL files.<br/>**It's only used when the provider is `raft_engine`**. |
| `wal.file_size` | String | `256MB` | The size of the WAL segment file.<br/>**It's only used when the provider is `raft_engine`**. |
| `wal.purge_threshold` | String | `4GB` | The threshold of the WAL size to trigger a flush.<br/>**It's only used when the provider is `raft_engine`**. |
| `wal.purge_interval` | String | `10m` | The interval to trigger a flush.<br/>**It's only used when the provider is `raft_engine`**. |
| `wal.read_batch_size` | Integer | `128` | The read batch size.<br/>**It's only used when the provider is `raft_engine`**. |
| `wal.sync_write` | Bool | `false` | Whether to use sync write.<br/>**It's only used when the provider is `raft_engine`**. |
| `wal.enable_log_recycle` | Bool | `true` | Whether to reuse logically truncated log files.<br/>**It's only used when the provider is `raft_engine`**. |
| `wal.prefill_log_files` | Bool | `false` | Whether to pre-create log files on start up.<br/>**It's only used when the provider is `raft_engine`**. |
| `wal.sync_period` | String | `10s` | Duration for fsyncing log files.<br/>**It's only used when the provider is `raft_engine`**. |
| `wal.recovery_parallelism` | Integer | `2` | Parallelism during WAL recovery. |
| `wal.broker_endpoints` | Array | -- | The Kafka broker endpoints.<br/>**It's only used when the provider is `kafka`**. |
| `wal.auto_create_topics` | Bool | `true` | Automatically create topics for WAL.<br/>Set to `true` to automatically create topics for WAL.<br/>Otherwise, use topics named `topic_name_prefix_[0..num_topics)` |
| `wal.num_topics` | Integer | `64` | Number of topics.<br/>**It's only used when the provider is `kafka`**. |
| `wal.selector_type` | String | `round_robin` | Topic selector type.<br/>Available selector types:<br/>- `round_robin` (default)<br/>**It's only used when the provider is `kafka`**. |
| `wal.topic_name_prefix` | String | `greptimedb_wal_topic` | A Kafka topic is constructed by concatenating `topic_name_prefix` and `topic_id`.<br/>e.g., greptimedb_wal_topic_0, greptimedb_wal_topic_1.<br/>**It's only used when the provider is `kafka`**. |
| `wal.replication_factor` | Integer | `1` | Expected number of replicas of each partition.<br/>**It's only used when the provider is `kafka`**. |
| `wal.create_topic_timeout` | String | `30s` | Above which a topic creation operation will be cancelled.<br/>**It's only used when the provider is `kafka`**. |
| `wal.max_batch_bytes` | String | `1MB` | The max size of a single producer batch.<br/>Warning: Kafka has a default limit of 1MB per message in a topic.<br/>**It's only used when the provider is `kafka`**. |
| `wal.consumer_wait_timeout` | String | `100ms` | The consumer wait timeout.<br/>**It's only used when the provider is `kafka`**. |
| `wal.backoff_init` | String | `500ms` | The initial backoff delay.<br/>**It's only used when the provider is `kafka`**. |
| `wal.backoff_max` | String | `10s` | The maximum backoff delay.<br/>**It's only used when the provider is `kafka`**. |
| `wal.backoff_base` | Integer | `2` | The exponential backoff rate, i.e. next backoff = base * current backoff.<br/>**It's only used when the provider is `kafka`**. |
| `wal.backoff_deadline` | String | `5mins` | The deadline of retries.<br/>**It's only used when the provider is `kafka`**. |
| `wal.overwrite_entry_start_id` | Bool | `false` | Ignore missing entries when reading the WAL.<br/>**It's only used when the provider is `kafka`**.<br/><br/>This option ensures that when Kafka messages are deleted, the system<br/>can still successfully replay memtable data without throwing an<br/>out-of-range error.<br/>However, enabling this option might lead to unexpected data loss,<br/>as the system will skip over missing entries instead of treating<br/>them as critical errors. |
| `metadata_store` | -- | -- | Metadata storage options. |
| `metadata_store.file_size` | String | `256MB` | Kv file size in bytes. |
| `metadata_store.purge_threshold` | String | `4GB` | Kv purge threshold. |
| `procedure` | -- | -- | Procedure storage options. |
| `procedure.max_retry_times` | Integer | `3` | Procedure max retry time. |
| `procedure.retry_delay` | String | `500ms` | Initial retry delay of procedures, increases exponentially |
| `storage` | -- | -- | The data storage options. |
| `storage.data_home` | String | `/tmp/greptimedb/` | The working home directory. |
| `storage.type` | String | `File` | The storage type used to store the data.<br/>- `File`: the data is stored in the local file system.<br/>- `S3`: the data is stored in the S3 object storage.<br/>- `Gcs`: the data is stored in the Google Cloud Storage.<br/>- `Azblob`: the data is stored in the Azure Blob Storage.<br/>- `Oss`: the data is stored in the Aliyun OSS. |
| `storage.cache_path` | String | Unset | Cache configuration for object storage such as 'S3' etc.<br/>The local file cache directory. |
| `storage.cache_capacity` | String | Unset | The local file cache capacity in bytes. |
| `storage.bucket` | String | Unset | The S3 bucket name.<br/>**It's only used when the storage type is `S3`, `Oss` and `Gcs`**. |
| `storage.root` | String | Unset | The S3 data will be stored in the specified prefix, for example, `s3://${bucket}/${root}`.<br/>**It's only used when the storage type is `S3`, `Oss` and `Azblob`**. |
| `storage.access_key_id` | String | Unset | The access key id of the aws account.<br/>It's **highly recommended** to use AWS IAM roles instead of hardcoding the access key id and secret key.<br/>**It's only used when the storage type is `S3` and `Oss`**. |
| `storage.secret_access_key` | String | Unset | The secret access key of the aws account.<br/>It's **highly recommended** to use AWS IAM roles instead of hardcoding the access key id and secret key.<br/>**It's only used when the storage type is `S3`**. |
| `storage.access_key_secret` | String | Unset | The secret access key of the aliyun account.<br/>**It's only used when the storage type is `Oss`**. |
| `storage.account_name` | String | Unset | The account key of the azure account.<br/>**It's only used when the storage type is `Azblob`**. |
| `storage.account_key` | String | Unset | The account key of the azure account.<br/>**It's only used when the storage type is `Azblob`**. |
| `storage.scope` | String | Unset | The scope of the google cloud storage.<br/>**It's only used when the storage type is `Gcs`**. |
| `storage.credential_path` | String | Unset | The credential path of the google cloud storage.<br/>**It's only used when the storage type is `Gcs`**. |
| `storage.credential` | String | Unset | The credential of the google cloud storage.<br/>**It's only used when the storage type is `Gcs`**. |
| `storage.container` | String | Unset | The container of the azure account.<br/>**It's only used when the storage type is `Azblob`**. |
| `storage.sas_token` | String | Unset | The sas token of the azure account.<br/>**It's only used when the storage type is `Azblob`**. |
| `storage.endpoint` | String | Unset | The endpoint of the S3 service.<br/>**It's only used when the storage type is `S3`, `Oss`, `Gcs` and `Azblob`**. |
| `storage.region` | String | Unset | The region of the S3 service.<br/>**It's only used when the storage type is `S3`, `Oss`, `Gcs` and `Azblob`**. |
| `[[region_engine]]` | -- | -- | The region engine options. You can configure multiple region engines. |
| `region_engine.mito` | -- | -- | The Mito engine options. |
| `region_engine.mito.num_workers` | Integer | `8` | Number of region workers. |
| `region_engine.mito.worker_channel_size` | Integer | `128` | Request channel size of each worker. |
| `region_engine.mito.worker_request_batch_size` | Integer | `64` | Max batch size for a worker to handle requests. |
| `region_engine.mito.manifest_checkpoint_distance` | Integer | `10` | Number of meta action updated to trigger a new checkpoint for the manifest. |
| `region_engine.mito.compress_manifest` | Bool | `false` | Whether to compress manifest and checkpoint file by gzip (default false). |
| `region_engine.mito.max_background_flushes` | Integer | Auto | Max number of running background flush jobs (default: 1/2 of cpu cores). |
| `region_engine.mito.max_background_compactions` | Integer | Auto | Max number of running background compaction jobs (default: 1/4 of cpu cores). |
| `region_engine.mito.max_background_purges` | Integer | Auto | Max number of running background purge jobs (default: number of cpu cores). |
| `region_engine.mito.auto_flush_interval` | String | `1h` | Interval to auto flush a region if it has not flushed yet. |
| `region_engine.mito.global_write_buffer_size` | String | Auto | Global write buffer size for all regions. If not set, it defaults to 1/8 of OS memory with a max limitation of 1GB. |
| `region_engine.mito.global_write_buffer_reject_size` | String | Auto | Global write buffer size threshold to reject write requests. If not set, it defaults to 2 times `global_write_buffer_size`. |
| `region_engine.mito.sst_meta_cache_size` | String | Auto | Cache size for SST metadata. Set it to 0 to disable the cache.<br/>If not set, it defaults to 1/32 of OS memory with a max limitation of 128MB. |
| `region_engine.mito.vector_cache_size` | String | Auto | Cache size for vectors and arrow arrays. Set it to 0 to disable the cache.<br/>If not set, it defaults to 1/16 of OS memory with a max limitation of 512MB. |
| `region_engine.mito.page_cache_size` | String | Auto | Cache size for pages of SST row groups. Set it to 0 to disable the cache.<br/>If not set, it defaults to 1/8 of OS memory. |
| `region_engine.mito.selector_result_cache_size` | String | Auto | Cache size for time series selector (e.g. `last_value()`). Set it to 0 to disable the cache.<br/>If not set, it defaults to 1/16 of OS memory with a max limitation of 512MB. |
| `region_engine.mito.enable_experimental_write_cache` | Bool | `false` | Whether to enable the experimental write cache. |
| `region_engine.mito.experimental_write_cache_path` | String | `""` | File system path for write cache, defaults to `{data_home}/write_cache`. |
| `region_engine.mito.experimental_write_cache_size` | String | `512MB` | Capacity for write cache. |
| `region_engine.mito.experimental_write_cache_ttl` | String | Unset | TTL for write cache. |
| `region_engine.mito.sst_write_buffer_size` | String | `8MB` | Buffer size for SST writing. |
| `region_engine.mito.scan_parallelism` | Integer | `0` | Parallelism to scan a region (default: 1/4 of cpu cores).<br/>- `0`: using the default value (1/4 of cpu cores).<br/>- `1`: scan in current thread.<br/>- `n`: scan in parallelism n. |
| `region_engine.mito.parallel_scan_channel_size` | Integer | `32` | Capacity of the channel to send data from parallel scan tasks to the main task. |
| `region_engine.mito.allow_stale_entries` | Bool | `false` | Whether to allow stale WAL entries read during replay. |
| `region_engine.mito.min_compaction_interval` | String | `0m` | Minimum time interval between two compactions.<br/>To align with the old behavior, the default value is 0 (no restrictions). |
| `region_engine.mito.index` | -- | -- | The options for index in Mito engine. |
| `region_engine.mito.index.aux_path` | String | `""` | Auxiliary directory path for the index in filesystem, used to store intermediate files for<br/>creating the index and staging files for searching the index, defaults to `{data_home}/index_intermediate`.<br/>The default name for this directory is `index_intermediate` for backward compatibility.<br/><br/>This path contains two subdirectories:<br/>- `__intm`: for storing intermediate files used during creating index.<br/>- `staging`: for storing staging files used during searching index. |
| `region_engine.mito.index.staging_size` | String | `2GB` | The max capacity of the staging directory. |
| `region_engine.mito.inverted_index` | -- | -- | The options for inverted index in Mito engine. |
| `region_engine.mito.inverted_index.create_on_flush` | String | `auto` | Whether to create the index on flush.<br/>- `auto`: automatically (default)<br/>- `disable`: never |
| `region_engine.mito.inverted_index.create_on_compaction` | String | `auto` | Whether to create the index on compaction.<br/>- `auto`: automatically (default)<br/>- `disable`: never |
| `region_engine.mito.inverted_index.apply_on_query` | String | `auto` | Whether to apply the index on query<br/>- `auto`: automatically (default)<br/>- `disable`: never |
| `region_engine.mito.inverted_index.mem_threshold_on_create` | String | `auto` | Memory threshold for performing an external sort during index creation.<br/>- `auto`: automatically determine the threshold based on the system memory size (default)<br/>- `unlimited`: no memory limit<br/>- `[size]` e.g. `64MB`: fixed memory threshold |
| `region_engine.mito.inverted_index.intermediate_path` | String | `""` | Deprecated, use `region_engine.mito.index.aux_path` instead. |
| `region_engine.mito.inverted_index.metadata_cache_size` | String | `64MiB` | Cache size for inverted index metadata. |
| `region_engine.mito.inverted_index.content_cache_size` | String | `128MiB` | Cache size for inverted index content. |
| `region_engine.mito.fulltext_index` | -- | -- | The options for full-text index in Mito engine. |
| `region_engine.mito.fulltext_index.create_on_flush` | String | `auto` | Whether to create the index on flush.<br/>- `auto`: automatically (default)<br/>- `disable`: never |
| `region_engine.mito.fulltext_index.create_on_compaction` | String | `auto` | Whether to create the index on compaction.<br/>- `auto`: automatically (default)<br/>- `disable`: never |
| `region_engine.mito.fulltext_index.apply_on_query` | String | `auto` | Whether to apply the index on query<br/>- `auto`: automatically (default)<br/>- `disable`: never |
| `region_engine.mito.fulltext_index.mem_threshold_on_create` | String | `auto` | Memory threshold for index creation.<br/>- `auto`: automatically determine the threshold based on the system memory size (default)<br/>- `unlimited`: no memory limit<br/>- `[size]` e.g. `64MB`: fixed memory threshold |
| `region_engine.mito.memtable` | -- | -- | -- |
| `region_engine.mito.memtable.type` | String | `time_series` | Memtable type.<br/>- `time_series`: time-series memtable<br/>- `partition_tree`: partition tree memtable (experimental) |
| `region_engine.mito.memtable.index_max_keys_per_shard` | Integer | `8192` | The max number of keys in one shard.<br/>Only available for `partition_tree` memtable. |
| `region_engine.mito.memtable.data_freeze_threshold` | Integer | `32768` | The max rows of data inside the actively writing buffer in one shard.<br/>Only available for `partition_tree` memtable. |
| `region_engine.mito.memtable.fork_dictionary_bytes` | String | `1GiB` | Max dictionary bytes.<br/>Only available for `partition_tree` memtable. |
| `region_engine.file` | -- | -- | Enable the file engine. |
| `logging` | -- | -- | The logging options. |
| `logging.dir` | String | `/tmp/greptimedb/logs` | The directory to store the log files. If set to empty, logs will not be written to files. |
| `logging.level` | String | Unset | The log level. Can be `info`/`debug`/`warn`/`error`. |
| `logging.enable_otlp_tracing` | Bool | `false` | Enable OTLP tracing. |
| `logging.otlp_endpoint` | String | `http://localhost:4317` | The OTLP tracing endpoint. |
| `logging.append_stdout` | Bool | `true` | Whether to append logs to stdout. |
| `logging.log_format` | String | `text` | The log format. Can be `text`/`json`. |
| `logging.max_log_files` | Integer | `720` | The maximum amount of log files. |
| `logging.tracing_sample_ratio` | -- | -- | The percentage of tracing that will be sampled and exported.<br/>Valid range `[0, 1]`: 1 means all traces are sampled, 0 means no traces are sampled; the default value is 1.<br/>Ratios > 1 are treated as 1, and ratios < 0 are treated as 0. |
| `logging.tracing_sample_ratio.default_ratio` | Float | `1.0` | -- |
| `logging.slow_query` | -- | -- | The slow query log options. |
| `logging.slow_query.enable` | Bool | `false` | Whether to enable slow query log. |
| `logging.slow_query.threshold` | String | Unset | The threshold of slow query. |
| `logging.slow_query.sample_ratio` | Float | Unset | The sampling ratio of slow query log. The value should be in the range of (0, 1]. |
| `export_metrics` | -- | -- | The datanode can export its metrics and send them to a Prometheus-compatible service (e.g. `greptimedb` itself) via the remote-write API.<br/>This is only used for `greptimedb` to export its own metrics internally. It's different from Prometheus scrape. |
| `export_metrics.enable` | Bool | `false` | Whether to enable exporting metrics. |
| `export_metrics.write_interval` | String | `30s` | The interval of exporting metrics. |
| `export_metrics.self_import` | -- | -- | For `standalone` mode, `self_import` is recommended to collect metrics generated by itself.<br/>You must create the database before enabling it. |
| `export_metrics.self_import.db` | String | Unset | -- |
| `export_metrics.remote_write` | -- | -- | -- |
| `export_metrics.remote_write.url` | String | `""` | The url the metrics send to. The url example can be: `http://127.0.0.1:4000/v1/prometheus/write?db=greptime_metrics`. |
| `export_metrics.remote_write.headers` | InlineTable | -- | HTTP headers of Prometheus remote-write carry. |
| `tracing` | -- | -- | The tracing options. Only effect when compiled with `tokio-console` feature. |
| `tracing.tokio_console_addr` | String | Unset | The tokio console address. |
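A minimal standalone configuration assembled from the options above might look like the following sketch. The values shown are the documented defaults; this is an illustrative excerpt, not a copy of `standalone.example.toml`.

```toml
mode = "standalone"
enable_telemetry = true

[http]
addr = "127.0.0.1:4000"
timeout = "30s"

[grpc]
addr = "127.0.0.1:4001"

[mysql]
enable = true
addr = "127.0.0.1:4002"

[wal]
# Use "kafka" together with `broker_endpoints` for remote WAL.
provider = "raft_engine"
file_size = "256MB"
purge_threshold = "4GB"

[storage]
type = "File"
data_home = "/tmp/greptimedb/"

[logging]
dir = "/tmp/greptimedb/logs"
level = "info"
```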
## Distributed Mode
### Frontend
| Key | Type | Default | Descriptions |
| --- | -----| ------- | ----------- |
| `default_timezone` | String | Unset | The default timezone of the server. |
| `runtime` | -- | -- | The runtime options. |
| `runtime.global_rt_size` | Integer | `8` | The number of threads to execute the runtime for global read operations. |
| `runtime.compact_rt_size` | Integer | `4` | The number of threads to execute the runtime for global write operations. |
| `heartbeat` | -- | -- | The heartbeat options. |
| `heartbeat.interval` | String | `18s` | Interval for sending heartbeat messages to the metasrv. |
| `heartbeat.retry_interval` | String | `3s` | Interval for retrying to send heartbeat messages to the metasrv. |
| `http` | -- | -- | The HTTP server options. |
| `http.addr` | String | `127.0.0.1:4000` | The address to bind the HTTP server. |
| `http.timeout` | String | `30s` | HTTP request timeout. Set to 0 to disable timeout. |
| `http.body_limit` | String | `64MB` | HTTP request body limit.<br/>The following units are supported: `B`, `KB`, `KiB`, `MB`, `MiB`, `GB`, `GiB`, `TB`, `TiB`, `PB`, `PiB`.<br/>Set to 0 to disable limit. |
| `grpc` | -- | -- | The gRPC server options. |
| `grpc.addr` | String | `127.0.0.1:4001` | The address to bind the gRPC server. |
| `grpc.hostname` | String | `127.0.0.1` | The hostname advertised to the metasrv,<br/>and used for connections from outside the host |
| `grpc.runtime_size` | Integer | `8` | The number of server worker threads. |
| `grpc.tls` | -- | -- | gRPC server TLS options, see `mysql.tls` section. |
| `grpc.tls.mode` | String | `disable` | TLS mode. |
| `grpc.tls.cert_path` | String | Unset | Certificate file path. |
| `grpc.tls.key_path` | String | Unset | Private key file path. |
| `grpc.tls.watch` | Bool | `false` | Watch for Certificate and key file change and auto reload.<br/>For now, gRPC tls config does not support auto reload. |
| `mysql` | -- | -- | MySQL server options. |
| `mysql.enable` | Bool | `true` | Whether to enable. |
| `mysql.addr` | String | `127.0.0.1:4002` | The addr to bind the MySQL server. |
| `mysql.runtime_size` | Integer | `2` | The number of server worker threads. |
| `mysql.tls` | -- | -- | -- |
| `mysql.tls.mode` | String | `disable` | TLS mode, refer to https://www.postgresql.org/docs/current/libpq-ssl.html<br/>- `disable` (default value)<br/>- `prefer`<br/>- `require`<br/>- `verify-ca`<br/>- `verify-full` |
| `mysql.tls.cert_path` | String | Unset | Certificate file path. |
| `mysql.tls.key_path` | String | Unset | Private key file path. |
| `mysql.tls.watch` | Bool | `false` | Watch for Certificate and key file change and auto reload |
| `postgres` | -- | -- | PostgreSQL server options. |
| `postgres.enable` | Bool | `true` | Whether to enable |
| `postgres.addr` | String | `127.0.0.1:4003` | The addr to bind the PostgresSQL server. |
| `postgres.runtime_size` | Integer | `2` | The number of server worker threads. |
| `postgres.tls` | -- | -- | PostgreSQL server TLS options, see `mysql.tls` section. |
| `postgres.tls.mode` | String | `disable` | TLS mode. |
| `postgres.tls.cert_path` | String | Unset | Certificate file path. |
| `postgres.tls.key_path` | String | Unset | Private key file path. |
| `postgres.tls.watch` | Bool | `false` | Watch for Certificate and key file change and auto reload |
| `opentsdb` | -- | -- | OpenTSDB protocol options. |
| `opentsdb.enable` | Bool | `true` | Whether to enable OpenTSDB put in HTTP API. |
| `influxdb` | -- | -- | InfluxDB protocol options. |
| `influxdb.enable` | Bool | `true` | Whether to enable InfluxDB protocol in HTTP API. |
| `prom_store` | -- | -- | Prometheus remote storage options |
| `prom_store.enable` | Bool | `true` | Whether to enable Prometheus remote write and read in HTTP API. |
| `prom_store.with_metric_engine` | Bool | `true` | Whether to store the data from Prometheus remote write in metric engine. |
| `meta_client` | -- | -- | The metasrv client options. |
| `meta_client.metasrv_addrs` | Array | -- | The addresses of the metasrv. |
| `meta_client.timeout` | String | `3s` | Operation timeout. |
| `meta_client.heartbeat_timeout` | String | `500ms` | Heartbeat timeout. |
| `meta_client.ddl_timeout` | String | `10s` | DDL timeout. |
| `meta_client.connect_timeout` | String | `1s` | Connect server timeout. |
| `meta_client.tcp_nodelay` | Bool | `true` | `TCP_NODELAY` option for accepted connections. |
| `meta_client.metadata_cache_max_capacity` | Integer | `100000` | The configuration about the cache of the metadata. |
| `meta_client.metadata_cache_ttl` | String | `10m` | TTL of the metadata cache. |
| `meta_client.metadata_cache_tti` | String | `5m` | -- |
| `datanode` | -- | -- | Datanode options. |
| `datanode.client` | -- | -- | Datanode client options. |
| `datanode.client.connect_timeout` | String | `10s` | -- |
| `datanode.client.tcp_nodelay` | Bool | `true` | -- |
| `logging` | -- | -- | The logging options. |
| `logging.dir` | String | `/tmp/greptimedb/logs` | The directory to store the log files. If set to empty, logs will not be written to files. |
| `logging.level` | String | Unset | The log level. Can be `info`/`debug`/`warn`/`error`. |
| `logging.enable_otlp_tracing` | Bool | `false` | Enable OTLP tracing. |
| `logging.otlp_endpoint` | String | `http://localhost:4317` | The OTLP tracing endpoint. |
| `logging.append_stdout` | Bool | `true` | Whether to append logs to stdout. |
| `logging.log_format` | String | `text` | The log format. Can be `text`/`json`. |
| `logging.max_log_files` | Integer | `720` | The maximum amount of log files. |
| `logging.tracing_sample_ratio` | -- | -- | The percentage of tracing that will be sampled and exported.<br/>Valid range `[0, 1]`: 1 means all traces are sampled, 0 means no traces are sampled; the default value is 1.<br/>Ratios > 1 are treated as 1, and ratios < 0 are treated as 0. |
| `logging.tracing_sample_ratio.default_ratio` | Float | `1.0` | -- |
| `logging.slow_query` | -- | -- | The slow query log options. |
| `logging.slow_query.enable` | Bool | `false` | Whether to enable slow query log. |
| `logging.slow_query.threshold` | String | Unset | The threshold of slow query. |
| `logging.slow_query.sample_ratio` | Float | Unset | The sampling ratio of slow query log. The value should be in the range of (0, 1]. |
| `export_metrics` | -- | -- | The datanode can export its metrics and send them to a Prometheus-compatible service (e.g. `greptimedb` itself) via the remote-write API.<br/>This is only used for `greptimedb` to export its own metrics internally. It's different from Prometheus scrape. |
| `export_metrics.enable` | Bool | `false` | Whether to enable exporting metrics. |
| `export_metrics.write_interval` | String | `30s` | The interval of exporting metrics. |
| `export_metrics.self_import` | -- | -- | For `standalone` mode, `self_import` is recommended to collect metrics generated by itself.<br/>You must create the database before enabling it. |
| `export_metrics.self_import.db` | String | Unset | -- |
| `export_metrics.remote_write` | -- | -- | -- |
| `export_metrics.remote_write.url` | String | `""` | The url the metrics send to. The url example can be: `http://127.0.0.1:4000/v1/prometheus/write?db=greptime_metrics`. |
| `export_metrics.remote_write.headers` | InlineTable | -- | HTTP headers of Prometheus remote-write carry. |
| `tracing` | -- | -- | The tracing options. Only effect when compiled with `tokio-console` feature. |
| `tracing.tokio_console_addr` | String | Unset | The tokio console address. |
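As a sketch, a frontend instance configured from the options above mainly needs to know how to reach the metasrv; the other values below are the documented defaults and are illustrative only.

```toml
[http]
addr = "127.0.0.1:4000"

[grpc]
addr = "127.0.0.1:4001"
hostname = "127.0.0.1"

[meta_client]
# Address(es) of the metasrv this frontend registers with.
metasrv_addrs = ["127.0.0.1:3002"]
timeout = "3s"
ddl_timeout = "10s"
tcp_nodelay = true

[heartbeat]
interval = "18s"
retry_interval = "3s"
```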
### Metasrv
| Key | Type | Default | Descriptions |
| --- | -----| ------- | ----------- |
| `data_home` | String | `/tmp/metasrv/` | The working home directory. |
| `bind_addr` | String | `127.0.0.1:3002` | The bind address of metasrv. |
| `server_addr` | String | `127.0.0.1:3002` | The communication server address for frontend and datanode to connect to metasrv, "127.0.0.1:3002" by default for localhost. |
| `store_addr` | String | `127.0.0.1:2379` | Store server address default to etcd store. |
| `selector` | String | `round_robin` | Datanode selector type.<br/>- `round_robin` (default value)<br/>- `lease_based`<br/>- `load_based`<br/>For details, please see "https://docs.greptime.com/developer-guide/metasrv/selector". |
| `use_memory_store` | Bool | `false` | Store data in memory. |
| `enable_telemetry` | Bool | `true` | Whether to enable greptimedb telemetry. |
| `store_key_prefix` | String | `""` | If it's not empty, the metasrv will store all data with this key prefix. |
| `enable_region_failover` | Bool | `false` | Whether to enable region failover.<br/>This feature is only available on GreptimeDB running on cluster mode and<br/>- Using Remote WAL<br/>- Using shared storage (e.g., s3). |
| `backend` | String | `EtcdStore` | The datastore for meta server. |
| `runtime` | -- | -- | The runtime options. |
| `runtime.global_rt_size` | Integer | `8` | The number of threads to execute the runtime for global read operations. |
| `runtime.compact_rt_size` | Integer | `4` | The number of threads to execute the runtime for global write operations. |
| `procedure` | -- | -- | Procedure storage options. |
| `procedure.max_retry_times` | Integer | `12` | Procedure max retry time. |
| `procedure.retry_delay` | String | `500ms` | Initial retry delay of procedures, increases exponentially |
| `procedure.max_metadata_value_size` | String | `1500KiB` | Automatically split large values.<br/>GreptimeDB procedures use etcd as the default metadata storage backend.<br/>In etcd, the maximum size of any request is 1.5 MiB.<br/>1500KiB = 1536KiB (1.5MiB) - 36KiB (reserved size of key).<br/>Comment out `max_metadata_value_size` to disable splitting large values (no limit). |
| `failure_detector` | -- | -- | -- |
| `failure_detector.threshold` | Float | `8.0` | The threshold value used by the failure detector to determine failure conditions. |
| `failure_detector.min_std_deviation` | String | `100ms` | The minimum standard deviation of the heartbeat intervals, used to calculate acceptable variations. |
| `failure_detector.acceptable_heartbeat_pause` | String | `10000ms` | The acceptable pause duration between heartbeats, used to determine if a heartbeat interval is acceptable. |
| `failure_detector.first_heartbeat_estimate` | String | `1000ms` | The initial estimate of the heartbeat interval used by the failure detector. |
| `datanode` | -- | -- | Datanode options. |
| `datanode.client` | -- | -- | Datanode client options. |
| `datanode.client.timeout` | String | `10s` | Operation timeout. |
| `datanode.client.connect_timeout` | String | `10s` | Connect server timeout. |
| `datanode.client.tcp_nodelay` | Bool | `true` | `TCP_NODELAY` option for accepted connections. |
| `wal` | -- | -- | -- |
| `wal.provider` | String | `raft_engine` | -- |
| `wal.broker_endpoints` | Array | -- | The broker endpoints of the Kafka cluster. |
| `wal.auto_create_topics` | Bool | `true` | Automatically create topics for WAL.<br/>Set to `true` to automatically create topics for WAL.<br/>Otherwise, use topics named `topic_name_prefix_[0..num_topics)` |
| `wal.num_topics` | Integer | `64` | Number of topics. |
| `wal.selector_type` | String | `round_robin` | Topic selector type.<br/>Available selector types:<br/>- `round_robin` (default) |
| `wal.topic_name_prefix` | String | `greptimedb_wal_topic` | A Kafka topic is constructed by concatenating `topic_name_prefix` and `topic_id`.<br/>e.g., greptimedb_wal_topic_0, greptimedb_wal_topic_1. |
| `wal.replication_factor` | Integer | `1` | Expected number of replicas of each partition. |
| `wal.create_topic_timeout` | String | `30s` | Above which a topic creation operation will be cancelled. |
| `wal.backoff_init` | String | `500ms` | The initial backoff for kafka clients. |
| `wal.backoff_max` | String | `10s` | The maximum backoff for kafka clients. |
| `wal.backoff_base` | Integer | `2` | Exponential backoff rate, i.e. next backoff = base * current backoff. |
| `wal.backoff_deadline` | String | `5mins` | Stop reconnecting if the total wait time reaches the deadline. If this config is missing, the reconnecting won't terminate. |
| `logging` | -- | -- | The logging options. |
| `logging.dir` | String | `/tmp/greptimedb/logs` | The directory to store the log files. If set to empty, logs will not be written to files. |
| `logging.level` | String | Unset | The log level. Can be `info`/`debug`/`warn`/`error`. |
| `logging.enable_otlp_tracing` | Bool | `false` | Enable OTLP tracing. |
| `logging.otlp_endpoint` | String | `http://localhost:4317` | The OTLP tracing endpoint. |
| `logging.append_stdout` | Bool | `true` | Whether to append logs to stdout. |
| `logging.log_format` | String | `text` | The log format. Can be `text`/`json`. |
| `logging.max_log_files` | Integer | `720` | The maximum amount of log files. |
| `logging.tracing_sample_ratio` | -- | -- | The percentage of tracing that will be sampled and exported.<br/>Valid range `[0, 1]`: 1 means all traces are sampled, 0 means no traces are sampled; the default value is 1.<br/>Ratios > 1 are treated as 1, and ratios < 0 are treated as 0. |
| `logging.tracing_sample_ratio.default_ratio` | Float | `1.0` | -- |
| `logging.slow_query` | -- | -- | The slow query log options. |
| `logging.slow_query.enable` | Bool | `false` | Whether to enable slow query log. |
| `logging.slow_query.threshold` | String | Unset | The threshold of slow query. |
| `logging.slow_query.sample_ratio` | Float | Unset | The sampling ratio of slow query log. The value should be in the range of (0, 1]. |
| `export_metrics` | -- | -- | GreptimeDB can export its own metrics to a Prometheus-compatible service (e.g. `greptimedb` itself) via the remote-write API.<br/>This is only used for `greptimedb` to export its own metrics internally; it's different from Prometheus scraping. |
| `export_metrics.enable` | Bool | `false` | Whether to enable exporting of metrics. |
| `export_metrics.write_interval` | String | `30s` | The interval at which metrics are exported. |
| `export_metrics.self_import` | -- | -- | For `standalone` mode, `self_import` is recommended for collecting the metrics generated by the instance itself.<br/>You must create the database before enabling it. |
| `export_metrics.self_import.db` | String | Unset | -- |
| `export_metrics.remote_write` | -- | -- | -- |
| `export_metrics.remote_write.url` | String | `""` | The URL the metrics are sent to, for example: `http://127.0.0.1:4000/v1/prometheus/write?db=greptime_metrics`. |
| `export_metrics.remote_write.headers` | InlineTable | -- | HTTP headers carried by the Prometheus remote-write requests. |
| `tracing` | -- | -- | The tracing options. Only takes effect when compiled with the `tokio-console` feature. |
| `tracing.tokio_console_addr` | String | Unset | The tokio console address. |
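
For reference, the WAL options documented above map onto the TOML configuration file as in the sketch below. This is only a minimal illustration of a Kafka-backed WAL that repeats the documented defaults; the broker endpoint is a placeholder and must point at your own Kafka cluster.

```toml
[wal]
provider = "kafka"
# Placeholder broker list; replace with the endpoints of your Kafka cluster.
broker_endpoints = ["127.0.0.1:9092"]
# Topic management, matching the defaults documented above.
auto_create_topics = true
num_topics = 64
selector_type = "round_robin"
topic_name_prefix = "greptimedb_wal_topic"
replication_factor = 1
create_topic_timeout = "30s"
# Backoff applied when reconnecting to Kafka.
backoff_init = "500ms"
backoff_max = "10s"
backoff_base = 2
backoff_deadline = "5mins"
```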
### Datanode
| Key | Type | Default | Descriptions |
| --- | -----| ------- | ----------- |
| `mode` | String | `standalone` | The running mode of the datanode. It can be `standalone` or `distributed`. |
| `node_id` | Integer | Unset | The datanode identifier and should be unique in the cluster. |
| `require_lease_before_startup` | Bool | `false` | Start services after regions have obtained leases.<br/>It will block the datanode start if it can't receive leases in the heartbeat from metasrv. |
| `init_regions_in_background` | Bool | `false` | Initialize all regions in the background during the startup.<br/>By default, it provides services after all regions have been initialized. |
| `enable_telemetry` | Bool | `true` | Enable telemetry to collect anonymous usage data. |
| `init_regions_parallelism` | Integer | `16` | Parallelism of initializing regions. |
| `max_concurrent_queries` | Integer | `0` | The maximum number of concurrent queries allowed to be executed. Zero means unlimited. |
| `rpc_addr` | String | Unset | Deprecated, use `grpc.addr` instead. |
| `rpc_hostname` | String | Unset | Deprecated, use `grpc.hostname` instead. |
| `rpc_runtime_size` | Integer | Unset | Deprecated, use `grpc.runtime_size` instead. |
| `rpc_max_recv_message_size` | String | Unset | Deprecated, use `grpc.max_recv_message_size` instead. |
| `rpc_max_send_message_size` | String | Unset | Deprecated, use `grpc.max_send_message_size` instead. |
| `http` | -- | -- | The HTTP server options. |
| `http.addr` | String | `127.0.0.1:4000` | The address to bind the HTTP server. |
| `http.timeout` | String | `30s` | HTTP request timeout. Set to 0 to disable timeout. |
| `http.body_limit` | String | `64MB` | HTTP request body limit.<br/>The following units are supported: `B`, `KB`, `KiB`, `MB`, `MiB`, `GB`, `GiB`, `TB`, `TiB`, `PB`, `PiB`.<br/>Set to 0 to disable limit. |
| `grpc` | -- | -- | The gRPC server options. |
| `grpc.addr` | String | `127.0.0.1:3001` | The address to bind the gRPC server. |
| `grpc.hostname` | String | `127.0.0.1` | The hostname advertised to the metasrv,<br/>and used for connections from outside the host |
| `grpc.runtime_size` | Integer | `8` | The number of server worker threads. |
| `grpc.max_recv_message_size` | String | `512MB` | The maximum receive message size for gRPC server. |
| `grpc.max_send_message_size` | String | `512MB` | The maximum send message size for gRPC server. |
| `grpc.tls` | -- | -- | gRPC server TLS options, see `mysql.tls` section. |
| `grpc.tls.mode` | String | `disable` | TLS mode. |
| `grpc.tls.cert_path` | String | Unset | Certificate file path. |
| `grpc.tls.key_path` | String | Unset | Private key file path. |
| `grpc.tls.watch` | Bool | `false` | Watch for certificate and key file changes and auto reload.<br/>For now, the gRPC TLS config does not support auto reload. |
| `runtime` | -- | -- | The runtime options. |
| `runtime.global_rt_size` | Integer | `8` | The number of threads to execute the runtime for global read operations. |
| `runtime.compact_rt_size` | Integer | `4` | The number of threads to execute the runtime for global write operations. |
| `heartbeat` | -- | -- | The heartbeat options. |
| `heartbeat.interval` | String | `3s` | Interval for sending heartbeat messages to the metasrv. |
| `heartbeat.retry_interval` | String | `3s` | Interval for retrying to send heartbeat messages to the metasrv. |
| `meta_client` | -- | -- | The metasrv client options. |
| `meta_client.metasrv_addrs` | Array | -- | The addresses of the metasrv. |
| `meta_client.timeout` | String | `3s` | Operation timeout. |
| `meta_client.heartbeat_timeout` | String | `500ms` | Heartbeat timeout. |
| `meta_client.ddl_timeout` | String | `10s` | DDL timeout. |
| `meta_client.connect_timeout` | String | `1s` | Connect server timeout. |
| `meta_client.tcp_nodelay` | Bool | `true` | `TCP_NODELAY` option for accepted connections. |
| `meta_client.metadata_cache_max_capacity` | Integer | `100000` | The configuration about the cache of the metadata. |
| `meta_client.metadata_cache_ttl` | String | `10m` | TTL of the metadata cache. |
| `meta_client.metadata_cache_tti` | String | `5m` | TTI of the metadata cache. |
| `wal` | -- | -- | The WAL options. |
| `wal.provider` | String | `raft_engine` | The provider of the WAL.<br/>- `raft_engine`: the wal is stored in the local file system by raft-engine.<br/>- `kafka`: it's remote wal that data is stored in Kafka. |
| `wal.dir` | String | Unset | The directory to store the WAL files.<br/>**It's only used when the provider is `raft_engine`**. |
| `wal.file_size` | String | `256MB` | The size of the WAL segment file.<br/>**It's only used when the provider is `raft_engine`**. |
| `wal.purge_threshold` | String | `4GB` | The threshold of the WAL size to trigger a flush.<br/>**It's only used when the provider is `raft_engine`**. |
| `wal.purge_interval` | String | `10m` | The interval to trigger a flush.<br/>**It's only used when the provider is `raft_engine`**. |
| `wal.read_batch_size` | Integer | `128` | The read batch size.<br/>**It's only used when the provider is `raft_engine`**. |
| `wal.sync_write` | Bool | `false` | Whether to use sync write.<br/>**It's only used when the provider is `raft_engine`**. |
| `wal.enable_log_recycle` | Bool | `true` | Whether to reuse logically truncated log files.<br/>**It's only used when the provider is `raft_engine`**. |
| `wal.prefill_log_files` | Bool | `false` | Whether to pre-create log files on start up.<br/>**It's only used when the provider is `raft_engine`**. |
| `wal.sync_period` | String | `10s` | Duration for fsyncing log files.<br/>**It's only used when the provider is `raft_engine`**. |
| `wal.recovery_parallelism` | Integer | `2` | Parallelism during WAL recovery. |
| `wal.broker_endpoints` | Array | -- | The Kafka broker endpoints.<br/>**It's only used when the provider is `kafka`**. |
| `wal.max_batch_bytes` | String | `1MB` | The max size of a single producer batch.<br/>Warning: Kafka has a default limit of 1MB per message in a topic.<br/>**It's only used when the provider is `kafka`**. |
| `wal.consumer_wait_timeout` | String | `100ms` | The consumer wait timeout.<br/>**It's only used when the provider is `kafka`**. |
| `wal.backoff_init` | String | `500ms` | The initial backoff delay.<br/>**It's only used when the provider is `kafka`**. |
| `wal.backoff_max` | String | `10s` | The maximum backoff delay.<br/>**It's only used when the provider is `kafka`**. |
| `wal.backoff_base` | Integer | `2` | The exponential backoff rate, i.e. next backoff = base * current backoff.<br/>**It's only used when the provider is `kafka`**. |
| `wal.backoff_deadline` | String | `5mins` | The deadline of retries.<br/>**It's only used when the provider is `kafka`**. |
| `wal.create_index` | Bool | `true` | Whether to enable WAL index creation.<br/>**It's only used when the provider is `kafka`**. |
| `wal.dump_index_interval` | String | `60s` | The interval for dumping WAL indexes.<br/>**It's only used when the provider is `kafka`**. |
| `wal.overwrite_entry_start_id` | Bool | `false` | Ignore missing entries during read WAL.<br/>**It's only used when the provider is `kafka`**.<br/><br/>This option ensures that when Kafka messages are deleted, the system<br/>can still successfully replay memtable data without throwing an<br/>out-of-range error.<br/>However, enabling this option might lead to unexpected data loss,<br/>as the system will skip over missing entries instead of treating<br/>them as critical errors. |
| `storage` | -- | -- | The data storage options. |
| `storage.data_home` | String | `/tmp/greptimedb/` | The working home directory. |
| `storage.type` | String | `File` | The storage type used to store the data.<br/>- `File`: the data is stored in the local file system.<br/>- `S3`: the data is stored in the S3 object storage.<br/>- `Gcs`: the data is stored in the Google Cloud Storage.<br/>- `Azblob`: the data is stored in the Azure Blob Storage.<br/>- `Oss`: the data is stored in the Aliyun OSS. |
| `storage.cache_path` | String | Unset | Cache configuration for object storage such as 'S3' etc.<br/>The local file cache directory. |
| `storage.cache_capacity` | String | Unset | The local file cache capacity in bytes. |
| `storage.bucket` | String | Unset | The S3 bucket name.<br/>**It's only used when the storage type is `S3`, `Oss` and `Gcs`**. |
| `storage.root` | String | Unset | The S3 data will be stored in the specified prefix, for example, `s3://${bucket}/${root}`.<br/>**It's only used when the storage type is `S3`, `Oss` and `Azblob`**. |
| `storage.access_key_id` | String | Unset | The access key id of the AWS account.<br/>It's **highly recommended** to use AWS IAM roles instead of hardcoding the access key id and secret key.<br/>**It's only used when the storage type is `S3` and `Oss`**. |
| `storage.secret_access_key` | String | Unset | The secret access key of the AWS account.<br/>It's **highly recommended** to use AWS IAM roles instead of hardcoding the access key id and secret key.<br/>**It's only used when the storage type is `S3`**. |
| `storage.access_key_secret` | String | Unset | The secret access key of the Aliyun account.<br/>**It's only used when the storage type is `Oss`**. |
| `storage.account_name` | String | Unset | The account name of the Azure account.<br/>**It's only used when the storage type is `Azblob`**. |
| `storage.account_key` | String | Unset | The account key of the Azure account.<br/>**It's only used when the storage type is `Azblob`**. |
| `storage.scope` | String | Unset | The scope of the google cloud storage.<br/>**It's only used when the storage type is `Gcs`**. |
| `storage.credential_path` | String | Unset | The credential path of the google cloud storage.<br/>**It's only used when the storage type is `Gcs`**. |
| `storage.credential` | String | Unset | The credential of the google cloud storage.<br/>**It's only used when the storage type is `Gcs`**. |
| `storage.container` | String | Unset | The container of the azure account.<br/>**It's only used when the storage type is `Azblob`**. |
| `storage.sas_token` | String | Unset | The sas token of the azure account.<br/>**It's only used when the storage type is `Azblob`**. |
| `storage.endpoint` | String | Unset | The endpoint of the S3 service.<br/>**It's only used when the storage type is `S3`, `Oss`, `Gcs` and `Azblob`**. |
| `storage.region` | String | Unset | The region of the S3 service.<br/>**It's only used when the storage type is `S3`, `Oss`, `Gcs` and `Azblob`**. |
| `[[region_engine]]` | -- | -- | The region engine options. You can configure multiple region engines. |
| `region_engine.mito` | -- | -- | The Mito engine options. |
| `region_engine.mito.num_workers` | Integer | `8` | Number of region workers. |
| `region_engine.mito.worker_channel_size` | Integer | `128` | Request channel size of each worker. |
| `region_engine.mito.worker_request_batch_size` | Integer | `64` | Max batch size for a worker to handle requests. |
| `region_engine.mito.manifest_checkpoint_distance` | Integer | `10` | Number of meta action updated to trigger a new checkpoint for the manifest. |
| `region_engine.mito.compress_manifest` | Bool | `false` | Whether to compress manifest and checkpoint file by gzip (default false). |
| `region_engine.mito.max_background_flushes` | Integer | Auto | Max number of running background flush jobs (default: 1/2 of cpu cores). |
| `region_engine.mito.max_background_compactions` | Integer | Auto | Max number of running background compaction jobs (default: 1/4 of cpu cores). |
| `region_engine.mito.max_background_purges` | Integer | Auto | Max number of running background purge jobs (default: number of cpu cores). |
| `region_engine.mito.auto_flush_interval` | String | `1h` | Interval to auto flush a region if it has not flushed yet. |
| `region_engine.mito.global_write_buffer_size` | String | Auto | Global write buffer size for all regions. If not set, it defaults to 1/8 of OS memory with a max limitation of 1GB. |
| `region_engine.mito.global_write_buffer_reject_size` | String | Auto | Global write buffer size threshold to reject write requests. If not set, it defaults to 2 times `global_write_buffer_size`. |
| `region_engine.mito.sst_meta_cache_size` | String | Auto | Cache size for SST metadata. Setting it to 0 disables the cache.<br/>If not set, it defaults to 1/32 of OS memory with a max limitation of 128MB. |
| `region_engine.mito.vector_cache_size` | String | Auto | Cache size for vectors and arrow arrays. Setting it to 0 disables the cache.<br/>If not set, it defaults to 1/16 of OS memory with a max limitation of 512MB. |
| `region_engine.mito.page_cache_size` | String | Auto | Cache size for pages of SST row groups. Setting it to 0 disables the cache.<br/>If not set, it defaults to 1/8 of OS memory. |
| `region_engine.mito.selector_result_cache_size` | String | Auto | Cache size for time series selector (e.g. `last_value()`). Setting it to 0 disables the cache.<br/>If not set, it defaults to 1/16 of OS memory with a max limitation of 512MB. |
| `region_engine.mito.enable_experimental_write_cache` | Bool | `false` | Whether to enable the experimental write cache. |
| `region_engine.mito.experimental_write_cache_path` | String | `""` | File system path for write cache, defaults to `{data_home}/write_cache`. |
| `region_engine.mito.experimental_write_cache_size` | String | `512MB` | Capacity for write cache. |
| `region_engine.mito.experimental_write_cache_ttl` | String | Unset | TTL for write cache. |
| `region_engine.mito.sst_write_buffer_size` | String | `8MB` | Buffer size for SST writing. |
| `region_engine.mito.scan_parallelism` | Integer | `0` | Parallelism to scan a region (default: 1/4 of cpu cores).<br/>- `0`: use the default value (1/4 of cpu cores).<br/>- `1`: scan in the current thread.<br/>- `n`: scan with parallelism n. |
| `region_engine.mito.parallel_scan_channel_size` | Integer | `32` | Capacity of the channel to send data from parallel scan tasks to the main task. |
| `region_engine.mito.allow_stale_entries` | Bool | `false` | Whether to allow stale WAL entries read during replay. |
| `region_engine.mito.min_compaction_interval` | String | `0m` | Minimum time interval between two compactions.<br/>To align with the old behavior, the default value is 0 (no restrictions). |
| `region_engine.mito.index` | -- | -- | The options for index in Mito engine. |
| `region_engine.mito.index.aux_path` | String | `""` | Auxiliary directory path for the index in filesystem, used to store intermediate files for<br/>creating the index and staging files for searching the index, defaults to `{data_home}/index_intermediate`.<br/>The default name for this directory is `index_intermediate` for backward compatibility.<br/><br/>This path contains two subdirectories:<br/>- `__intm`: for storing intermediate files used during creating index.<br/>- `staging`: for storing staging files used during searching index. |
| `region_engine.mito.index.staging_size` | String | `2GB` | The max capacity of the staging directory. |
| `region_engine.mito.inverted_index` | -- | -- | The options for inverted index in Mito engine. |
| `region_engine.mito.inverted_index.create_on_flush` | String | `auto` | Whether to create the index on flush.<br/>- `auto`: automatically (default)<br/>- `disable`: never |
| `region_engine.mito.inverted_index.create_on_compaction` | String | `auto` | Whether to create the index on compaction.<br/>- `auto`: automatically (default)<br/>- `disable`: never |
| `region_engine.mito.inverted_index.apply_on_query` | String | `auto` | Whether to apply the index on query<br/>- `auto`: automatically (default)<br/>- `disable`: never |
| `region_engine.mito.inverted_index.mem_threshold_on_create` | String | `auto` | Memory threshold for performing an external sort during index creation.<br/>- `auto`: automatically determine the threshold based on the system memory size (default)<br/>- `unlimited`: no memory limit<br/>- `[size]` e.g. `64MB`: fixed memory threshold |
| `region_engine.mito.inverted_index.intermediate_path` | String | `""` | Deprecated, use `region_engine.mito.index.aux_path` instead. |
| `region_engine.mito.fulltext_index` | -- | -- | The options for full-text index in Mito engine. |
| `region_engine.mito.fulltext_index.create_on_flush` | String | `auto` | Whether to create the index on flush.<br/>- `auto`: automatically (default)<br/>- `disable`: never |
| `region_engine.mito.fulltext_index.create_on_compaction` | String | `auto` | Whether to create the index on compaction.<br/>- `auto`: automatically (default)<br/>- `disable`: never |
| `region_engine.mito.fulltext_index.apply_on_query` | String | `auto` | Whether to apply the index on query<br/>- `auto`: automatically (default)<br/>- `disable`: never |
| `region_engine.mito.fulltext_index.mem_threshold_on_create` | String | `auto` | Memory threshold for index creation.<br/>- `auto`: automatically determine the threshold based on the system memory size (default)<br/>- `unlimited`: no memory limit<br/>- `[size]` e.g. `64MB`: fixed memory threshold |
| `region_engine.mito.memtable` | -- | -- | -- |
| `region_engine.mito.memtable.type` | String | `time_series` | Memtable type.<br/>- `time_series`: time-series memtable<br/>- `partition_tree`: partition tree memtable (experimental) |
| `region_engine.mito.memtable.index_max_keys_per_shard` | Integer | `8192` | The max number of keys in one shard.<br/>Only available for `partition_tree` memtable. |
| `region_engine.mito.memtable.data_freeze_threshold` | Integer | `32768` | The max rows of data inside the actively writing buffer in one shard.<br/>Only available for `partition_tree` memtable. |
| `region_engine.mito.memtable.fork_dictionary_bytes` | String | `1GiB` | Max dictionary bytes.<br/>Only available for `partition_tree` memtable. |
| `region_engine.file` | -- | -- | Enable the file engine. |
| `logging` | -- | -- | The logging options. |
| `logging.dir` | String | `/tmp/greptimedb/logs` | The directory to store the log files. If set to empty, logs will not be written to files. |
| `logging.level` | String | Unset | The log level. Can be `info`/`debug`/`warn`/`error`. |
| `logging.enable_otlp_tracing` | Bool | `false` | Enable OTLP tracing. |
| `logging.otlp_endpoint` | String | `http://localhost:4317` | The OTLP tracing endpoint. |
| `logging.append_stdout` | Bool | `true` | Whether to append logs to stdout. |
| `logging.log_format` | String | `text` | The log format. Can be `text`/`json`. |
| `logging.max_log_files` | Integer | `720` | The maximum amount of log files. |
| `logging.tracing_sample_ratio` | -- | -- | The percentage of tracing that will be sampled and exported.<br/>Valid range `[0, 1]`: 1 means all traces are sampled, 0 means no traces are sampled; the default value is 1.<br/>Ratios > 1 are treated as 1, ratios < 0 are treated as 0. |
| `logging.tracing_sample_ratio.default_ratio` | Float | `1.0` | -- |
| `logging.slow_query` | -- | -- | The slow query log options. |
| `logging.slow_query.enable` | Bool | `false` | Whether to enable slow query log. |
| `logging.slow_query.threshold` | String | Unset | The threshold of slow query. |
| `logging.slow_query.sample_ratio` | Float | Unset | The sampling ratio of slow query log. The value should be in the range of (0, 1]. |
| `export_metrics` | -- | -- | The datanode can export its metrics and send them to a Prometheus-compatible service (e.g. `greptimedb` itself) via the remote-write API.<br/>This is only used for `greptimedb` to export its own metrics internally; it's different from Prometheus scraping. |
| `export_metrics.enable` | Bool | `false` | Whether to enable exporting of metrics. |
| `export_metrics.write_interval` | String | `30s` | The interval at which metrics are exported. |
| `export_metrics.self_import` | -- | -- | For `standalone` mode, `self_import` is recommended for collecting the metrics generated by the instance itself.<br/>You must create the database before enabling it. |
| `export_metrics.self_import.db` | String | Unset | -- |
| `export_metrics.remote_write` | -- | -- | -- |
| `export_metrics.remote_write.url` | String | `""` | The URL the metrics are sent to, for example: `http://127.0.0.1:4000/v1/prometheus/write?db=greptime_metrics`. |
| `export_metrics.remote_write.headers` | InlineTable | -- | HTTP headers carried by the Prometheus remote-write requests. |
| `tracing` | -- | -- | The tracing options. Only takes effect when compiled with the `tokio-console` feature. |
| `tracing.tokio_console_addr` | String | Unset | The tokio console address. |
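
To make the deprecation notes in the table above concrete: the top-level `rpc_*` keys and the `[grpc]` section configure the same server, and new configurations should prefer the latter. The sketch below only repeats the documented defaults.

```toml
# Preferred form: configure the datanode gRPC server through the [grpc] section.
[grpc]
addr = "127.0.0.1:3001"
hostname = "127.0.0.1"
runtime_size = 8
max_recv_message_size = "512MB"
max_send_message_size = "512MB"

# Deprecated top-level equivalents: rpc_addr, rpc_hostname, rpc_runtime_size,
# rpc_max_recv_message_size, rpc_max_send_message_size.
```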
### Flownode
| Key | Type | Default | Descriptions |
| --- | -----| ------- | ----------- |
| `mode` | String | `distributed` | The running mode of the flownode. It can be `standalone` or `distributed`. |
| `node_id` | Integer | Unset | The flownode identifier and should be unique in the cluster. |
| `grpc` | -- | -- | The gRPC server options. |
| `grpc.addr` | String | `127.0.0.1:6800` | The address to bind the gRPC server. |
| `grpc.hostname` | String | `127.0.0.1` | The hostname advertised to the metasrv,<br/>and used for connections from outside the host |
| `grpc.runtime_size` | Integer | `2` | The number of server worker threads. |
| `grpc.max_recv_message_size` | String | `512MB` | The maximum receive message size for gRPC server. |
| `grpc.max_send_message_size` | String | `512MB` | The maximum send message size for gRPC server. |
| `meta_client` | -- | -- | The metasrv client options. |
| `meta_client.metasrv_addrs` | Array | -- | The addresses of the metasrv. |
| `meta_client.timeout` | String | `3s` | Operation timeout. |
| `meta_client.heartbeat_timeout` | String | `500ms` | Heartbeat timeout. |
| `meta_client.ddl_timeout` | String | `10s` | DDL timeout. |
| `meta_client.connect_timeout` | String | `1s` | Connect server timeout. |
| `meta_client.tcp_nodelay` | Bool | `true` | `TCP_NODELAY` option for accepted connections. |
| `meta_client.metadata_cache_max_capacity` | Integer | `100000` | The configuration about the cache of the metadata. |
| `meta_client.metadata_cache_ttl` | String | `10m` | TTL of the metadata cache. |
| `meta_client.metadata_cache_tti` | String | `5m` | TTI of the metadata cache. |
| `heartbeat` | -- | -- | The heartbeat options. |
| `heartbeat.interval` | String | `3s` | Interval for sending heartbeat messages to the metasrv. |
| `heartbeat.retry_interval` | String | `3s` | Interval for retrying to send heartbeat messages to the metasrv. |
| `logging` | -- | -- | The logging options. |
| `logging.dir` | String | `/tmp/greptimedb/logs` | The directory to store the log files. If set to empty, logs will not be written to files. |
| `logging.level` | String | Unset | The log level. Can be `info`/`debug`/`warn`/`error`. |
| `logging.enable_otlp_tracing` | Bool | `false` | Enable OTLP tracing. |
| `logging.otlp_endpoint` | String | `http://localhost:4317` | The OTLP tracing endpoint. |
| `logging.append_stdout` | Bool | `true` | Whether to append logs to stdout. |
| `logging.log_format` | String | `text` | The log format. Can be `text`/`json`. |
| `logging.max_log_files` | Integer | `720` | The maximum amount of log files. |
| `logging.tracing_sample_ratio` | -- | -- | The percentage of tracing that will be sampled and exported.<br/>Valid range `[0, 1]`: 1 means all traces are sampled, 0 means no traces are sampled; the default value is 1.<br/>Ratios > 1 are treated as 1, ratios < 0 are treated as 0. |
| `logging.tracing_sample_ratio.default_ratio` | Float | `1.0` | -- |
| `logging.slow_query` | -- | -- | The slow query log options. |
| `logging.slow_query.enable` | Bool | `false` | Whether to enable slow query log. |
| `logging.slow_query.threshold` | String | Unset | The threshold of slow query. |
| `logging.slow_query.sample_ratio` | Float | Unset | The sampling ratio of slow query log. The value should be in the range of (0, 1]. |
| `tracing` | -- | -- | The tracing options. Only takes effect when compiled with the `tokio-console` feature. |
| `tracing.tokio_console_addr` | String | Unset | The tokio console address. |
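
Putting the flownode options together, a minimal configuration file might look like the following sketch. The `node_id` and the metasrv address are placeholders that have to match your cluster; the remaining values simply repeat the defaults from the table above.

```toml
mode = "distributed"
# Placeholder identifier; must be unique within the cluster.
node_id = 14

[grpc]
addr = "127.0.0.1:6800"
hostname = "127.0.0.1"
runtime_size = 2

[meta_client]
# Placeholder metasrv address.
metasrv_addrs = ["127.0.0.1:3002"]
timeout = "3s"

[heartbeat]
interval = "3s"
retry_interval = "3s"
```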

@@ -1,652 +1,89 @@
## The running mode of the datanode. It can be `standalone` or `distributed`. # Node running mode, see `standalone.example.toml`.
mode = "standalone" mode = "distributed"
# The datanode identifier, should be unique.
## The datanode identifier and should be unique in the cluster.
## @toml2docs:none-default
node_id = 42 node_id = 42
# gRPC server address, "127.0.0.1:3001" by default.
## Start services after regions have obtained leases. rpc_addr = "127.0.0.1:3001"
## It will block the datanode start if it can't receive leases in the heartbeat from metasrv. # Hostname of this node.
rpc_hostname = "127.0.0.1"
# The number of gRPC server worker threads, 8 by default.
rpc_runtime_size = 8
# Start services after regions have obtained leases.
# It will block the datanode start if it can't receive leases in the heartbeat from metasrv.
require_lease_before_startup = false require_lease_before_startup = false
## Initialize all regions in the background during the startup.
## By default, it provides services after all regions have been initialized.
init_regions_in_background = false
## Enable telemetry to collect anonymous usage data.
enable_telemetry = true
## Parallelism of initializing regions.
init_regions_parallelism = 16
## The maximum current queries allowed to be executed. Zero means unlimited.
max_concurrent_queries = 0
## Deprecated, use `grpc.addr` instead.
## @toml2docs:none-default
rpc_addr = "127.0.0.1:3001"
## Deprecated, use `grpc.hostname` instead.
## @toml2docs:none-default
rpc_hostname = "127.0.0.1"
## Deprecated, use `grpc.runtime_size` instead.
## @toml2docs:none-default
rpc_runtime_size = 8
## Deprecated, use `grpc.rpc_max_recv_message_size` instead.
## @toml2docs:none-default
rpc_max_recv_message_size = "512MB"
## Deprecated, use `grpc.rpc_max_send_message_size` instead.
## @toml2docs:none-default
rpc_max_send_message_size = "512MB"
## The HTTP server options.
[http]
## The address to bind the HTTP server.
addr = "127.0.0.1:4000"
## HTTP request timeout. Set to 0 to disable timeout.
timeout = "30s"
## HTTP request body limit.
## The following units are supported: `B`, `KB`, `KiB`, `MB`, `MiB`, `GB`, `GiB`, `TB`, `TiB`, `PB`, `PiB`.
## Set to 0 to disable limit.
body_limit = "64MB"
## The gRPC server options.
[grpc]
## The address to bind the gRPC server.
addr = "127.0.0.1:3001"
## The hostname advertised to the metasrv,
## and used for connections from outside the host
hostname = "127.0.0.1"
## The number of server worker threads.
runtime_size = 8
## The maximum receive message size for gRPC server.
max_recv_message_size = "512MB"
## The maximum send message size for gRPC server.
max_send_message_size = "512MB"
## gRPC server TLS options, see `mysql.tls` section.
[grpc.tls]
## TLS mode.
mode = "disable"
## Certificate file path.
## @toml2docs:none-default
cert_path = ""
## Private key file path.
## @toml2docs:none-default
key_path = ""
## Watch for Certificate and key file change and auto reload.
## For now, gRPC tls config does not support auto reload.
watch = false
## The runtime options.
#+ [runtime]
## The number of threads to execute the runtime for global read operations.
#+ global_rt_size = 8
## The number of threads to execute the runtime for global write operations.
#+ compact_rt_size = 4
## The heartbeat options.
[heartbeat] [heartbeat]
## Interval for sending heartbeat messages to the metasrv. # Interval for sending heartbeat messages to the Metasrv, 3 seconds by default.
interval = "3s" interval = "3s"
## Interval for retrying to send heartbeat messages to the metasrv. # Metasrv client options.
retry_interval = "3s"
## The metasrv client options.
[meta_client] [meta_client]
## The addresses of the metasrv. # Metasrv address list.
metasrv_addrs = ["127.0.0.1:3002"] metasrv_addrs = ["127.0.0.1:3002"]
# Heartbeat timeout, 500 milliseconds by default.
## Operation timeout.
timeout = "3s"
## Heartbeat timeout.
heartbeat_timeout = "500ms" heartbeat_timeout = "500ms"
# Operation timeout, 3 seconds by default.
## DDL timeout. timeout = "3s"
ddl_timeout = "10s" # Connect server timeout, 1 second by default.
## Connect server timeout.
connect_timeout = "1s" connect_timeout = "1s"
# `TCP_NODELAY` option for accepted connections, true by default.
## `TCP_NODELAY` option for accepted connections.
tcp_nodelay = true tcp_nodelay = true
## The configuration about the cache of the metadata. # WAL options, see `standalone.example.toml`.
metadata_cache_max_capacity = 100000
## TTL of the metadata cache.
metadata_cache_ttl = "10m"
# TTI of the metadata cache.
metadata_cache_tti = "5m"
## The WAL options.
[wal] [wal]
## The provider of the WAL. # WAL data directory
## - `raft_engine`: the wal is stored in the local file system by raft-engine. # dir = "/tmp/greptimedb/wal"
## - `kafka`: it's remote wal that data is stored in Kafka.
provider = "raft_engine"
## The directory to store the WAL files.
## **It's only used when the provider is `raft_engine`**.
## @toml2docs:none-default
dir = "/tmp/greptimedb/wal"
## The size of the WAL segment file.
## **It's only used when the provider is `raft_engine`**.
file_size = "256MB" file_size = "256MB"
## The threshold of the WAL size to trigger a flush.
## **It's only used when the provider is `raft_engine`**.
purge_threshold = "4GB" purge_threshold = "4GB"
## The interval to trigger a flush.
## **It's only used when the provider is `raft_engine`**.
purge_interval = "10m" purge_interval = "10m"
## The read batch size.
## **It's only used when the provider is `raft_engine`**.
read_batch_size = 128 read_batch_size = 128
## Whether to use sync write.
## **It's only used when the provider is `raft_engine`**.
sync_write = false sync_write = false
## Whether to reuse logically truncated log files. # Storage options, see `standalone.example.toml`.
## **It's only used when the provider is `raft_engine`**.
enable_log_recycle = true
## Whether to pre-create log files on start up.
## **It's only used when the provider is `raft_engine`**.
prefill_log_files = false
## Duration for fsyncing log files.
## **It's only used when the provider is `raft_engine`**.
sync_period = "10s"
## Parallelism during WAL recovery.
recovery_parallelism = 2
## The Kafka broker endpoints.
## **It's only used when the provider is `kafka`**.
broker_endpoints = ["127.0.0.1:9092"]
## The max size of a single producer batch.
## Warning: Kafka has a default limit of 1MB per message in a topic.
## **It's only used when the provider is `kafka`**.
max_batch_bytes = "1MB"
## The consumer wait timeout.
## **It's only used when the provider is `kafka`**.
consumer_wait_timeout = "100ms"
## The initial backoff delay.
## **It's only used when the provider is `kafka`**.
backoff_init = "500ms"
## The maximum backoff delay.
## **It's only used when the provider is `kafka`**.
backoff_max = "10s"
## The exponential backoff rate, i.e. next backoff = base * current backoff.
## **It's only used when the provider is `kafka`**.
backoff_base = 2
## The deadline of retries.
## **It's only used when the provider is `kafka`**.
backoff_deadline = "5mins"
## Whether to enable WAL index creation.
## **It's only used when the provider is `kafka`**.
create_index = true
## The interval for dumping WAL indexes.
## **It's only used when the provider is `kafka`**.
dump_index_interval = "60s"
## Ignore missing entries during read WAL.
## **It's only used when the provider is `kafka`**.
##
## This option ensures that when Kafka messages are deleted, the system
## can still successfully replay memtable data without throwing an
## out-of-range error.
## However, enabling this option might lead to unexpected data loss,
## as the system will skip over missing entries instead of treating
## them as critical errors.
overwrite_entry_start_id = false
# The Kafka SASL configuration.
# **It's only used when the provider is `kafka`**.
# Available SASL mechanisms:
# - `PLAIN`
# - `SCRAM-SHA-256`
# - `SCRAM-SHA-512`
# [wal.sasl]
# type = "SCRAM-SHA-512"
# username = "user_kafka"
# password = "secret"
# The Kafka TLS configuration.
# **It's only used when the provider is `kafka`**.
# [wal.tls]
# server_ca_cert_path = "/path/to/server_cert"
# client_cert_path = "/path/to/client_cert"
# client_key_path = "/path/to/key"
# Example of using S3 as the storage.
# [storage]
# type = "S3"
# bucket = "greptimedb"
# root = "data"
# access_key_id = "test"
# secret_access_key = "123456"
# endpoint = "https://s3.amazonaws.com"
# region = "us-west-2"
# Example of using Oss as the storage.
# [storage]
# type = "Oss"
# bucket = "greptimedb"
# root = "data"
# access_key_id = "test"
# access_key_secret = "123456"
# endpoint = "https://oss-cn-hangzhou.aliyuncs.com"
# Example of using Azblob as the storage.
# [storage]
# type = "Azblob"
# container = "greptimedb"
# root = "data"
# account_name = "test"
# account_key = "123456"
# endpoint = "https://greptimedb.blob.core.windows.net"
# sas_token = ""
# Example of using Gcs as the storage.
# [storage]
# type = "Gcs"
# bucket = "greptimedb"
# root = "data"
# scope = "test"
# credential_path = "123456"
# credential = "base64-credential"
# endpoint = "https://storage.googleapis.com"
## The data storage options.
[storage] [storage]
## The working home directory. # The working home directory.
data_home = "/tmp/greptimedb/" data_home = "/tmp/greptimedb/"
## The storage type used to store the data.
## - `File`: the data is stored in the local file system.
## - `S3`: the data is stored in the S3 object storage.
## - `Gcs`: the data is stored in the Google Cloud Storage.
## - `Azblob`: the data is stored in the Azure Blob Storage.
## - `Oss`: the data is stored in the Aliyun OSS.
type = "File" type = "File"
# TTL for all tables. Disabled by default.
# global_ttl = "7d"
## Cache configuration for object storage such as 'S3' etc. # Cache configuration for object storage such as 'S3' etc.
## The local file cache directory. # The local file cache directory
## @toml2docs:none-default # cache_path = "/path/local_cache"
cache_path = "/path/local_cache" # The local file cache capacity in bytes.
# cache_capacity = "256MB"
## The local file cache capacity in bytes. # Mito engine options
## @toml2docs:none-default
cache_capacity = "256MB"
## The S3 bucket name.
## **It's only used when the storage type is `S3`, `Oss` and `Gcs`**.
## @toml2docs:none-default
bucket = "greptimedb"
## The S3 data will be stored in the specified prefix, for example, `s3://${bucket}/${root}`.
## **It's only used when the storage type is `S3`, `Oss` and `Azblob`**.
## @toml2docs:none-default
root = "greptimedb"
## The access key id of the aws account.
## It's **highly recommended** to use AWS IAM roles instead of hardcoding the access key id and secret key.
## **It's only used when the storage type is `S3` and `Oss`**.
## @toml2docs:none-default
access_key_id = "test"
## The secret access key of the aws account.
## It's **highly recommended** to use AWS IAM roles instead of hardcoding the access key id and secret key.
## **It's only used when the storage type is `S3`**.
## @toml2docs:none-default
secret_access_key = "test"
## The secret access key of the aliyun account.
## **It's only used when the storage type is `Oss`**.
## @toml2docs:none-default
access_key_secret = "test"
## The account key of the azure account.
## **It's only used when the storage type is `Azblob`**.
## @toml2docs:none-default
account_name = "test"
## The account key of the azure account.
## **It's only used when the storage type is `Azblob`**.
## @toml2docs:none-default
account_key = "test"
## The scope of the google cloud storage.
## **It's only used when the storage type is `Gcs`**.
## @toml2docs:none-default
scope = "test"
## The credential path of the google cloud storage.
## **It's only used when the storage type is `Gcs`**.
## @toml2docs:none-default
credential_path = "test"
## The credential of the google cloud storage.
## **It's only used when the storage type is `Gcs`**.
## @toml2docs:none-default
credential = "base64-credential"
## The container of the azure account.
## **It's only used when the storage type is `Azblob`**.
## @toml2docs:none-default
container = "greptimedb"
## The sas token of the azure account.
## **It's only used when the storage type is `Azblob`**.
## @toml2docs:none-default
sas_token = ""
## The endpoint of the S3 service.
## **It's only used when the storage type is `S3`, `Oss`, `Gcs` and `Azblob`**.
## @toml2docs:none-default
endpoint = "https://s3.amazonaws.com"
## The region of the S3 service.
## **It's only used when the storage type is `S3`, `Oss`, `Gcs` and `Azblob`**.
## @toml2docs:none-default
region = "us-west-2"
# Custom storage options
# [[storage.providers]]
# name = "S3"
# type = "S3"
# bucket = "greptimedb"
# root = "data"
# access_key_id = "test"
# secret_access_key = "123456"
# endpoint = "https://s3.amazonaws.com"
# region = "us-west-2"
# [[storage.providers]]
# name = "Gcs"
# type = "Gcs"
# bucket = "greptimedb"
# root = "data"
# scope = "test"
# credential_path = "123456"
# credential = "base64-credential"
# endpoint = "https://storage.googleapis.com"
## The region engine options. You can configure multiple region engines.
[[region_engine]] [[region_engine]]
## The Mito engine options.
[region_engine.mito] [region_engine.mito]
# Number of region workers
## Number of region workers. num_workers = 8
#+ num_workers = 8 # Request channel size of each worker
## Request channel size of each worker.
worker_channel_size = 128 worker_channel_size = 128
# Max batch size for a worker to handle requests
## Max batch size for a worker to handle requests.
worker_request_batch_size = 64 worker_request_batch_size = 64
# Number of meta action updated to trigger a new checkpoint for the manifest
## Number of meta action updated to trigger a new checkpoint for the manifest.
manifest_checkpoint_distance = 10 manifest_checkpoint_distance = 10
# Whether to compress manifest and checkpoint file by gzip (default false).
## Whether to compress manifest and checkpoint file by gzip (default false).
compress_manifest = false compress_manifest = false
# Max number of running background jobs
## Max number of running background flush jobs (default: 1/2 of cpu cores). max_background_jobs = 4
## @toml2docs:none-default="Auto" # Interval to auto flush a region if it has not flushed yet.
#+ max_background_flushes = 4
## Max number of running background compaction jobs (default: 1/4 of cpu cores).
## @toml2docs:none-default="Auto"
#+ max_background_compactions = 2
## Max number of running background purge jobs (default: number of cpu cores).
## @toml2docs:none-default="Auto"
#+ max_background_purges = 8
## Interval to auto flush a region if it has not flushed yet.
auto_flush_interval = "1h" auto_flush_interval = "1h"
# Global write buffer size for all regions.
## Global write buffer size for all regions. If not set, it's default to 1/8 of OS memory with a max limitation of 1GB. global_write_buffer_size = "1GB"
## @toml2docs:none-default="Auto" # Global write buffer size threshold to reject write requests (default 2G).
#+ global_write_buffer_size = "1GB" global_write_buffer_reject_size = "2GB"
# Cache size for SST metadata (default 128MB). Setting it to 0 to disable the cache.
## Global write buffer size threshold to reject write requests. If not set, it's default to 2 times of `global_write_buffer_size` sst_meta_cache_size = "128MB"
## @toml2docs:none-default="Auto" # Cache size for vectors and arrow arrays (default 512MB). Setting it to 0 to disable the cache.
#+ global_write_buffer_reject_size = "2GB" vector_cache_size = "512MB"
# Cache size for pages of SST row groups (default 512MB). Setting it to 0 to disable the cache.
## Cache size for SST metadata. Setting it to 0 to disable the cache. page_cache_size = "512MB"
## If not set, it's default to 1/32 of OS memory with a max limitation of 128MB. # Buffer size for SST writing.
## @toml2docs:none-default="Auto"
#+ sst_meta_cache_size = "128MB"
## Cache size for vectors and arrow arrays. Setting it to 0 to disable the cache.
## If not set, it's default to 1/16 of OS memory with a max limitation of 512MB.
## @toml2docs:none-default="Auto"
#+ vector_cache_size = "512MB"
## Cache size for pages of SST row groups. Setting it to 0 to disable the cache.
## If not set, it's default to 1/8 of OS memory.
## @toml2docs:none-default="Auto"
#+ page_cache_size = "512MB"
## Cache size for time series selector (e.g. `last_value()`). Setting it to 0 to disable the cache.
## If not set, it's default to 1/16 of OS memory with a max limitation of 512MB.
## @toml2docs:none-default="Auto"
#+ selector_result_cache_size = "512MB"
## Whether to enable the experimental write cache.
enable_experimental_write_cache = false
## File system path for write cache, defaults to `{data_home}/write_cache`.
experimental_write_cache_path = ""
## Capacity for write cache.
experimental_write_cache_size = "512MB"
## TTL for write cache.
## @toml2docs:none-default
experimental_write_cache_ttl = "8h"
## Buffer size for SST writing.
sst_write_buffer_size = "8MB" sst_write_buffer_size = "8MB"
## Parallelism to scan a region (default: 1/4 of cpu cores). # Log options, see `standalone.example.toml`
## - `0`: using the default value (1/4 of cpu cores). # [logging]
## - `1`: scan in current thread. # dir = "/tmp/greptimedb/logs"
## - `n`: scan in parallelism n. # level = "info"
scan_parallelism = 0
## Capacity of the channel to send data from parallel scan tasks to the main task.
parallel_scan_channel_size = 32
## Whether to allow stale WAL entries read during replay.
allow_stale_entries = false
## Minimum time interval between two compactions.
## To align with the old behavior, the default value is 0 (no restrictions).
min_compaction_interval = "0m"
## The options for index in Mito engine.
[region_engine.mito.index]
## Auxiliary directory path for the index in filesystem, used to store intermediate files for
## creating the index and staging files for searching the index, defaults to `{data_home}/index_intermediate`.
## The default name for this directory is `index_intermediate` for backward compatibility.
##
## This path contains two subdirectories:
## - `__intm`: for storing intermediate files used during creating index.
## - `staging`: for storing staging files used during searching index.
aux_path = ""
## The max capacity of the staging directory.
staging_size = "2GB"
## The options for inverted index in Mito engine.
[region_engine.mito.inverted_index]
## Whether to create the index on flush.
## - `auto`: automatically (default)
## - `disable`: never
create_on_flush = "auto"
## Whether to create the index on compaction.
## - `auto`: automatically (default)
## - `disable`: never
create_on_compaction = "auto"
## Whether to apply the index on query
## - `auto`: automatically (default)
## - `disable`: never
apply_on_query = "auto"
## Memory threshold for performing an external sort during index creation.
## - `auto`: automatically determine the threshold based on the system memory size (default)
## - `unlimited`: no memory limit
## - `[size]` e.g. `64MB`: fixed memory threshold
mem_threshold_on_create = "auto"
## Deprecated, use `region_engine.mito.index.aux_path` instead.
intermediate_path = ""
## The options for full-text index in Mito engine.
[region_engine.mito.fulltext_index]
## Whether to create the index on flush.
## - `auto`: automatically (default)
## - `disable`: never
create_on_flush = "auto"
## Whether to create the index on compaction.
## - `auto`: automatically (default)
## - `disable`: never
create_on_compaction = "auto"
## Whether to apply the index on query
## - `auto`: automatically (default)
## - `disable`: never
apply_on_query = "auto"
## Memory threshold for index creation.
## - `auto`: automatically determine the threshold based on the system memory size (default)
## - `unlimited`: no memory limit
## - `[size]` e.g. `64MB`: fixed memory threshold
mem_threshold_on_create = "auto"
[region_engine.mito.memtable]
## Memtable type.
## - `time_series`: time-series memtable
## - `partition_tree`: partition tree memtable (experimental)
type = "time_series"
## The max number of keys in one shard.
## Only available for `partition_tree` memtable.
index_max_keys_per_shard = 8192
## The max rows of data inside the actively writing buffer in one shard.
## Only available for `partition_tree` memtable.
data_freeze_threshold = 32768
## Max dictionary bytes.
## Only available for `partition_tree` memtable.
fork_dictionary_bytes = "1GiB"
[[region_engine]]
## Enable the file engine.
[region_engine.file]
## The logging options.
[logging]
## The directory to store the log files. If set to empty, logs will not be written to files.
dir = "/tmp/greptimedb/logs"
## The log level. Can be `info`/`debug`/`warn`/`error`.
## @toml2docs:none-default
level = "info"
## Enable OTLP tracing.
enable_otlp_tracing = false
## The OTLP tracing endpoint.
otlp_endpoint = "http://localhost:4317"
## Whether to append logs to stdout.
append_stdout = true
## The log format. Can be `text`/`json`.
log_format = "text"
## The maximum amount of log files.
max_log_files = 720
## The percentage of tracing will be sampled and exported.
## Valid range `[0, 1]`, 1 means all traces are sampled, 0 means all traces are not sampled, the default value is 1.
## ratio > 1 are treated as 1. Fractions < 0 are treated as 0
[logging.tracing_sample_ratio]
default_ratio = 1.0
## The slow query log options.
[logging.slow_query]
## Whether to enable slow query log.
enable = false
## The threshold of slow query.
## @toml2docs:none-default
threshold = "10s"
## The sampling ratio of slow query log. The value should be in the range of (0, 1].
## @toml2docs:none-default
sample_ratio = 1.0
## The datanode can export its metrics and send to Prometheus compatible service (e.g. send to `greptimedb` itself) from remote-write API.
## This is only used for `greptimedb` to export its own metrics internally. It's different from prometheus scrape.
[export_metrics]
## whether enable export metrics.
enable = false
## The interval of export metrics.
write_interval = "30s"
## For `standalone` mode, `self_import` is recommend to collect metrics generated by itself
## You must create the database before enabling it.
[export_metrics.self_import]
## @toml2docs:none-default
db = "greptime_metrics"
[export_metrics.remote_write]
## The url the metrics send to. The url example can be: `http://127.0.0.1:4000/v1/prometheus/write?db=greptime_metrics`.
url = ""
## HTTP headers of Prometheus remote-write carry.
headers = { }
## The tracing options. Only effect when compiled with `tokio-console` feature.
#+ [tracing]
## The tokio console address.
## @toml2docs:none-default
#+ tokio_console_addr = "127.0.0.1"

@@ -1,108 +0,0 @@
## The running mode of the flownode. It can be `standalone` or `distributed`.
mode = "distributed"
## The flownode identifier and should be unique in the cluster.
## @toml2docs:none-default
node_id = 14
## The gRPC server options.
[grpc]
## The address to bind the gRPC server.
addr = "127.0.0.1:6800"
## The hostname advertised to the metasrv,
## and used for connections from outside the host
hostname = "127.0.0.1"
## The number of server worker threads.
runtime_size = 2
## The maximum receive message size for gRPC server.
max_recv_message_size = "512MB"
## The maximum send message size for gRPC server.
max_send_message_size = "512MB"
## The metasrv client options.
[meta_client]
## The addresses of the metasrv.
metasrv_addrs = ["127.0.0.1:3002"]
## Operation timeout.
timeout = "3s"
## Heartbeat timeout.
heartbeat_timeout = "500ms"
## DDL timeout.
ddl_timeout = "10s"
## Connect server timeout.
connect_timeout = "1s"
## `TCP_NODELAY` option for accepted connections.
tcp_nodelay = true
## The configuration about the cache of the metadata.
metadata_cache_max_capacity = 100000
## TTL of the metadata cache.
metadata_cache_ttl = "10m"
# TTI of the metadata cache.
metadata_cache_tti = "5m"
## The heartbeat options.
[heartbeat]
## Interval for sending heartbeat messages to the metasrv.
interval = "3s"
## Interval for retrying to send heartbeat messages to the metasrv.
retry_interval = "3s"
## The logging options.
[logging]
## The directory to store the log files. If set to empty, logs will not be written to files.
dir = "/tmp/greptimedb/logs"
## The log level. Can be `info`/`debug`/`warn`/`error`.
## @toml2docs:none-default
level = "info"
## Enable OTLP tracing.
enable_otlp_tracing = false
## The OTLP tracing endpoint.
otlp_endpoint = "http://localhost:4317"
## Whether to append logs to stdout.
append_stdout = true
## The log format. Can be `text`/`json`.
log_format = "text"
## The maximum amount of log files.
max_log_files = 720
## The percentage of tracing will be sampled and exported.
## Valid range `[0, 1]`, 1 means all traces are sampled, 0 means all traces are not sampled, the default value is 1.
## ratio > 1 are treated as 1. Fractions < 0 are treated as 0
[logging.tracing_sample_ratio]
default_ratio = 1.0
## The slow query log options.
[logging.slow_query]
## Whether to enable slow query log.
enable = false
## The threshold of slow query.
## @toml2docs:none-default
threshold = "10s"
## The sampling ratio of slow query log. The value should be in the range of (0, 1].
## @toml2docs:none-default
sample_ratio = 1.0
## The tracing options. Only effect when compiled with `tokio-console` feature.
#+ [tracing]
## The tokio console address.
## @toml2docs:none-default
#+ tokio_console_addr = "127.0.0.1"

@@ -1,237 +1,79 @@
## The default timezone of the server. # Node running mode, see `standalone.example.toml`.
## @toml2docs:none-default mode = "distributed"
default_timezone = "UTC"
## The runtime options.
#+ [runtime]
## The number of threads to execute the runtime for global read operations.
#+ global_rt_size = 8
## The number of threads to execute the runtime for global write operations.
#+ compact_rt_size = 4
## The heartbeat options.
[heartbeat] [heartbeat]
## Interval for sending heartbeat messages to the metasrv. # Interval for sending heartbeat task to the Metasrv, 5 seconds by default.
interval = "18s" interval = "5s"
# Interval for retry sending heartbeat task, 5 seconds by default.
retry_interval = "5s"
## Interval for retrying to send heartbeat messages to the metasrv. # HTTP server options, see `standalone.example.toml`.
retry_interval = "3s"
## The HTTP server options.
[http] [http]
## The address to bind the HTTP server.
addr = "127.0.0.1:4000" addr = "127.0.0.1:4000"
## HTTP request timeout. Set to 0 to disable timeout.
timeout = "30s" timeout = "30s"
## HTTP request body limit.
## The following units are supported: `B`, `KB`, `KiB`, `MB`, `MiB`, `GB`, `GiB`, `TB`, `TiB`, `PB`, `PiB`.
## Set to 0 to disable limit.
body_limit = "64MB" body_limit = "64MB"
## The gRPC server options. # gRPC server options, see `standalone.example.toml`.
[grpc] [grpc]
## The address to bind the gRPC server.
addr = "127.0.0.1:4001" addr = "127.0.0.1:4001"
## The hostname advertised to the metasrv,
## and used for connections from outside the host
hostname = "127.0.0.1"
## The number of server worker threads.
runtime_size = 8 runtime_size = 8
## gRPC server TLS options, see `mysql.tls` section. # MySQL server options, see `standalone.example.toml`.
[grpc.tls]
## TLS mode.
mode = "disable"
## Certificate file path.
## @toml2docs:none-default
cert_path = ""
## Private key file path.
## @toml2docs:none-default
key_path = ""
## Watch for Certificate and key file change and auto reload.
## For now, gRPC tls config does not support auto reload.
watch = false
## MySQL server options.
[mysql] [mysql]
## Whether to enable.
enable = true enable = true
## The addr to bind the MySQL server.
addr = "127.0.0.1:4002" addr = "127.0.0.1:4002"
## The number of server worker threads.
runtime_size = 2 runtime_size = 2
# MySQL server TLS options. # MySQL server TLS options, see `standalone.example.toml`.
[mysql.tls] [mysql.tls]
## TLS mode, refer to https://www.postgresql.org/docs/current/libpq-ssl.html
## - `disable` (default value)
## - `prefer`
## - `require`
## - `verify-ca`
## - `verify-full`
mode = "disable" mode = "disable"
## Certificate file path.
## @toml2docs:none-default
cert_path = "" cert_path = ""
## Private key file path.
## @toml2docs:none-default
key_path = "" key_path = ""
## Watch for Certificate and key file change and auto reload # PostgresSQL server options, see `standalone.example.toml`.
watch = false
## PostgresSQL server options.
[postgres] [postgres]
## Whether to enable
enable = true enable = true
## The addr to bind the PostgresSQL server.
addr = "127.0.0.1:4003" addr = "127.0.0.1:4003"
## The number of server worker threads.
runtime_size = 2 runtime_size = 2
## PostgresSQL server TLS options, see `mysql.tls` section. # PostgresSQL server TLS options, see `standalone.example.toml`.
[postgres.tls] [postgres.tls]
## TLS mode.
mode = "disable" mode = "disable"
## Certificate file path.
## @toml2docs:none-default
cert_path = "" cert_path = ""
## Private key file path.
## @toml2docs:none-default
key_path = "" key_path = ""
## Watch for Certificate and key file change and auto reload # OpenTSDB protocol options, see `standalone.example.toml`.
watch = false
## OpenTSDB protocol options.
[opentsdb] [opentsdb]
## Whether to enable OpenTSDB put in HTTP API.
enable = true enable = true
addr = "127.0.0.1:4242"
runtime_size = 2
## InfluxDB protocol options. # InfluxDB protocol options, see `standalone.example.toml`.
[influxdb] [influxdb]
## Whether to enable InfluxDB protocol in HTTP API.
enable = true enable = true
## Prometheus remote storage options # Prometheus remote storage options, see `standalone.example.toml`.
[prom_store] [prom_store]
## Whether to enable Prometheus remote write and read in HTTP API.
enable = true enable = true
## Whether to store the data from Prometheus remote write in metric engine.
with_metric_engine = true
## The metasrv client options. # Metasrv client options, see `datanode.example.toml`.
[meta_client] [meta_client]
## The addresses of the metasrv.
metasrv_addrs = ["127.0.0.1:3002"] metasrv_addrs = ["127.0.0.1:3002"]
## Operation timeout.
timeout = "3s" timeout = "3s"
# DDL timeouts options.
## Heartbeat timeout.
heartbeat_timeout = "500ms"
## DDL timeout.
ddl_timeout = "10s" ddl_timeout = "10s"
## Connect server timeout.
connect_timeout = "1s" connect_timeout = "1s"
## `TCP_NODELAY` option for accepted connections.
tcp_nodelay = true tcp_nodelay = true
## The configuration about the cache of the metadata. # Log options, see `standalone.example.toml`
metadata_cache_max_capacity = 100000 # [logging]
# dir = "/tmp/greptimedb/logs"
# level = "info"
## TTL of the metadata cache. # Datanode options.
metadata_cache_ttl = "10m"
# TTI of the metadata cache.
metadata_cache_tti = "5m"
## Datanode options.
[datanode] [datanode]
## Datanode client options. # Datanode client options.
[datanode.client] [datanode.client]
timeout = "10s"
connect_timeout = "10s" connect_timeout = "10s"
tcp_nodelay = true tcp_nodelay = true
## The logging options.
[logging]
## The directory to store the log files. If set to empty, logs will not be written to files.
dir = "/tmp/greptimedb/logs"
## The log level. Can be `info`/`debug`/`warn`/`error`.
## @toml2docs:none-default
level = "info"
## Enable OTLP tracing.
enable_otlp_tracing = false
## The OTLP tracing endpoint.
otlp_endpoint = "http://localhost:4317"
## Whether to append logs to stdout.
append_stdout = true
## The log format. Can be `text`/`json`.
log_format = "text"
## The maximum amount of log files.
max_log_files = 720
## The percentage of traces that will be sampled and exported.
## Valid range `[0, 1]`: 1 means all traces are sampled, 0 means no traces are sampled; the default value is 1.
## Ratios > 1 are treated as 1. Fractions < 0 are treated as 0.
[logging.tracing_sample_ratio]
default_ratio = 1.0
## The slow query log options.
[logging.slow_query]
## Whether to enable slow query log.
enable = false
## The threshold of slow query.
## @toml2docs:none-default
threshold = "10s"
## The sampling ratio of slow query log. The value should be in the range of (0, 1].
## @toml2docs:none-default
sample_ratio = 1.0
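## Example (illustrative sketch, not from the shipped config): a slow-query log that records
## half of the statements slower than 5 seconds could be configured roughly as follows; the
## values are assumptions, not recommendations.
# [logging.slow_query]
# enable = true
# threshold = "5s"
# sample_ratio = 0.5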
## The datanode can export its metrics and send to Prometheus compatible service (e.g. send to `greptimedb` itself) from remote-write API.
## This is only used for `greptimedb` to export its own metrics internally. It's different from prometheus scrape.
[export_metrics]
## whether enable export metrics.
enable = false
## The interval of export metrics.
write_interval = "30s"
## For `standalone` mode, `self_import` is recommended to collect metrics generated by itself
## You must create the database before enabling it.
[export_metrics.self_import]
## @toml2docs:none-default
db = "greptime_metrics"
[export_metrics.remote_write]
## The URL to send metrics to. For example: `http://127.0.0.1:4000/v1/prometheus/write?db=greptime_metrics`.
url = ""
## HTTP headers of Prometheus remote-write carry.
headers = { }
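## Example (illustrative sketch, not from the shipped config): exporting the server's own
## metrics through the remote-write endpoint might look like this; the URL below is only an
## example value.
# [export_metrics]
# enable = true
# write_interval = "30s"
# [export_metrics.remote_write]
# url = "http://127.0.0.1:4000/v1/prometheus/write?db=greptime_metrics"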
## The tracing options. Only effect when compiled with `tokio-console` feature.
#+ [tracing]
## The tokio console address.
## @toml2docs:none-default
#+ tokio_console_addr = "127.0.0.1"

View File

@@ -1,224 +1,44 @@
## The working home directory.
data_home = "/tmp/metasrv/"
## The bind address of metasrv.
bind_addr = "127.0.0.1:3002"
## The communication server address for frontend and datanode to connect to metasrv, "127.0.0.1:3002" by default for localhost.
server_addr = "127.0.0.1:3002"
## Store server address, defaulting to the etcd store.
store_addr = "127.0.0.1:2379"
## Datanode selector type.
## - `round_robin` (default value)
## - `lease_based`
## - `load_based`
## For details, please see "https://docs.greptime.com/developer-guide/metasrv/selector".
selector = "round_robin"
## Store data in memory.
use_memory_store = false
## Whether to enable greptimedb telemetry.
enable_telemetry = true
## If it's not empty, the metasrv will store all data with this key prefix.
store_key_prefix = ""
## Whether to enable region failover.
## This feature is only available on GreptimeDB running in cluster mode and
## - using remote WAL
## - using shared storage (e.g., S3).
enable_region_failover = false
## The datastore for meta server.
backend = "EtcdStore"
## The runtime options.
#+ [runtime]
## The number of threads to execute the runtime for global read operations.
#+ global_rt_size = 8
## The number of threads to execute the runtime for global write operations.
#+ compact_rt_size = 4
## Procedure storage options.
[procedure]
## Procedure max retry time.
max_retry_times = 12
## Initial retry delay of procedures, increases exponentially.
retry_delay = "500ms"
## Auto split large values.
## GreptimeDB procedure uses etcd as the default metadata storage backend.
## etcd limits the maximum size of any request to 1.5 MiB:
## 1500KiB = 1536KiB (1.5MiB) - 36KiB (reserved size of key)
## Comment out `max_metadata_value_size` to disable splitting large values (no limit).
max_metadata_value_size = "1500KiB"
## Failure detectors options.
[failure_detector]
## The threshold value used by the failure detector to determine failure conditions.
threshold = 8.0
## The minimum standard deviation of the heartbeat intervals, used to calculate acceptable variations.
min_std_deviation = "100ms"
## The acceptable pause duration between heartbeats, used to determine if a heartbeat interval is acceptable.
acceptable_heartbeat_pause = "10000ms"
## The initial estimate of the heartbeat interval used by the failure detector.
first_heartbeat_estimate = "1000ms"
## Datanode options.
[datanode]
## Datanode client options.
[datanode.client]
## Operation timeout.
timeout = "10s"
## Connect server timeout.
connect_timeout = "10s"
## `TCP_NODELAY` option for accepted connections.
tcp_nodelay = true
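## Example (illustrative sketch, not from the shipped config): on a network with long pauses or
## flaky links, the failure detector section above could be made more tolerant by raising the
## threshold and the acceptable heartbeat pause; the values below are assumptions only.
# [failure_detector]
# threshold = 10.0
# acceptable_heartbeat_pause = "20000ms"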
[wal]
# Available wal providers:
# - `raft_engine` (default): there're none raft-engine wal config since metasrv only involves in remote wal currently.
# - `kafka`: metasrv **have to be** configured with kafka wal config when using kafka wal provider in datanode.
provider = "raft_engine"
# Kafka wal config.
## The broker endpoints of the Kafka cluster.
broker_endpoints = ["127.0.0.1:9092"]
## Automatically create topics for WAL.
## Set to `true` to automatically create topics for WAL.
## Otherwise, use topics named `topic_name_prefix_[0..num_topics)`
auto_create_topics = true
## Number of topics.
num_topics = 64
## Topic selector type.
## Available selector types:
## - `round_robin` (default)
selector_type = "round_robin"
## A Kafka topic is constructed by concatenating `topic_name_prefix` and `topic_id`.
## e.g., greptimedb_wal_topic_0, greptimedb_wal_topic_1.
topic_name_prefix = "greptimedb_wal_topic"
## Expected number of replicas of each partition.
replication_factor = 1
## The timeout above which a topic creation operation will be cancelled.
create_topic_timeout = "30s"
## The initial backoff for kafka clients.
backoff_init = "500ms"
## The maximum backoff for kafka clients.
backoff_max = "10s"
## Exponential backoff rate, i.e. next backoff = base * current backoff.
backoff_base = 2
## Stop reconnecting if the total wait time reaches the deadline. If this config is missing, the reconnecting won't terminate.
backoff_deadline = "5mins"
# The Kafka SASL configuration.
# **It's only used when the provider is `kafka`**.
# Available SASL mechanisms:
# - `PLAIN`
# - `SCRAM-SHA-256`
# - `SCRAM-SHA-512`
# [wal.sasl]
# type = "SCRAM-SHA-512"
# username = "user_kafka"
# password = "secret"
# The Kafka TLS configuration.
# **It's only used when the provider is `kafka`**.
# [wal.tls]
# server_ca_cert_path = "/path/to/server_cert"
# client_cert_path = "/path/to/client_cert"
# client_key_path = "/path/to/key"
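## Example (illustrative sketch, not from the shipped config): pointing the metasrv at an
## existing Kafka cluster with SCRAM authentication could look roughly like this. The broker
## addresses and credentials are placeholders.
# [wal]
# provider = "kafka"
# broker_endpoints = ["kafka-1:9092", "kafka-2:9092"]
# auto_create_topics = true
# num_topics = 64
# [wal.sasl]
# type = "SCRAM-SHA-512"
# username = "user_kafka"
# password = "secret"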
## The logging options.
[logging]
## The directory to store the log files. If set to empty, logs will not be written to files.
dir = "/tmp/greptimedb/logs"
## The log level. Can be `info`/`debug`/`warn`/`error`.
## @toml2docs:none-default
level = "info"
## Enable OTLP tracing.
enable_otlp_tracing = false
## The OTLP tracing endpoint.
otlp_endpoint = "http://localhost:4317"
## Whether to append logs to stdout.
append_stdout = true
## The log format. Can be `text`/`json`.
log_format = "text"
## The maximum amount of log files.
max_log_files = 720
## The percentage of traces that will be sampled and exported.
## Valid range `[0, 1]`: 1 means all traces are sampled, 0 means no traces are sampled; the default value is 1.
## Ratios > 1 are treated as 1. Fractions < 0 are treated as 0.
[logging.tracing_sample_ratio]
default_ratio = 1.0
## The slow query log options.
[logging.slow_query]
## Whether to enable slow query log.
enable = false
## The threshold of slow query.
## @toml2docs:none-default
threshold = "10s"
## The sampling ratio of slow query log. The value should be in the range of (0, 1].
## @toml2docs:none-default
sample_ratio = 1.0
## The datanode can export its metrics and send to Prometheus compatible service (e.g. send to `greptimedb` itself) from remote-write API.
## This is only used for `greptimedb` to export its own metrics internally. It's different from prometheus scrape.
[export_metrics]
## whether enable export metrics.
enable = false
## The interval of export metrics.
write_interval = "30s"
## For `standalone` mode, `self_import` is recommended to collect metrics generated by itself
## You must create the database before enabling it.
[export_metrics.self_import]
## @toml2docs:none-default
db = "greptime_metrics"
[export_metrics.remote_write]
## The URL to send metrics to. For example: `http://127.0.0.1:4000/v1/prometheus/write?db=greptime_metrics`.
url = ""
## HTTP headers of Prometheus remote-write carry.
headers = { }
## The tracing options. Only effect when compiled with `tokio-console` feature.
#+ [tracing]
## The tokio console address.
## @toml2docs:none-default
#+ tokio_console_addr = "127.0.0.1"

View File

@@ -1,696 +1,166 @@
## The running mode of the datanode. It can be `standalone` or `distributed`.
mode = "standalone"
## Enable telemetry to collect anonymous usage data.
enable_telemetry = true
## The default timezone of the server.
## @toml2docs:none-default
default_timezone = "UTC"
## Initialize all regions in the background during the startup.
## By default, it provides services after all regions have been initialized.
init_regions_in_background = false
## Parallelism of initializing regions.
init_regions_parallelism = 16
## The maximum concurrent queries allowed to be executed. Zero means unlimited.
max_concurrent_queries = 0
## The runtime options.
#+ [runtime]
## The number of threads to execute the runtime for global read operations.
#+ global_rt_size = 8
## The number of threads to execute the runtime for global write operations.
#+ compact_rt_size = 4
## The HTTP server options.
[http]
## The address to bind the HTTP server.
addr = "127.0.0.1:4000"
## HTTP request timeout. Set to 0 to disable timeout.
timeout = "30s"
## HTTP request body limit.
## The following units are supported: `B`, `KB`, `KiB`, `MB`, `MiB`, `GB`, `GiB`, `TB`, `TiB`, `PB`, `PiB`.
## Set to 0 to disable limit.
body_limit = "64MB"
## The gRPC server options.
[grpc]
## The address to bind the gRPC server.
addr = "127.0.0.1:4001"
## The number of server worker threads.
runtime_size = 8
## gRPC server TLS options, see `mysql.tls` section.
[grpc.tls]
## TLS mode.
mode = "disable"
## Certificate file path.
## @toml2docs:none-default
cert_path = ""
## Private key file path.
## @toml2docs:none-default
key_path = ""
## Watch for certificate and key file changes and auto reload.
## For now, gRPC TLS config does not support auto reload.
watch = false
## MySQL server options.
[mysql]
## Whether to enable.
enable = true
## The addr to bind the MySQL server.
addr = "127.0.0.1:4002"
## The number of server worker threads.
runtime_size = 2
## MySQL server TLS options.
[mysql.tls]
## TLS mode, refer to https://www.postgresql.org/docs/current/libpq-ssl.html
## - `disable` (default value)
## - `prefer`
## - `require`
## - `verify-ca`
## - `verify-full`
mode = "disable"
## Certificate file path.
## @toml2docs:none-default
cert_path = ""
## Private key file path.
## @toml2docs:none-default
key_path = ""
## Watch for certificate and key file changes and auto reload
watch = false
## PostgreSQL server options.
[postgres]
## Whether to enable
enable = true
## The addr to bind the PostgreSQL server.
addr = "127.0.0.1:4003"
## The number of server worker threads.
runtime_size = 2
## PostgreSQL server TLS options, see `mysql.tls` section.
[postgres.tls]
## TLS mode.
mode = "disable"
## Certificate file path.
## @toml2docs:none-default
cert_path = ""
## Private key file path.
## @toml2docs:none-default
key_path = ""
## Watch for certificate and key file changes and auto reload
watch = false
## OpenTSDB protocol options.
[opentsdb]
## Whether to enable OpenTSDB put in HTTP API.
enable = true
addr = "127.0.0.1:4242"
runtime_size = 2
## InfluxDB protocol options.
[influxdb]
## Whether to enable InfluxDB protocol in HTTP API.
enable = true
## Prometheus remote storage options
[prom_store]
## Whether to enable Prometheus remote write and read in HTTP API.
enable = true
## Whether to store the data from Prometheus remote write in metric engine.
with_metric_engine = true
## The WAL options.
[wal]
## The provider of the WAL.
## - `raft_engine`: the WAL is stored in the local file system by raft-engine.
## - `kafka`: remote WAL; the data is stored in Kafka.
provider = "raft_engine"
## The directory to store the WAL files.
## **It's only used when the provider is `raft_engine`**.
## @toml2docs:none-default
dir = "/tmp/greptimedb/wal"
## The size of the WAL segment file.
## **It's only used when the provider is `raft_engine`**.
file_size = "256MB"
## The threshold of the WAL size to trigger a flush.
## **It's only used when the provider is `raft_engine`**.
purge_threshold = "4GB"
## The interval to trigger a flush.
## **It's only used when the provider is `raft_engine`**.
purge_interval = "10m"
## The read batch size.
## **It's only used when the provider is `raft_engine`**.
read_batch_size = 128
## Whether to use sync write.
## **It's only used when the provider is `raft_engine`**.
sync_write = false
## Whether to reuse logically truncated log files.
## **It's only used when the provider is `raft_engine`**.
enable_log_recycle = true
## Whether to pre-create log files on start up.
## **It's only used when the provider is `raft_engine`**.
prefill_log_files = false
## Duration for fsyncing log files.
## **It's only used when the provider is `raft_engine`**.
sync_period = "10s"
## Parallelism during WAL recovery.
recovery_parallelism = 2
## The Kafka broker endpoints.
## **It's only used when the provider is `kafka`**.
broker_endpoints = ["127.0.0.1:9092"]
## Automatically create topics for WAL.
## Set to `true` to automatically create topics for WAL.
## Otherwise, use topics named `topic_name_prefix_[0..num_topics)`
auto_create_topics = true
## Number of topics.
## **It's only used when the provider is `kafka`**.
num_topics = 64
## Topic selector type.
## Available selector types:
## - `round_robin` (default)
## **It's only used when the provider is `kafka`**.
selector_type = "round_robin"
## A Kafka topic is constructed by concatenating `topic_name_prefix` and `topic_id`.
## e.g., greptimedb_wal_topic_0, greptimedb_wal_topic_1.
## **It's only used when the provider is `kafka`**.
topic_name_prefix = "greptimedb_wal_topic"
## Expected number of replicas of each partition.
## **It's only used when the provider is `kafka`**.
replication_factor = 1
## The timeout above which a topic creation operation will be cancelled.
## **It's only used when the provider is `kafka`**.
create_topic_timeout = "30s"
## The max size of a single producer batch.
## Warning: Kafka has a default limit of 1MB per message in a topic.
## **It's only used when the provider is `kafka`**.
max_batch_bytes = "1MB"
## The consumer wait timeout.
## **It's only used when the provider is `kafka`**.
consumer_wait_timeout = "100ms"
## The initial backoff delay.
## **It's only used when the provider is `kafka`**.
backoff_init = "500ms"
## The maximum backoff delay.
## **It's only used when the provider is `kafka`**.
backoff_max = "10s"
## The exponential backoff rate, i.e. next backoff = base * current backoff.
## **It's only used when the provider is `kafka`**.
backoff_base = 2
## The deadline of retries.
## **It's only used when the provider is `kafka`**.
backoff_deadline = "5mins"
## Ignore missing entries during read WAL.
## **It's only used when the provider is `kafka`**.
##
## This option ensures that when Kafka messages are deleted, the system
## can still successfully replay memtable data without throwing an
## out-of-range error.
## However, enabling this option might lead to unexpected data loss,
## as the system will skip over missing entries instead of treating
## them as critical errors.
overwrite_entry_start_id = false
# The Kafka SASL configuration.
# **It's only used when the provider is `kafka`**.
# Available SASL mechanisms:
# - `PLAIN`
# - `SCRAM-SHA-256`
# - `SCRAM-SHA-512`
# [wal.sasl]
# type = "SCRAM-SHA-512"
# username = "user_kafka"
# password = "secret"
# The Kafka TLS configuration.
# **It's only used when the provider is `kafka`**.
# [wal.tls]
# server_ca_cert_path = "/path/to/server_cert"
# client_cert_path = "/path/to/client_cert"
# client_key_path = "/path/to/key"
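## Example (illustrative sketch, not from the shipped config): keeping the default raft-engine
## provider but placing the WAL on a dedicated disk and purging it more aggressively could look
## like this; the directory and the numbers are hypothetical.
# [wal]
# provider = "raft_engine"
# dir = "/mnt/fast-disk/greptimedb/wal"
# file_size = "128MB"
# purge_threshold = "2GB"
# purge_interval = "5m"
# sync_write = true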
## Metadata storage options.
[metadata_store]
## Kv file size in bytes.
file_size = "256MB"
## Kv purge threshold.
purge_threshold = "4GB"
## Procedure storage options.
[procedure]
## Procedure max retry time.
max_retry_times = 3
## Initial retry delay of procedures, increases exponentially
retry_delay = "500ms"
# Example of using S3 as the storage.
# [storage]
# type = "S3"
# bucket = "greptimedb"
# root = "data"
# access_key_id = "test"
# secret_access_key = "123456"
# endpoint = "https://s3.amazonaws.com"
# region = "us-west-2"
# Example of using Oss as the storage.
# [storage]
# type = "Oss"
# bucket = "greptimedb"
# root = "data"
# access_key_id = "test"
# access_key_secret = "123456"
# endpoint = "https://oss-cn-hangzhou.aliyuncs.com"
# Example of using Azblob as the storage.
# [storage]
# type = "Azblob"
# container = "greptimedb"
# root = "data"
# account_name = "test"
# account_key = "123456"
# endpoint = "https://greptimedb.blob.core.windows.net"
# sas_token = ""
# Example of using Gcs as the storage.
# [storage]
# type = "Gcs"
# bucket = "greptimedb"
# root = "data"
# scope = "test"
# credential_path = "123456"
# credential = "base64-credential"
# endpoint = "https://storage.googleapis.com"
## The data storage options.
[storage]
## The working home directory.
data_home = "/tmp/greptimedb/"
## The storage type used to store the data.
## - `File`: the data is stored in the local file system.
## - `S3`: the data is stored in the S3 object storage.
## - `Gcs`: the data is stored in the Google Cloud Storage.
## - `Azblob`: the data is stored in the Azure Blob Storage.
## - `Oss`: the data is stored in the Aliyun OSS.
type = "File"
# TTL for all tables. Disabled by default.
# global_ttl = "7d"
# Cache configuration for object storage such as 'S3' etc.
# cache_path = "/path/local_cache"
# The local file cache capacity in bytes.
# cache_capacity = "256MB"
## Cache configuration for object storage such as 'S3' etc.
## The local file cache directory.
## @toml2docs:none-default
cache_path = "/path/local_cache"
## The local file cache capacity in bytes.
## @toml2docs:none-default
cache_capacity = "256MB"
## The S3 bucket name.
## **It's only used when the storage type is `S3`, `Oss` and `Gcs`**.
## @toml2docs:none-default
bucket = "greptimedb"
## The S3 data will be stored in the specified prefix, for example, `s3://${bucket}/${root}`.
## **It's only used when the storage type is `S3`, `Oss` and `Azblob`**.
## @toml2docs:none-default
root = "greptimedb"
## The access key id of the aws account.
## It's **highly recommended** to use AWS IAM roles instead of hardcoding the access key id and secret key.
## **It's only used when the storage type is `S3` and `Oss`**.
## @toml2docs:none-default
access_key_id = "test"
## The secret access key of the aws account.
## It's **highly recommended** to use AWS IAM roles instead of hardcoding the access key id and secret key.
## **It's only used when the storage type is `S3`**.
## @toml2docs:none-default
secret_access_key = "test"
## The secret access key of the aliyun account.
## **It's only used when the storage type is `Oss`**.
## @toml2docs:none-default
access_key_secret = "test"
## The account key of the azure account.
## **It's only used when the storage type is `Azblob`**.
## @toml2docs:none-default
account_name = "test"
## The account key of the azure account.
## **It's only used when the storage type is `Azblob`**.
## @toml2docs:none-default
account_key = "test"
## The scope of the google cloud storage.
## **It's only used when the storage type is `Gcs`**.
## @toml2docs:none-default
scope = "test"
## The credential path of the google cloud storage.
## **It's only used when the storage type is `Gcs`**.
## @toml2docs:none-default
credential_path = "test"
## The credential of the google cloud storage.
## **It's only used when the storage type is `Gcs`**.
## @toml2docs:none-default
credential = "base64-credential"
## The container of the azure account.
## **It's only used when the storage type is `Azblob`**.
## @toml2docs:none-default
container = "greptimedb"
## The sas token of the azure account.
## **It's only used when the storage type is `Azblob`**.
## @toml2docs:none-default
sas_token = ""
## The endpoint of the S3 service.
## **It's only used when the storage type is `S3`, `Oss`, `Gcs` and `Azblob`**.
## @toml2docs:none-default
endpoint = "https://s3.amazonaws.com"
## The region of the S3 service.
## **It's only used when the storage type is `S3`, `Oss`, `Gcs` and `Azblob`**.
## @toml2docs:none-default
region = "us-west-2"
# Custom storage options
# [[storage.providers]]
# name = "S3"
# type = "S3"
# bucket = "greptimedb"
# root = "data"
# access_key_id = "test"
# secret_access_key = "123456"
# endpoint = "https://s3.amazonaws.com"
# region = "us-west-2"
# [[storage.providers]]
# name = "Gcs"
# type = "Gcs"
# bucket = "greptimedb"
# root = "data"
# scope = "test"
# credential_path = "123456"
# credential = "base64-credential"
# endpoint = "https://storage.googleapis.com"
## The region engine options. You can configure multiple region engines.
[[region_engine]]
## The Mito engine options.
[region_engine.mito]
## Number of region workers.
#+ num_workers = 8
## Request channel size of each worker.
worker_channel_size = 128
## Max batch size for a worker to handle requests.
worker_request_batch_size = 64
## Number of meta actions updated to trigger a new checkpoint for the manifest.
manifest_checkpoint_distance = 10
## Whether to compress manifest and checkpoint file by gzip (default false).
compress_manifest = false
## Max number of running background flush jobs (default: 1/2 of cpu cores).
## @toml2docs:none-default="Auto"
#+ max_background_flushes = 4
## Max number of running background compaction jobs (default: 1/4 of cpu cores).
## @toml2docs:none-default="Auto"
#+ max_background_compactions = 2
## Max number of running background purge jobs (default: number of cpu cores).
## @toml2docs:none-default="Auto"
#+ max_background_purges = 8
## Interval to auto flush a region if it has not flushed yet.
auto_flush_interval = "1h"
## Global write buffer size for all regions. If not set, it defaults to 1/8 of OS memory with a max limitation of 1GB.
## @toml2docs:none-default="Auto"
#+ global_write_buffer_size = "1GB"
## Global write buffer size threshold to reject write requests. If not set, it defaults to 2 times of `global_write_buffer_size`.
## @toml2docs:none-default="Auto"
#+ global_write_buffer_reject_size = "2GB"
## Cache size for SST metadata. Setting it to 0 disables the cache.
## If not set, it defaults to 1/32 of OS memory with a max limitation of 128MB.
## @toml2docs:none-default="Auto"
#+ sst_meta_cache_size = "128MB"
## Cache size for vectors and arrow arrays. Setting it to 0 disables the cache.
## If not set, it defaults to 1/16 of OS memory with a max limitation of 512MB.
## @toml2docs:none-default="Auto"
#+ vector_cache_size = "512MB"
## Cache size for pages of SST row groups. Setting it to 0 disables the cache.
## If not set, it defaults to 1/8 of OS memory.
## @toml2docs:none-default="Auto"
#+ page_cache_size = "512MB"
## Cache size for time series selector (e.g. `last_value()`). Setting it to 0 disables the cache.
## If not set, it defaults to 1/16 of OS memory with a max limitation of 512MB.
## @toml2docs:none-default="Auto"
#+ selector_result_cache_size = "512MB"
## Whether to enable the experimental write cache.
enable_experimental_write_cache = false
## File system path for write cache, defaults to `{data_home}/write_cache`.
experimental_write_cache_path = ""
## Capacity for write cache.
experimental_write_cache_size = "512MB"
## TTL for write cache.
## @toml2docs:none-default
experimental_write_cache_ttl = "8h"
## Buffer size for SST writing.
sst_write_buffer_size = "8MB" sst_write_buffer_size = "8MB"
## Parallelism to scan a region (default: 1/4 of cpu cores).
## - `0`: using the default value (1/4 of cpu cores).
## - `1`: scan in current thread.
## - `n`: scan in parallelism n.
scan_parallelism = 0
## Capacity of the channel to send data from parallel scan tasks to the main task.
parallel_scan_channel_size = 32
## Whether to allow stale WAL entries read during replay.
allow_stale_entries = false
## Minimum time interval between two compactions.
## To align with the old behavior, the default value is 0 (no restrictions).
min_compaction_interval = "0m"
## The options for index in Mito engine.
[region_engine.mito.index]
## Auxiliary directory path for the index in filesystem, used to store intermediate files for
## creating the index and staging files for searching the index, defaults to `{data_home}/index_intermediate`.
## The default name for this directory is `index_intermediate` for backward compatibility.
##
## This path contains two subdirectories:
## - `__intm`: for storing intermediate files used during creating index.
## - `staging`: for storing staging files used during searching index.
aux_path = ""
## The max capacity of the staging directory.
staging_size = "2GB"
## The options for inverted index in Mito engine.
[region_engine.mito.inverted_index]
## Whether to create the index on flush.
## - `auto`: automatically (default)
## - `disable`: never
create_on_flush = "auto"
## Whether to create the index on compaction.
## - `auto`: automatically (default)
## - `disable`: never
create_on_compaction = "auto"
## Whether to apply the index on query
## - `auto`: automatically (default)
## - `disable`: never
apply_on_query = "auto"
## Memory threshold for performing an external sort during index creation.
## - `auto`: automatically determine the threshold based on the system memory size (default)
## - `unlimited`: no memory limit
## - `[size]` e.g. `64MB`: fixed memory threshold
mem_threshold_on_create = "auto"
## Deprecated, use `region_engine.mito.index.aux_path` instead.
intermediate_path = ""
## Cache size for inverted index metadata.
metadata_cache_size = "64MiB"
## Cache size for inverted index content.
content_cache_size = "128MiB"
## The options for full-text index in Mito engine.
[region_engine.mito.fulltext_index]
## Whether to create the index on flush.
## - `auto`: automatically (default)
## - `disable`: never
create_on_flush = "auto"
## Whether to create the index on compaction.
## - `auto`: automatically (default)
## - `disable`: never
create_on_compaction = "auto"
## Whether to apply the index on query
## - `auto`: automatically (default)
## - `disable`: never
apply_on_query = "auto"
## Memory threshold for index creation.
## - `auto`: automatically determine the threshold based on the system memory size (default)
## - `unlimited`: no memory limit
## - `[size]` e.g. `64MB`: fixed memory threshold
mem_threshold_on_create = "auto"
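## Example (illustrative sketch, not from the shipped config): forcing inverted-index creation
## only at compaction time and capping the build memory could be expressed roughly as follows;
## the threshold value is an assumption.
# [region_engine.mito.inverted_index]
# create_on_flush = "disable"
# create_on_compaction = "auto"
# apply_on_query = "auto"
# mem_threshold_on_create = "64MB"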
[region_engine.mito.memtable]
## Memtable type.
## - `time_series`: time-series memtable
## - `partition_tree`: partition tree memtable (experimental)
type = "time_series"
## The max number of keys in one shard.
## Only available for `partition_tree` memtable.
index_max_keys_per_shard = 8192
## The max rows of data inside the actively writing buffer in one shard.
## Only available for `partition_tree` memtable.
data_freeze_threshold = 32768
## Max dictionary bytes.
## Only available for `partition_tree` memtable.
fork_dictionary_bytes = "1GiB"
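## Example (illustrative sketch, not from the shipped config): opting into the experimental
## partition tree memtable with smaller shards might look like this; the numbers are
## illustrative only.
# [region_engine.mito.memtable]
# type = "partition_tree"
# index_max_keys_per_shard = 4096
# data_freeze_threshold = 16384
# fork_dictionary_bytes = "512MiB"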
[[region_engine]]
## Enable the file engine.
[region_engine.file]
## The logging options.
[logging]
## The directory to store the log files. If set to empty, logs will not be written to files.
dir = "/tmp/greptimedb/logs"
## The log level. Can be `info`/`debug`/`warn`/`error`.
## @toml2docs:none-default
level = "info"
## Enable OTLP tracing.
enable_otlp_tracing = false
## The OTLP tracing endpoint.
otlp_endpoint = "http://localhost:4317"
## Whether to append logs to stdout.
append_stdout = true
## The log format. Can be `text`/`json`.
log_format = "text"
## The maximum amount of log files.
max_log_files = 720
## The percentage of traces that will be sampled and exported.
## Valid range `[0, 1]`: 1 means all traces are sampled, 0 means no traces are sampled; the default value is 1.
## Ratios > 1 are treated as 1. Fractions < 0 are treated as 0.
[logging.tracing_sample_ratio]
default_ratio = 1.0
## The slow query log options.
[logging.slow_query]
## Whether to enable slow query log.
enable = false
## The threshold of slow query.
## @toml2docs:none-default
threshold = "10s"
## The sampling ratio of slow query log. The value should be in the range of (0, 1].
## @toml2docs:none-default
sample_ratio = 1.0
## The datanode can export its metrics and send to Prometheus compatible service (e.g. send to `greptimedb` itself) from remote-write API.
## This is only used for `greptimedb` to export its own metrics internally. It's different from prometheus scrape.
[export_metrics]
## whether enable export metrics.
enable = false
## The interval of export metrics.
write_interval = "30s"
## For `standalone` mode, `self_import` is recommended to collect metrics generated by itself
## You must create the database before enabling it.
[export_metrics.self_import]
## @toml2docs:none-default
db = "greptime_metrics"
[export_metrics.remote_write]
## The URL to send metrics to. For example: `http://127.0.0.1:4000/v1/prometheus/write?db=greptime_metrics`.
url = ""
## HTTP headers of Prometheus remote-write carry.
headers = { }
## The tracing options. Only effect when compiled with `tokio-console` feature.
#+ [tracing]
## The tokio console address.
## @toml2docs:none-default
#+ tokio_console_addr = "127.0.0.1"

2
cyborg/.gitignore vendored
View File

@@ -1,2 +0,0 @@
node_modules
.env

View File

@@ -1,79 +0,0 @@
/*
* Copyright 2023 Greptime Team
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
import * as core from '@actions/core'
import {handleError, obtainClient} from "@/common";
import {context} from "@actions/github";
import {PullRequestEvent} from "@octokit/webhooks-types";
import {Options, sync as conventionalCommitsParser} from 'conventional-commits-parser';
import conventionalCommitTypes from 'conventional-commit-types';
import _ from "lodash";
const defaultTypes = Object.keys(conventionalCommitTypes.types)
const breakingChangeLabel = "breaking-change"
// These options are copied from [1].
// [1] https://github.com/conventional-changelog/conventional-changelog/blob/3f60b464/packages/conventional-changelog-conventionalcommits/src/parser.js
export const parserOpts: Options = {
headerPattern: /^(\w*)(?:\((.*)\))?!?: (.*)$/,
breakingHeaderPattern: /^(\w*)(?:\((.*)\))?!: (.*)$/,
headerCorrespondence: [
'type',
'scope',
'subject'
],
noteKeywords: ['BREAKING CHANGE', 'BREAKING-CHANGE'],
revertPattern: /^(?:Revert|revert:)\s"?([\s\S]+?)"?\s*This reverts commit (\w*)\./i,
revertCorrespondence: ['header', 'hash'],
issuePrefixes: ['#']
}
async function main() {
if (!context.payload.pull_request) {
throw new Error(`Only pull request event supported. ${context.eventName} is unsupported.`)
}
const client = obtainClient("GITHUB_TOKEN")
const payload = context.payload as PullRequestEvent
const { owner, repo, number } = {
owner: payload.pull_request.base.user.login,
repo: payload.pull_request.base.repo.name,
number: payload.pull_request.number,
}
const { data: pull_request } = await client.rest.pulls.get({
owner, repo, pull_number: number,
})
const commit = conventionalCommitsParser(pull_request.title, parserOpts)
core.info(`Receive commit: ${JSON.stringify(commit)}`)
if (!commit.type) {
throw Error(`Malformed commit: ${JSON.stringify(commit)}`)
}
if (!defaultTypes.includes(commit.type)) {
throw Error(`Unexpected type ${JSON.stringify(commit.type)} of commit: ${JSON.stringify(commit)}`)
}
const breakingChanges = _.filter(commit.notes, _.matches({ title: 'BREAKING CHANGE'}))
if (breakingChanges.length > 0) {
await client.rest.issues.addLabels({
owner, repo, issue_number: number, labels: [breakingChangeLabel]
})
}
}
main().catch(handleError)

View File

@@ -1,106 +0,0 @@
/*
* Copyright 2023 Greptime Team
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
import * as core from '@actions/core'
import {handleError, obtainClient} from "@/common";
import {context} from "@actions/github";
import {PullRequestEditedEvent, PullRequestEvent, PullRequestOpenedEvent} from "@octokit/webhooks-types";
// @ts-expect-error moduleResolution:nodenext issue 54523
import {RequestError} from "@octokit/request-error";
const needFollowUpDocs = "[x] This PR requires documentation updates."
const labelDocsNotRequired = "docs-not-required"
const labelDocsRequired = "docs-required"
async function main() {
if (!context.payload.pull_request) {
throw new Error(`Only pull request event supported. ${context.eventName} is unsupported.`)
}
const client = obtainClient("GITHUB_TOKEN")
const docsClient = obtainClient("DOCS_REPO_TOKEN")
const payload = context.payload as PullRequestEvent
const { owner, repo, number, actor, title, html_url } = {
owner: payload.pull_request.base.user.login,
repo: payload.pull_request.base.repo.name,
number: payload.pull_request.number,
title: payload.pull_request.title,
html_url: payload.pull_request.html_url,
actor: payload.pull_request.user.login,
}
const followUpDocs = checkPullRequestEvent(payload)
if (followUpDocs) {
core.info("Follow up docs.")
await client.rest.issues.removeLabel({
owner, repo, issue_number: number, name: labelDocsNotRequired,
}).catch((e: RequestError) => {
if (e.status != 404) {
throw e;
}
core.debug(`Label ${labelDocsNotRequired} not exist.`)
})
await client.rest.issues.addLabels({
owner, repo, issue_number: number, labels: [labelDocsRequired],
})
await docsClient.rest.issues.create({
owner: 'GreptimeTeam',
repo: 'docs',
title: `Update docs for ${title}`,
body: `A document change request is generated from ${html_url}`,
assignee: actor,
}).then((res) => {
core.info(`Created issue ${res.data}`)
})
} else {
core.info("No need to follow up docs.")
await client.rest.issues.removeLabel({
owner, repo, issue_number: number, name: labelDocsRequired
}).catch((e: RequestError) => {
if (e.status != 404) {
throw e;
}
core.debug(`Label ${labelDocsRequired} not exist.`)
})
await client.rest.issues.addLabels({
owner, repo, issue_number: number, labels: [labelDocsNotRequired],
})
}
}
function checkPullRequestEvent(payload: PullRequestEvent) {
switch (payload.action) {
case "opened":
return checkPullRequestOpenedEvent(payload as PullRequestOpenedEvent)
case "edited":
return checkPullRequestEditedEvent(payload as PullRequestEditedEvent)
default:
throw new Error(`${payload.action} is unsupported.`)
}
}
function checkPullRequestOpenedEvent(event: PullRequestOpenedEvent): boolean {
// @ts-ignore
return event.pull_request.body?.includes(needFollowUpDocs)
}
function checkPullRequestEditedEvent(event: PullRequestEditedEvent): boolean {
const previous = event.changes.body?.from.includes(needFollowUpDocs)
const current = event.pull_request.body?.includes(needFollowUpDocs)
// from docs-not-required to docs-required
return (!previous) && current
}
main().catch(handleError)

View File

@@ -1,83 +0,0 @@
/*
* Copyright 2023 Greptime Team
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
import * as core from '@actions/core'
import {handleError, obtainClient} from "@/common"
import {context} from "@actions/github"
import _ from "lodash"
async function main() {
const success = process.env["CI_REPORT_STATUS"] === "true"
core.info(`CI_REPORT_STATUS=${process.env["CI_REPORT_STATUS"]}, resolved to ${success}`)
const client = obtainClient("GITHUB_TOKEN")
const title = `Workflow run '${context.workflow}' failed`
const url = `${process.env["GITHUB_SERVER_URL"]}/${process.env["GITHUB_REPOSITORY"]}/actions/runs/${process.env["GITHUB_RUN_ID"]}`
const failure_comment = `@GreptimeTeam/db-approver\nNew failure: ${url} `
const success_comment = `@GreptimeTeam/db-approver\nBack to success: ${url}`
const {owner, repo} = context.repo
const labels = ['O-ci-failure']
const issues = await client.paginate(client.rest.issues.listForRepo, {
owner,
repo,
labels: labels.join(','),
state: "open",
sort: "created",
direction: "desc",
});
const issue = _.find(issues, (i) => i.title === title);
if (issue) { // existing issue
core.info(`Found previous issue ${issue.html_url}`)
if (!success) {
await client.rest.issues.createComment({
owner,
repo,
issue_number: issue.number,
body: failure_comment,
})
} else {
await client.rest.issues.createComment({
owner,
repo,
issue_number: issue.number,
body: success_comment,
})
await client.rest.issues.update({
owner,
repo,
issue_number: issue.number,
state: "closed",
state_reason: "completed",
})
}
core.setOutput("html_url", issue.html_url)
} else if (!success) { // create new issue for failure
const issue = await client.rest.issues.create({
owner,
repo,
title,
labels,
body: failure_comment,
})
core.info(`Created issue ${issue.data.html_url}`)
core.setOutput("html_url", issue.data.html_url)
}
}
main().catch(handleError)

View File

@@ -1,73 +0,0 @@
/*
* Copyright 2023 Greptime Team
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
import * as core from '@actions/core'
import {GitHub} from "@actions/github/lib/utils"
import _ from "lodash";
import dayjs from "dayjs";
import {handleError, obtainClient} from "@/common";
async function main() {
const client = obtainClient("GITHUB_TOKEN")
await unassign(client)
}
async function unassign(client: InstanceType<typeof GitHub>) {
const owner = "GreptimeTeam"
const repo = "greptimedb"
const dt = dayjs().subtract(14, 'days');
core.info(`Open issues updated before ${dt.toISOString()} will be considered stale.`)
const members = await client.paginate(client.rest.repos.listCollaborators, {
owner,
repo,
permission: "push",
per_page: 100
}).then((members) => members.map((member) => member.login))
core.info(`Members (${members.length}): ${members}`)
const issues = await client.paginate(client.rest.issues.listForRepo, {
owner,
repo,
state: "open",
sort: "created",
direction: "asc",
per_page: 100
})
for (const issue of issues) {
let assignees = [];
if (issue.assignee) {
assignees.push(issue.assignee.login)
}
for (const assignee of issue.assignees) {
assignees.push(assignee.login)
}
assignees = _.uniq(assignees)
assignees = _.difference(assignees, members)
if (assignees.length > 0 && dayjs(issue.updated_at).isBefore(dt)) {
core.info(`Assignees ${assignees} of issue ${issue.number} will be unassigned.`)
await client.rest.issues.removeAssignees({
owner,
repo,
issue_number: issue.number,
assignees: assignees,
})
}
}
}
main().catch(handleError)

View File

@@ -1,26 +0,0 @@
{
"name": "cyborg",
"version": "1.0.0",
"description": "Automator for GreptimeDB Repository Management",
"private": true,
"packageManager": "pnpm@8.15.5",
"dependencies": {
"@actions/core": "^1.10.1",
"@actions/github": "^6.0.0",
"@octokit/request-error": "^6.1.1",
"@octokit/webhooks-types": "^7.5.1",
"conventional-commit-types": "^3.0.0",
"conventional-commits-parser": "^5.0.0",
"dayjs": "^1.11.11",
"dotenv": "^16.4.5",
"lodash": "^4.17.21"
},
"devDependencies": {
"@types/conventional-commits-parser": "^5.0.0",
"@types/lodash": "^4.17.0",
"@types/node": "^20.12.7",
"tsconfig-paths": "^4.2.0",
"tsx": "^4.8.2",
"typescript": "^5.4.5"
}
}

612
cyborg/pnpm-lock.yaml generated
View File

@@ -1,612 +0,0 @@
lockfileVersion: '6.0'
settings:
autoInstallPeers: true
excludeLinksFromLockfile: false
dependencies:
'@actions/core':
specifier: ^1.10.1
version: 1.10.1
'@actions/github':
specifier: ^6.0.0
version: 6.0.0
'@octokit/request-error':
specifier: ^6.1.1
version: 6.1.1
'@octokit/webhooks-types':
specifier: ^7.5.1
version: 7.5.1
conventional-commit-types:
specifier: ^3.0.0
version: 3.0.0
conventional-commits-parser:
specifier: ^5.0.0
version: 5.0.0
dayjs:
specifier: ^1.11.11
version: 1.11.11
dotenv:
specifier: ^16.4.5
version: 16.4.5
lodash:
specifier: ^4.17.21
version: 4.17.21
devDependencies:
'@types/conventional-commits-parser':
specifier: ^5.0.0
version: 5.0.0
'@types/lodash':
specifier: ^4.17.0
version: 4.17.0
'@types/node':
specifier: ^20.12.7
version: 20.12.7
tsconfig-paths:
specifier: ^4.2.0
version: 4.2.0
tsx:
specifier: ^4.8.2
version: 4.8.2
typescript:
specifier: ^5.4.5
version: 5.4.5
packages:
/@actions/core@1.10.1:
resolution: {integrity: sha512-3lBR9EDAY+iYIpTnTIXmWcNbX3T2kCkAEQGIQx4NVQ0575nk2k3GRZDTPQG+vVtS2izSLmINlxXf0uLtnrTP+g==}
dependencies:
'@actions/http-client': 2.2.1
uuid: 8.3.2
dev: false
/@actions/github@6.0.0:
resolution: {integrity: sha512-alScpSVnYmjNEXboZjarjukQEzgCRmjMv6Xj47fsdnqGS73bjJNDpiiXmp8jr0UZLdUB6d9jW63IcmddUP+l0g==}
dependencies:
'@actions/http-client': 2.2.1
'@octokit/core': 5.2.0
'@octokit/plugin-paginate-rest': 9.2.1(@octokit/core@5.2.0)
'@octokit/plugin-rest-endpoint-methods': 10.4.1(@octokit/core@5.2.0)
dev: false
/@actions/http-client@2.2.1:
resolution: {integrity: sha512-KhC/cZsq7f8I4LfZSJKgCvEwfkE8o1538VoBeoGzokVLLnbFDEAdFD3UhoMklxo2un9NJVBdANOresx7vTHlHw==}
dependencies:
tunnel: 0.0.6
undici: 5.28.4
dev: false
/@esbuild/aix-ppc64@0.20.2:
resolution: {integrity: sha512-D+EBOJHXdNZcLJRBkhENNG8Wji2kgc9AZ9KiPr1JuZjsNtyHzrsfLRrY0tk2H2aoFu6RANO1y1iPPUCDYWkb5g==}
engines: {node: '>=12'}
cpu: [ppc64]
os: [aix]
requiresBuild: true
dev: true
optional: true
/@esbuild/android-arm64@0.20.2:
resolution: {integrity: sha512-mRzjLacRtl/tWU0SvD8lUEwb61yP9cqQo6noDZP/O8VkwafSYwZ4yWy24kan8jE/IMERpYncRt2dw438LP3Xmg==}
engines: {node: '>=12'}
cpu: [arm64]
os: [android]
requiresBuild: true
dev: true
optional: true
/@esbuild/android-arm@0.20.2:
resolution: {integrity: sha512-t98Ra6pw2VaDhqNWO2Oph2LXbz/EJcnLmKLGBJwEwXX/JAN83Fym1rU8l0JUWK6HkIbWONCSSatf4sf2NBRx/w==}
engines: {node: '>=12'}
cpu: [arm]
os: [android]
requiresBuild: true
dev: true
optional: true
/@esbuild/android-x64@0.20.2:
resolution: {integrity: sha512-btzExgV+/lMGDDa194CcUQm53ncxzeBrWJcncOBxuC6ndBkKxnHdFJn86mCIgTELsooUmwUm9FkhSp5HYu00Rg==}
engines: {node: '>=12'}
cpu: [x64]
os: [android]
requiresBuild: true
dev: true
optional: true
/@esbuild/darwin-arm64@0.20.2:
resolution: {integrity: sha512-4J6IRT+10J3aJH3l1yzEg9y3wkTDgDk7TSDFX+wKFiWjqWp/iCfLIYzGyasx9l0SAFPT1HwSCR+0w/h1ES/MjA==}
engines: {node: '>=12'}
cpu: [arm64]
os: [darwin]
requiresBuild: true
dev: true
optional: true
/@esbuild/darwin-x64@0.20.2:
resolution: {integrity: sha512-tBcXp9KNphnNH0dfhv8KYkZhjc+H3XBkF5DKtswJblV7KlT9EI2+jeA8DgBjp908WEuYll6pF+UStUCfEpdysA==}
engines: {node: '>=12'}
cpu: [x64]
os: [darwin]
requiresBuild: true
dev: true
optional: true
/@esbuild/freebsd-arm64@0.20.2:
resolution: {integrity: sha512-d3qI41G4SuLiCGCFGUrKsSeTXyWG6yem1KcGZVS+3FYlYhtNoNgYrWcvkOoaqMhwXSMrZRl69ArHsGJ9mYdbbw==}
engines: {node: '>=12'}
cpu: [arm64]
os: [freebsd]
requiresBuild: true
dev: true
optional: true
/@esbuild/freebsd-x64@0.20.2:
resolution: {integrity: sha512-d+DipyvHRuqEeM5zDivKV1KuXn9WeRX6vqSqIDgwIfPQtwMP4jaDsQsDncjTDDsExT4lR/91OLjRo8bmC1e+Cw==}
engines: {node: '>=12'}
cpu: [x64]
os: [freebsd]
requiresBuild: true
dev: true
optional: true
/@esbuild/linux-arm64@0.20.2:
resolution: {integrity: sha512-9pb6rBjGvTFNira2FLIWqDk/uaf42sSyLE8j1rnUpuzsODBq7FvpwHYZxQ/It/8b+QOS1RYfqgGFNLRI+qlq2A==}
engines: {node: '>=12'}
cpu: [arm64]
os: [linux]
requiresBuild: true
dev: true
optional: true
/@esbuild/linux-arm@0.20.2:
resolution: {integrity: sha512-VhLPeR8HTMPccbuWWcEUD1Az68TqaTYyj6nfE4QByZIQEQVWBB8vup8PpR7y1QHL3CpcF6xd5WVBU/+SBEvGTg==}
engines: {node: '>=12'}
cpu: [arm]
os: [linux]
requiresBuild: true
dev: true
optional: true
/@esbuild/linux-ia32@0.20.2:
resolution: {integrity: sha512-o10utieEkNPFDZFQm9CoP7Tvb33UutoJqg3qKf1PWVeeJhJw0Q347PxMvBgVVFgouYLGIhFYG0UGdBumROyiig==}
engines: {node: '>=12'}
cpu: [ia32]
os: [linux]
requiresBuild: true
dev: true
optional: true
/@esbuild/linux-loong64@0.20.2:
resolution: {integrity: sha512-PR7sp6R/UC4CFVomVINKJ80pMFlfDfMQMYynX7t1tNTeivQ6XdX5r2XovMmha/VjR1YN/HgHWsVcTRIMkymrgQ==}
engines: {node: '>=12'}
cpu: [loong64]
os: [linux]
requiresBuild: true
dev: true
optional: true
/@esbuild/linux-mips64el@0.20.2:
resolution: {integrity: sha512-4BlTqeutE/KnOiTG5Y6Sb/Hw6hsBOZapOVF6njAESHInhlQAghVVZL1ZpIctBOoTFbQyGW+LsVYZ8lSSB3wkjA==}
engines: {node: '>=12'}
cpu: [mips64el]
os: [linux]
requiresBuild: true
dev: true
optional: true
/@esbuild/linux-ppc64@0.20.2:
resolution: {integrity: sha512-rD3KsaDprDcfajSKdn25ooz5J5/fWBylaaXkuotBDGnMnDP1Uv5DLAN/45qfnf3JDYyJv/ytGHQaziHUdyzaAg==}
engines: {node: '>=12'}
cpu: [ppc64]
os: [linux]
requiresBuild: true
dev: true
optional: true
/@esbuild/linux-riscv64@0.20.2:
resolution: {integrity: sha512-snwmBKacKmwTMmhLlz/3aH1Q9T8v45bKYGE3j26TsaOVtjIag4wLfWSiZykXzXuE1kbCE+zJRmwp+ZbIHinnVg==}
engines: {node: '>=12'}
cpu: [riscv64]
os: [linux]
requiresBuild: true
dev: true
optional: true
/@esbuild/linux-s390x@0.20.2:
resolution: {integrity: sha512-wcWISOobRWNm3cezm5HOZcYz1sKoHLd8VL1dl309DiixxVFoFe/o8HnwuIwn6sXre88Nwj+VwZUvJf4AFxkyrQ==}
engines: {node: '>=12'}
cpu: [s390x]
os: [linux]
requiresBuild: true
dev: true
optional: true
/@esbuild/linux-x64@0.20.2:
resolution: {integrity: sha512-1MdwI6OOTsfQfek8sLwgyjOXAu+wKhLEoaOLTjbijk6E2WONYpH9ZU2mNtR+lZ2B4uwr+usqGuVfFT9tMtGvGw==}
engines: {node: '>=12'}
cpu: [x64]
os: [linux]
requiresBuild: true
dev: true
optional: true
/@esbuild/netbsd-x64@0.20.2:
resolution: {integrity: sha512-K8/DhBxcVQkzYc43yJXDSyjlFeHQJBiowJ0uVL6Tor3jGQfSGHNNJcWxNbOI8v5k82prYqzPuwkzHt3J1T1iZQ==}
engines: {node: '>=12'}
cpu: [x64]
os: [netbsd]
requiresBuild: true
dev: true
optional: true
/@esbuild/openbsd-x64@0.20.2:
resolution: {integrity: sha512-eMpKlV0SThJmmJgiVyN9jTPJ2VBPquf6Kt/nAoo6DgHAoN57K15ZghiHaMvqjCye/uU4X5u3YSMgVBI1h3vKrQ==}
engines: {node: '>=12'}
cpu: [x64]
os: [openbsd]
requiresBuild: true
dev: true
optional: true
/@esbuild/sunos-x64@0.20.2:
resolution: {integrity: sha512-2UyFtRC6cXLyejf/YEld4Hajo7UHILetzE1vsRcGL3earZEW77JxrFjH4Ez2qaTiEfMgAXxfAZCm1fvM/G/o8w==}
engines: {node: '>=12'}
cpu: [x64]
os: [sunos]
requiresBuild: true
dev: true
optional: true
/@esbuild/win32-arm64@0.20.2:
resolution: {integrity: sha512-GRibxoawM9ZCnDxnP3usoUDO9vUkpAxIIZ6GQI+IlVmr5kP3zUq+l17xELTHMWTWzjxa2guPNyrpq1GWmPvcGQ==}
engines: {node: '>=12'}
cpu: [arm64]
os: [win32]
requiresBuild: true
dev: true
optional: true
/@esbuild/win32-ia32@0.20.2:
resolution: {integrity: sha512-HfLOfn9YWmkSKRQqovpnITazdtquEW8/SoHW7pWpuEeguaZI4QnCRW6b+oZTztdBnZOS2hqJ6im/D5cPzBTTlQ==}
engines: {node: '>=12'}
cpu: [ia32]
os: [win32]
requiresBuild: true
dev: true
optional: true
/@esbuild/win32-x64@0.20.2:
resolution: {integrity: sha512-N49X4lJX27+l9jbLKSqZ6bKNjzQvHaT8IIFUy+YIqmXQdjYCToGWwOItDrfby14c78aDd5NHQl29xingXfCdLQ==}
engines: {node: '>=12'}
cpu: [x64]
os: [win32]
requiresBuild: true
dev: true
optional: true
/@fastify/busboy@2.1.1:
resolution: {integrity: sha512-vBZP4NlzfOlerQTnba4aqZoMhE/a9HY7HRqoOPaETQcSQuWEIyZMHGfVu6w9wGtGK5fED5qRs2DteVCjOH60sA==}
engines: {node: '>=14'}
dev: false
/@octokit/auth-token@4.0.0:
resolution: {integrity: sha512-tY/msAuJo6ARbK6SPIxZrPBms3xPbfwBrulZe0Wtr/DIY9lje2HeV1uoebShn6mx7SjCHif6EjMvoREj+gZ+SA==}
engines: {node: '>= 18'}
dev: false
/@octokit/core@5.2.0:
resolution: {integrity: sha512-1LFfa/qnMQvEOAdzlQymH0ulepxbxnCYAKJZfMci/5XJyIHWgEYnDmgnKakbTh7CH2tFQ5O60oYDvns4i9RAIg==}
engines: {node: '>= 18'}
dependencies:
'@octokit/auth-token': 4.0.0
'@octokit/graphql': 7.1.0
'@octokit/request': 8.4.0
'@octokit/request-error': 5.1.0
'@octokit/types': 13.5.0
before-after-hook: 2.2.3
universal-user-agent: 6.0.1
dev: false
/@octokit/endpoint@9.0.5:
resolution: {integrity: sha512-ekqR4/+PCLkEBF6qgj8WqJfvDq65RH85OAgrtnVp1mSxaXF03u2xW/hUdweGS5654IlC0wkNYC18Z50tSYTAFw==}
engines: {node: '>= 18'}
dependencies:
'@octokit/types': 13.5.0
universal-user-agent: 6.0.1
dev: false
/@octokit/graphql@7.1.0:
resolution: {integrity: sha512-r+oZUH7aMFui1ypZnAvZmn0KSqAUgE1/tUXIWaqUCa1758ts/Jio84GZuzsvUkme98kv0WFY8//n0J1Z+vsIsQ==}
engines: {node: '>= 18'}
dependencies:
'@octokit/request': 8.4.0
'@octokit/types': 13.5.0
universal-user-agent: 6.0.1
dev: false
/@octokit/openapi-types@20.0.0:
resolution: {integrity: sha512-EtqRBEjp1dL/15V7WiX5LJMIxxkdiGJnabzYx5Apx4FkQIFgAfKumXeYAqqJCj1s+BMX4cPFIFC4OLCR6stlnA==}
dev: false
/@octokit/openapi-types@22.2.0:
resolution: {integrity: sha512-QBhVjcUa9W7Wwhm6DBFu6ZZ+1/t/oYxqc2tp81Pi41YNuJinbFRx8B133qVOrAaBbF7D/m0Et6f9/pZt9Rc+tg==}
dev: false
/@octokit/plugin-paginate-rest@9.2.1(@octokit/core@5.2.0):
resolution: {integrity: sha512-wfGhE/TAkXZRLjksFXuDZdmGnJQHvtU/joFQdweXUgzo1XwvBCD4o4+75NtFfjfLK5IwLf9vHTfSiU3sLRYpRw==}
engines: {node: '>= 18'}
peerDependencies:
'@octokit/core': '5'
dependencies:
'@octokit/core': 5.2.0
'@octokit/types': 12.6.0
dev: false
/@octokit/plugin-rest-endpoint-methods@10.4.1(@octokit/core@5.2.0):
resolution: {integrity: sha512-xV1b+ceKV9KytQe3zCVqjg+8GTGfDYwaT1ATU5isiUyVtlVAO3HNdzpS4sr4GBx4hxQ46s7ITtZrAsxG22+rVg==}
engines: {node: '>= 18'}
peerDependencies:
'@octokit/core': '5'
dependencies:
'@octokit/core': 5.2.0
'@octokit/types': 12.6.0
dev: false
/@octokit/request-error@5.1.0:
resolution: {integrity: sha512-GETXfE05J0+7H2STzekpKObFe765O5dlAKUTLNGeH+x47z7JjXHfsHKo5z21D/o/IOZTUEI6nyWyR+bZVP/n5Q==}
engines: {node: '>= 18'}
dependencies:
'@octokit/types': 13.5.0
deprecation: 2.3.1
once: 1.4.0
dev: false
/@octokit/request-error@6.1.1:
resolution: {integrity: sha512-1mw1gqT3fR/WFvnoVpY/zUM2o/XkMs/2AszUUG9I69xn0JFLv6PGkPhNk5lbfvROs79wiS0bqiJNxfCZcRJJdg==}
engines: {node: '>= 18'}
dependencies:
'@octokit/types': 13.5.0
dev: false
/@octokit/request@8.4.0:
resolution: {integrity: sha512-9Bb014e+m2TgBeEJGEbdplMVWwPmL1FPtggHQRkV+WVsMggPtEkLKPlcVYm/o8xKLkpJ7B+6N8WfQMtDLX2Dpw==}
engines: {node: '>= 18'}
dependencies:
'@octokit/endpoint': 9.0.5
'@octokit/request-error': 5.1.0
'@octokit/types': 13.5.0
universal-user-agent: 6.0.1
dev: false
/@octokit/types@12.6.0:
resolution: {integrity: sha512-1rhSOfRa6H9w4YwK0yrf5faDaDTb+yLyBUKOCV4xtCDB5VmIPqd/v9yr9o6SAzOAlRxMiRiCic6JVM1/kunVkw==}
dependencies:
'@octokit/openapi-types': 20.0.0
dev: false
/@octokit/types@13.5.0:
resolution: {integrity: sha512-HdqWTf5Z3qwDVlzCrP8UJquMwunpDiMPt5er+QjGzL4hqr/vBVY/MauQgS1xWxCDT1oMx1EULyqxncdCY/NVSQ==}
dependencies:
'@octokit/openapi-types': 22.2.0
dev: false
/@octokit/webhooks-types@7.5.1:
resolution: {integrity: sha512-1dozxWEP8lKGbtEu7HkRbK1F/nIPuJXNfT0gd96y6d3LcHZTtRtlf8xz3nicSJfesADxJyDh+mWBOsdLkqgzYw==}
dev: false
/@types/conventional-commits-parser@5.0.0:
resolution: {integrity: sha512-loB369iXNmAZglwWATL+WRe+CRMmmBPtpolYzIebFaX4YA3x+BEfLqhUAV9WanycKI3TG1IMr5bMJDajDKLlUQ==}
dependencies:
'@types/node': 20.12.7
dev: true
/@types/lodash@4.17.0:
resolution: {integrity: sha512-t7dhREVv6dbNj0q17X12j7yDG4bD/DHYX7o5/DbDxobP0HnGPgpRz2Ej77aL7TZT3DSw13fqUTj8J4mMnqa7WA==}
dev: true
/@types/node@20.12.7:
resolution: {integrity: sha512-wq0cICSkRLVaf3UGLMGItu/PtdY7oaXaI/RVU+xliKVOtRna3PRY57ZDfztpDL0n11vfymMUnXv8QwYCO7L1wg==}
dependencies:
undici-types: 5.26.5
dev: true
/JSONStream@1.3.5:
resolution: {integrity: sha512-E+iruNOY8VV9s4JEbe1aNEm6MiszPRr/UfcHMz0TQh1BXSxHK+ASV1R6W4HpjBhSeS+54PIsAMCBmwD06LLsqQ==}
hasBin: true
dependencies:
jsonparse: 1.3.1
through: 2.3.8
dev: false
/before-after-hook@2.2.3:
resolution: {integrity: sha512-NzUnlZexiaH/46WDhANlyR2bXRopNg4F/zuSA3OpZnllCUgRaOF2znDioDWrmbNVsuZk6l9pMquQB38cfBZwkQ==}
dev: false
/conventional-commit-types@3.0.0:
resolution: {integrity: sha512-SmmCYnOniSsAa9GqWOeLqc179lfr5TRu5b4QFDkbsrJ5TZjPJx85wtOr3zn+1dbeNiXDKGPbZ72IKbPhLXh/Lg==}
dev: false
/conventional-commits-parser@5.0.0:
resolution: {integrity: sha512-ZPMl0ZJbw74iS9LuX9YIAiW8pfM5p3yh2o/NbXHbkFuZzY5jvdi5jFycEOkmBW5H5I7nA+D6f3UcsCLP2vvSEA==}
engines: {node: '>=16'}
hasBin: true
dependencies:
JSONStream: 1.3.5
is-text-path: 2.0.0
meow: 12.1.1
split2: 4.2.0
dev: false
/dayjs@1.11.11:
resolution: {integrity: sha512-okzr3f11N6WuqYtZSvm+F776mB41wRZMhKP+hc34YdW+KmtYYK9iqvHSwo2k9FEH3fhGXvOPV6yz2IcSrfRUDg==}
dev: false
/deprecation@2.3.1:
resolution: {integrity: sha512-xmHIy4F3scKVwMsQ4WnVaS8bHOx0DmVwRywosKhaILI0ywMDWPtBSku2HNxRvF7jtwDRsoEwYQSfbxj8b7RlJQ==}
dev: false
/dotenv@16.4.5:
resolution: {integrity: sha512-ZmdL2rui+eB2YwhsWzjInR8LldtZHGDoQ1ugH85ppHKwpUHL7j7rN0Ti9NCnGiQbhaZ11FpR+7ao1dNsmduNUg==}
engines: {node: '>=12'}
dev: false
/esbuild@0.20.2:
resolution: {integrity: sha512-WdOOppmUNU+IbZ0PaDiTst80zjnrOkyJNHoKupIcVyU8Lvla3Ugx94VzkQ32Ijqd7UhHJy75gNWDMUekcrSJ6g==}
engines: {node: '>=12'}
hasBin: true
requiresBuild: true
optionalDependencies:
'@esbuild/aix-ppc64': 0.20.2
'@esbuild/android-arm': 0.20.2
'@esbuild/android-arm64': 0.20.2
'@esbuild/android-x64': 0.20.2
'@esbuild/darwin-arm64': 0.20.2
'@esbuild/darwin-x64': 0.20.2
'@esbuild/freebsd-arm64': 0.20.2
'@esbuild/freebsd-x64': 0.20.2
'@esbuild/linux-arm': 0.20.2
'@esbuild/linux-arm64': 0.20.2
'@esbuild/linux-ia32': 0.20.2
'@esbuild/linux-loong64': 0.20.2
'@esbuild/linux-mips64el': 0.20.2
'@esbuild/linux-ppc64': 0.20.2
'@esbuild/linux-riscv64': 0.20.2
'@esbuild/linux-s390x': 0.20.2
'@esbuild/linux-x64': 0.20.2
'@esbuild/netbsd-x64': 0.20.2
'@esbuild/openbsd-x64': 0.20.2
'@esbuild/sunos-x64': 0.20.2
'@esbuild/win32-arm64': 0.20.2
'@esbuild/win32-ia32': 0.20.2
'@esbuild/win32-x64': 0.20.2
dev: true
/fsevents@2.3.3:
resolution: {integrity: sha512-5xoDfX+fL7faATnagmWPpbFtwh/R77WmMMqqHGS65C3vvB0YHrgF+B1YmZ3441tMj5n63k0212XNoJwzlhffQw==}
engines: {node: ^8.16.0 || ^10.6.0 || >=11.0.0}
os: [darwin]
requiresBuild: true
dev: true
optional: true
/get-tsconfig@4.7.3:
resolution: {integrity: sha512-ZvkrzoUA0PQZM6fy6+/Hce561s+faD1rsNwhnO5FelNjyy7EMGJ3Rz1AQ8GYDWjhRs/7dBLOEJvhK8MiEJOAFg==}
dependencies:
resolve-pkg-maps: 1.0.0
dev: true
/is-text-path@2.0.0:
resolution: {integrity: sha512-+oDTluR6WEjdXEJMnC2z6A4FRwFoYuvShVVEGsS7ewc0UTi2QtAKMDJuL4BDEVt+5T7MjFo12RP8ghOM75oKJw==}
engines: {node: '>=8'}
dependencies:
text-extensions: 2.4.0
dev: false
/json5@2.2.3:
resolution: {integrity: sha512-XmOWe7eyHYH14cLdVPoyg+GOH3rYX++KpzrylJwSW98t3Nk+U8XOl8FWKOgwtzdb8lXGf6zYwDUzeHMWfxasyg==}
engines: {node: '>=6'}
hasBin: true
dev: true
/jsonparse@1.3.1:
resolution: {integrity: sha512-POQXvpdL69+CluYsillJ7SUhKvytYjW9vG/GKpnf+xP8UWgYEM/RaMzHHofbALDiKbbP1W8UEYmgGl39WkPZsg==}
engines: {'0': node >= 0.2.0}
dev: false
/lodash@4.17.21:
resolution: {integrity: sha512-v2kDEe57lecTulaDIuNTPy3Ry4gLGJ6Z1O3vE1krgXZNrsQ+LFTGHVxVjcXPs17LhbZVGedAJv8XZ1tvj5FvSg==}
dev: false
/meow@12.1.1:
resolution: {integrity: sha512-BhXM0Au22RwUneMPwSCnyhTOizdWoIEPU9sp0Aqa1PnDMR5Wv2FGXYDjuzJEIX+Eo2Rb8xuYe5jrnm5QowQFkw==}
engines: {node: '>=16.10'}
dev: false
/minimist@1.2.8:
resolution: {integrity: sha512-2yyAR8qBkN3YuheJanUpWC5U3bb5osDywNB8RzDVlDwDHbocAJveqqj1u8+SVD7jkWT4yvsHCpWqqWqAxb0zCA==}
dev: true
/once@1.4.0:
resolution: {integrity: sha512-lNaJgI+2Q5URQBkccEKHTQOPaXdUxnZZElQTZY0MFUAuaEqe1E+Nyvgdz/aIyNi6Z9MzO5dv1H8n58/GELp3+w==}
dependencies:
wrappy: 1.0.2
dev: false
/resolve-pkg-maps@1.0.0:
resolution: {integrity: sha512-seS2Tj26TBVOC2NIc2rOe2y2ZO7efxITtLZcGSOnHHNOQ7CkiUBfw0Iw2ck6xkIhPwLhKNLS8BO+hEpngQlqzw==}
dev: true
/split2@4.2.0:
resolution: {integrity: sha512-UcjcJOWknrNkF6PLX83qcHM6KHgVKNkV62Y8a5uYDVv9ydGQVwAHMKqHdJje1VTWpljG0WYpCDhrCdAOYH4TWg==}
engines: {node: '>= 10.x'}
dev: false
/strip-bom@3.0.0:
resolution: {integrity: sha512-vavAMRXOgBVNF6nyEEmL3DBK19iRpDcoIwW+swQ+CbGiu7lju6t+JklA1MHweoWtadgt4ISVUsXLyDq34ddcwA==}
engines: {node: '>=4'}
dev: true
/text-extensions@2.4.0:
resolution: {integrity: sha512-te/NtwBwfiNRLf9Ijqx3T0nlqZiQ2XrrtBvu+cLL8ZRrGkO0NHTug8MYFKyoSrv/sHTaSKfilUkizV6XhxMJ3g==}
engines: {node: '>=8'}
dev: false
/through@2.3.8:
resolution: {integrity: sha512-w89qg7PI8wAdvX60bMDP+bFoD5Dvhm9oLheFp5O4a2QF0cSBGsBX4qZmadPMvVqlLJBBci+WqGGOAPvcDeNSVg==}
dev: false
/tsconfig-paths@4.2.0:
resolution: {integrity: sha512-NoZ4roiN7LnbKn9QqE1amc9DJfzvZXxF4xDavcOWt1BPkdx+m+0gJuPM+S0vCe7zTJMYUP0R8pO2XMr+Y8oLIg==}
engines: {node: '>=6'}
dependencies:
json5: 2.2.3
minimist: 1.2.8
strip-bom: 3.0.0
dev: true
/tsx@4.8.2:
resolution: {integrity: sha512-hmmzS4U4mdy1Cnzpl/NQiPUC2k34EcNSTZYVJThYKhdqTwuBeF+4cG9KUK/PFQ7KHaAaYwqlb7QfmsE2nuj+WA==}
engines: {node: '>=18.0.0'}
hasBin: true
dependencies:
esbuild: 0.20.2
get-tsconfig: 4.7.3
optionalDependencies:
fsevents: 2.3.3
dev: true
/tunnel@0.0.6:
resolution: {integrity: sha512-1h/Lnq9yajKY2PEbBadPXj3VxsDDu844OnaAo52UVmIzIvwwtBPIuNvkjuzBlTWpfJyUbG3ez0KSBibQkj4ojg==}
engines: {node: '>=0.6.11 <=0.7.0 || >=0.7.3'}
dev: false
/typescript@5.4.5:
resolution: {integrity: sha512-vcI4UpRgg81oIRUFwR0WSIHKt11nJ7SAVlYNIu+QpqeyXP+gpQJy/Z4+F0aGxSE4MqwjyXvW/TzgkLAx2AGHwQ==}
engines: {node: '>=14.17'}
hasBin: true
dev: true
/undici-types@5.26.5:
resolution: {integrity: sha512-JlCMO+ehdEIKqlFxk6IfVoAUVmgz7cU7zD/h9XZ0qzeosSHmUJVOzSQvvYSYWXkFXC+IfLKSIffhv0sVZup6pA==}
dev: true
/undici@5.28.4:
resolution: {integrity: sha512-72RFADWFqKmUb2hmmvNODKL3p9hcB6Gt2DOQMis1SEBaV6a4MH8soBvzg+95CYhCKPFedut2JY9bMfrDl9D23g==}
engines: {node: '>=14.0'}
dependencies:
'@fastify/busboy': 2.1.1
dev: false
/universal-user-agent@6.0.1:
resolution: {integrity: sha512-yCzhz6FN2wU1NiiQRogkTQszlQSlpWaw8SvVegAc+bDxbzHgh1vX8uIe8OYyMH6DwH+sdTJsgMl36+mSMdRJIQ==}
dev: false
/uuid@8.3.2:
resolution: {integrity: sha512-+NYs2QeMWy+GWFOEm9xnn6HCDp0l7QBD7ml8zLUmJ+93Q5NF0NocErnwkTkXVFNiX3/fpC6afS8Dhb/gz7R7eg==}
hasBin: true
dev: false
/wrappy@1.0.2:
resolution: {integrity: sha512-l4Sp/DRseor9wL6EvV2+TuQn63dMkPjZ/sp9XkghTEbV9KlPS1xUsZ3u7/IQO4wxtcFB4bgpQPRcR3QCvezPcQ==}
dev: false

View File

@@ -1,30 +0,0 @@
/*
* Copyright 2023 Greptime Team
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
import * as core from "@actions/core";
import {config} from "dotenv";
import {getOctokit} from "@actions/github";
import {GitHub} from "@actions/github/lib/utils";
export function handleError(err: any): void {
console.error(err)
core.setFailed(`Unhandled error: ${err}`)
}
export function obtainClient(token: string): InstanceType<typeof GitHub> {
config()
return getOctokit(process.env[token])
}

View File

@@ -1,14 +0,0 @@
{
"ts-node": {
"require": ["tsconfig-paths/register"]
},
"compilerOptions": {
"module": "NodeNext",
"moduleResolution": "NodeNext",
"target": "ES6",
"paths": {
"@/*": ["./src/*"]
},
"resolveJsonModule": true,
}
}

View File

@@ -1,9 +1,5 @@
FROM centos:7 FROM centos:7
# Note: CentOS 7 has reached EOL since 2024-07-01 thus `mirror.centos.org` is no longer available and we need to use `vault.centos.org` instead.
RUN sed -i s/mirror.centos.org/vault.centos.org/g /etc/yum.repos.d/*.repo
RUN sed -i s/^#.*baseurl=http/baseurl=http/g /etc/yum.repos.d/*.repo
RUN yum install -y epel-release \ RUN yum install -y epel-release \
openssl \ openssl \
openssl-devel \ openssl-devel \

View File

@@ -1,11 +1,5 @@
FROM ubuntu:22.04 FROM ubuntu:22.04
# The root path under which contains all the dependencies to build this Dockerfile.
ARG DOCKER_BUILD_ROOT=.
# The binary name of GreptimeDB executable.
# Defaults to "greptime", but sometimes in other projects it might be different.
ARG TARGET_BIN=greptime
RUN apt-get update && DEBIAN_FRONTEND=noninteractive apt-get install -y \ RUN apt-get update && DEBIAN_FRONTEND=noninteractive apt-get install -y \
ca-certificates \ ca-certificates \
python3.10 \ python3.10 \
@@ -13,16 +7,14 @@ RUN apt-get update && DEBIAN_FRONTEND=noninteractive apt-get install -y \
python3-pip \ python3-pip \
curl curl
COPY $DOCKER_BUILD_ROOT/docker/python/requirements.txt /etc/greptime/requirements.txt COPY ./docker/python/requirements.txt /etc/greptime/requirements.txt
RUN python3 -m pip install -r /etc/greptime/requirements.txt RUN python3 -m pip install -r /etc/greptime/requirements.txt
ARG TARGETARCH ARG TARGETARCH
ADD $TARGETARCH/$TARGET_BIN /greptime/bin/ ADD $TARGETARCH/greptime /greptime/bin/
ENV PATH /greptime/bin/:$PATH ENV PATH /greptime/bin/:$PATH
ENV TARGET_BIN=$TARGET_BIN ENTRYPOINT ["greptime"]
ENTRYPOINT ["sh", "-c", "exec $TARGET_BIN \"$@\"", "--"]

View File

@@ -1,16 +0,0 @@
FROM ubuntu:22.04
# The binary name of GreptimeDB executable.
# Defaults to "greptime", but sometimes in other projects it might be different.
ARG TARGET_BIN=greptime
RUN apt-get update && DEBIAN_FRONTEND=noninteractive apt-get install -y \
ca-certificates \
curl
ARG BINARY_PATH
ADD $BINARY_PATH/$TARGET_BIN /greptime/bin/
ENV PATH /greptime/bin/:$PATH
ENTRYPOINT ["greptime"]

View File

@@ -34,7 +34,7 @@ RUN rustup toolchain install ${RUST_TOOLCHAIN}
RUN rustup target add aarch64-linux-android RUN rustup target add aarch64-linux-android
# Install cargo-ndk # Install cargo-ndk
RUN cargo install cargo-ndk@3.5.4 RUN cargo install cargo-ndk
ENV ANDROID_NDK_HOME $NDK_ROOT ENV ANDROID_NDK_HOME $NDK_ROOT
# Builder entrypoint. # Builder entrypoint.

View File

@@ -1,50 +0,0 @@
#!/bin/bash
set -euxo pipefail
cd "$(mktemp -d)"
# Fix version to v1.6.6, this is different than the latest version in original install script in
# https://raw.githubusercontent.com/cargo-bins/cargo-binstall/main/install-from-binstall-release.sh
base_url="https://github.com/cargo-bins/cargo-binstall/releases/download/v1.6.6/cargo-binstall-"
os="$(uname -s)"
if [ "$os" == "Darwin" ]; then
url="${base_url}universal-apple-darwin.zip"
curl -LO --proto '=https' --tlsv1.2 -sSf "$url"
unzip cargo-binstall-universal-apple-darwin.zip
elif [ "$os" == "Linux" ]; then
machine="$(uname -m)"
if [ "$machine" == "armv7l" ]; then
machine="armv7"
fi
target="${machine}-unknown-linux-musl"
if [ "$machine" == "armv7" ]; then
target="${target}eabihf"
fi
url="${base_url}${target}.tgz"
curl -L --proto '=https' --tlsv1.2 -sSf "$url" | tar -xvzf -
elif [ "${OS-}" = "Windows_NT" ]; then
machine="$(uname -m)"
target="${machine}-pc-windows-msvc"
url="${base_url}${target}.zip"
curl -LO --proto '=https' --tlsv1.2 -sSf "$url"
unzip "cargo-binstall-${target}.zip"
else
echo "Unsupported OS ${os}"
exit 1
fi
./cargo-binstall -y --force cargo-binstall
CARGO_HOME="${CARGO_HOME:-$HOME/.cargo}"
if ! [[ ":$PATH:" == *":$CARGO_HOME/bin:"* ]]; then
if [ -n "${CI:-}" ] && [ -n "${GITHUB_PATH:-}" ]; then
echo "$CARGO_HOME/bin" >> "$GITHUB_PATH"
else
echo
printf "\033[0;31mYour path is missing %s, you might want to add it.\033[0m\n" "$CARGO_HOME/bin"
echo
fi
fi

View File

@@ -2,10 +2,6 @@ FROM centos:7 as builder
ENV LANG en_US.utf8 ENV LANG en_US.utf8
# Note: CentOS 7 has reached EOL since 2024-07-01 thus `mirror.centos.org` is no longer available and we need to use `vault.centos.org` instead.
RUN sed -i s/mirror.centos.org/vault.centos.org/g /etc/yum.repos.d/*.repo
RUN sed -i s/^#.*baseurl=http/baseurl=http/g /etc/yum.repos.d/*.repo
# Install dependencies # Install dependencies
RUN ulimit -n 1024000 && yum groupinstall -y 'Development Tools' RUN ulimit -n 1024000 && yum groupinstall -y 'Development Tools'
RUN yum install -y epel-release \ RUN yum install -y epel-release \
@@ -29,12 +25,5 @@ ENV PATH /opt/rh/rh-python38/root/usr/bin:/usr/local/bin:/root/.cargo/bin/:$PATH
ARG RUST_TOOLCHAIN ARG RUST_TOOLCHAIN
RUN rustup toolchain install ${RUST_TOOLCHAIN} RUN rustup toolchain install ${RUST_TOOLCHAIN}
# Install cargo-binstall with a specific version to adapt the current rust toolchain.
# Note: if we use the latest version, we may encounter the following `use of unstable library feature 'io_error_downcast'` error.
# compile from source take too long, so we use the precompiled binary instead
COPY $DOCKER_BUILD_ROOT/docker/dev-builder/binstall/pull_binstall.sh /usr/local/bin/pull_binstall.sh
RUN chmod +x /usr/local/bin/pull_binstall.sh && /usr/local/bin/pull_binstall.sh
# Install nextest. # Install nextest.
RUN cargo binstall cargo-nextest --no-confirm RUN cargo install cargo-nextest --locked

View File

@@ -1,8 +1,5 @@
FROM ubuntu:20.04 FROM ubuntu:20.04
# The root path under which contains all the dependencies to build this Dockerfile.
ARG DOCKER_BUILD_ROOT=.
ENV LANG en_US.utf8 ENV LANG en_US.utf8
WORKDIR /greptimedb WORKDIR /greptimedb
@@ -24,35 +21,16 @@ RUN apt-get update && DEBIAN_FRONTEND=noninteractive apt-get install -y \
python3.10 \ python3.10 \
python3.10-dev python3.10-dev
# https://github.com/GreptimeTeam/greptimedb/actions/runs/10935485852/job/30357457188#step:3:7106
# `aws-lc-sys` require gcc >= 10.3.0 to work, hence alias to use gcc-10
RUN apt-get remove -y gcc-9 g++-9 cpp-9 && \
apt-get install -y gcc-10 g++-10 cpp-10 make cmake && \
ln -sf /usr/bin/gcc-10 /usr/bin/gcc && ln -sf /usr/bin/g++-10 /usr/bin/g++ && \
ln -sf /usr/bin/gcc-10 /usr/bin/cc && \
ln -sf /usr/bin/g++-10 /usr/bin/cpp && ln -sf /usr/bin/g++-10 /usr/bin/c++ && \
cc --version && gcc --version && g++ --version && cpp --version && c++ --version
# Remove Python 3.8 and install pip. # Remove Python 3.8 and install pip.
RUN apt-get -y purge python3.8 && \ RUN apt-get -y purge python3.8 && \
apt-get -y autoremove && \ apt-get -y autoremove && \
ln -s /usr/bin/python3.10 /usr/bin/python3 && \ ln -s /usr/bin/python3.10 /usr/bin/python3 && \
curl -sS https://bootstrap.pypa.io/get-pip.py | python3.10 curl -sS https://bootstrap.pypa.io/get-pip.py | python3.10
# Silence all `safe.directory` warnings, to avoid the "detect dubious repository" error when building with submodules. RUN git config --global --add safe.directory /greptimedb
# Disabling the safe directory check here won't pose extra security issues, because in our usage for this dev build
# image, we use it solely on our own environment (that github action's VM, or ECS created dynamically by ourselves),
# and the repositories are pulled from trusted sources (still us, of course). Doing so does not violate the intention
# of the Git's addition to the "safe.directory" at the first place (see the commit message here:
# https://github.com/git/git/commit/8959555cee7ec045958f9b6dd62e541affb7e7d9).
# There's also another solution to this, that we add the desired submodules to the safe directory, instead of using
# wildcard here. However, that requires the git's config files and the submodules all owned by the very same user.
# It's troublesome to do this since the dev build runs in Docker, which is under user "root"; while outside the Docker,
# it can be a different user that have prepared the submodules.
RUN git config --global --add safe.directory *
# Install Python dependencies. # Install Python dependencies.
COPY $DOCKER_BUILD_ROOT/docker/python/requirements.txt /etc/greptime/requirements.txt COPY ./docker/python/requirements.txt /etc/greptime/requirements.txt
RUN python3 -m pip install -r /etc/greptime/requirements.txt RUN python3 -m pip install -r /etc/greptime/requirements.txt
# Install Rust. # Install Rust.
@@ -64,11 +42,5 @@ ENV PATH /root/.cargo/bin/:$PATH
ARG RUST_TOOLCHAIN ARG RUST_TOOLCHAIN
RUN rustup toolchain install ${RUST_TOOLCHAIN} RUN rustup toolchain install ${RUST_TOOLCHAIN}
# Install cargo-binstall with a specific version to adapt the current rust toolchain.
# Note: if we use the latest version, we may encounter the following `use of unstable library feature 'io_error_downcast'` error.
# compile from source take too long, so we use the precompiled binary instead
COPY $DOCKER_BUILD_ROOT/docker/dev-builder/binstall/pull_binstall.sh /usr/local/bin/pull_binstall.sh
RUN chmod +x /usr/local/bin/pull_binstall.sh && /usr/local/bin/pull_binstall.sh
# Install nextest. # Install nextest.
RUN cargo binstall cargo-nextest --no-confirm RUN cargo install cargo-nextest --locked

View File

@@ -43,9 +43,5 @@ ENV PATH /root/.cargo/bin/:$PATH
ARG RUST_TOOLCHAIN ARG RUST_TOOLCHAIN
RUN rustup toolchain install ${RUST_TOOLCHAIN} RUN rustup toolchain install ${RUST_TOOLCHAIN}
# Install cargo-binstall with a specific version to adapt the current rust toolchain.
# Note: if we use the latest version, we may encounter the following `use of unstable library feature 'io_error_downcast'` error.
RUN cargo install cargo-binstall --version 1.6.6 --locked
# Install nextest. # Install nextest.
RUN cargo binstall cargo-nextest --no-confirm RUN cargo install cargo-nextest --locked

View File

@@ -1,133 +0,0 @@
x-custom:
etcd_initial_cluster_token: &etcd_initial_cluster_token "--initial-cluster-token=etcd-cluster"
etcd_common_settings: &etcd_common_settings
image: "${ETCD_REGISTRY:-quay.io}/${ETCD_NAMESPACE:-coreos}/etcd:${ETCD_VERSION:-v3.5.10}"
entrypoint: /usr/local/bin/etcd
greptimedb_image: &greptimedb_image "${GREPTIMEDB_REGISTRY:-docker.io}/${GREPTIMEDB_NAMESPACE:-greptime}/greptimedb:${GREPTIMEDB_VERSION:-latest}"
services:
etcd0:
<<: *etcd_common_settings
container_name: etcd0
ports:
- 2379:2379
- 2380:2380
command:
- --name=etcd0
- --data-dir=/var/lib/etcd
- --initial-advertise-peer-urls=http://etcd0:2380
- --listen-peer-urls=http://0.0.0.0:2380
- --listen-client-urls=http://0.0.0.0:2379
- --advertise-client-urls=http://etcd0:2379
- --heartbeat-interval=250
- --election-timeout=1250
- --initial-cluster=etcd0=http://etcd0:2380
- --initial-cluster-state=new
- *etcd_initial_cluster_token
volumes:
- /tmp/greptimedb-cluster-docker-compose/etcd0:/var/lib/etcd
healthcheck:
test: [ "CMD", "etcdctl", "--endpoints=http://etcd0:2379", "endpoint", "health" ]
interval: 5s
timeout: 3s
retries: 5
networks:
- greptimedb
metasrv:
image: *greptimedb_image
container_name: metasrv
ports:
- 3002:3002
command:
- metasrv
- start
- --bind-addr=0.0.0.0:3002
- --server-addr=metasrv:3002
- --store-addrs=etcd0:2379
healthcheck:
test: [ "CMD", "curl", "-f", "http://metasrv:3002/health" ]
interval: 5s
timeout: 3s
retries: 5
depends_on:
etcd0:
condition: service_healthy
networks:
- greptimedb
datanode0:
image: *greptimedb_image
container_name: datanode0
ports:
- 3001:3001
- 5000:5000
command:
- datanode
- start
- --node-id=0
- --rpc-addr=0.0.0.0:3001
- --rpc-hostname=datanode0:3001
- --metasrv-addrs=metasrv:3002
- --http-addr=0.0.0.0:5000
volumes:
- /tmp/greptimedb-cluster-docker-compose/datanode0:/tmp/greptimedb
healthcheck:
test: [ "CMD", "curl", "-f", "http://datanode0:5000/health" ]
interval: 5s
timeout: 3s
retries: 5
depends_on:
metasrv:
condition: service_healthy
networks:
- greptimedb
frontend0:
image: *greptimedb_image
container_name: frontend0
ports:
- 4000:4000
- 4001:4001
- 4002:4002
- 4003:4003
command:
- frontend
- start
- --metasrv-addrs=metasrv:3002
- --http-addr=0.0.0.0:4000
- --rpc-addr=0.0.0.0:4001
- --mysql-addr=0.0.0.0:4002
- --postgres-addr=0.0.0.0:4003
healthcheck:
test: [ "CMD", "curl", "-f", "http://frontend0:4000/health" ]
interval: 5s
timeout: 3s
retries: 5
depends_on:
datanode0:
condition: service_healthy
networks:
- greptimedb
flownode0:
image: *greptimedb_image
container_name: flownode0
ports:
- 4004:4004
command:
- flownode
- start
- --node-id=0
- --metasrv-addrs=metasrv:3002
- --rpc-addr=0.0.0.0:4004
- --rpc-hostname=flownode0:4004
depends_on:
frontend0:
condition: service_healthy
networks:
- greptimedb
networks:
greptimedb:
name: greptimedb
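For reference, a minimal sketch of how a compose file like this could be brought up locally, assuming it is saved as `docker-compose.yml` in the current directory (all commands below are illustrative):
```bash
# Start etcd, metasrv, datanode, frontend and flownode in the background.
docker compose -f docker-compose.yml up -d

# The frontend publishes port 4000; its /health endpoint is the same one used by the healthcheck above.
curl -f http://127.0.0.1:4000/health

# Tear the cluster down; data remains under /tmp/greptimedb-cluster-docker-compose.
docker compose -f docker-compose.yml down
```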

View File

@@ -1,51 +0,0 @@
# Log benchmark configuration
This repo holds the configuration we used to benchmark GreptimeDB, Clickhouse and Elasticsearch.
Here are the versions of the databases we used in the benchmark:
| name | version |
| :------------ | :--------- |
| GreptimeDB | v0.9.2 |
| Clickhouse | 24.9.1.219 |
| Elasticsearch | 8.15.0 |
## Structured model vs Unstructured model
We divide the test into two parts, using the structured model and the unstructured model respectively. You can also see the difference in the create table clauses.
__Structured model__
The log data is pre-processed into columns by vector. For example, an insert request looks like the following:
```SQL
INSERT INTO test_table (bytes, http_version, ip, method, path, status, user, timestamp) VALUES ()
```
The goal is to test string/text support for each database. In real scenarios this means the data source (or log data producers) has separate fields defined, or has already processed the raw input.
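For illustration only, a populated version of such a structured insert might look like the sketch below, sent over GreptimeDB's MySQL protocol port; every value here is invented:
```bash
# Hypothetical structured-model row; all column values are made up for illustration.
mysql -h 127.0.0.1 -P 4002 -e "
INSERT INTO test_table (bytes, http_version, ip, method, path, status, user, timestamp)
VALUES (1024, 'HTTP/1.1', '10.0.0.1', 'GET', '/user/booperbot124', 200, 'CrucifiX', '2024-08-16 04:30:44.000');
"
```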
__Unstructured model__
The log data is inserted as a long string, and then we build a fulltext index on these strings. For example, an insert request looks like the following:
```SQL
INSERT INTO test_table (message, timestamp) VALUES ()
```
The goal is to test fuzzy search performance for each database. In real scenarios this means the log is produced by some kind of middleware and inserted directly into the database.
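Likewise, a hypothetical populated insert for the unstructured model would carry the whole log line in the single `message` column (again just a sketch with invented values):
```bash
# Hypothetical unstructured-model row; the raw log line goes into the message column as-is.
mysql -h 127.0.0.1 -P 4002 -e "
INSERT INTO test_table (message, timestamp)
VALUES ('10.0.0.1 - CrucifiX [16/Aug/2024:04:30:44 +0000] GET /user/booperbot124 HTTP/1.1 200 1024', '2024-08-16 04:30:44.000');
"
```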
## Creating tables
See [here](./create_table.sql) for GreptimeDB and Clickhouse's create table clauses.
The Elasticsearch mapping is created automatically.
## Vector Configuration
We use vector to generate random log data and send inserts to databases.
Please refer to [structured config](./structured_vector.toml) and [unstructured config](./unstructured_vector.toml) for detailed configuration.
## SQLs and payloads
Please refer to [SQL query](./query.sql) for GreptimeDB and Clickhouse, and [query payload](./query.md) for Elasticsearch.
## Steps to reproduce
0. Decide whether to run the structured model test or the unstructured model test.
1. Build the vector binary (see vector's config file for the specific branch) and the database binaries accordingly.
2. Create the tables in GreptimeDB and Clickhouse in advance.
3. Run vector to insert data (see the sketch after this list).
4. When data insertion is finished, run queries against each database. Note: you'll need to update the timerange values after data insertion.
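A rough shell sketch of steps 2 and 3, assuming default ports and a hypothetical `greptimedb_create_table.sql` that holds only the GreptimeDB DDL from create_table.sql:
```bash
# Step 2: create the table in advance, using GreptimeDB's MySQL protocol port (hypothetical file name).
mysql -h 127.0.0.1 -P 4002 < greptimedb_create_table.sql

# Step 3: generate and ingest log data with the patched vector build (structured model shown here).
vector --config ./structured_vector.toml
```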
## Additional notes
- You can tune GreptimeDB's configuration to get better performance.
- You can set up GreptimeDB to use S3 as storage, see [here](https://docs.greptime.com/user-guide/deployments/configuration#storage-options).

View File

@@ -1,56 +0,0 @@
-- GreptimeDB create table clause
-- structured test, use vector to pre-process log data into fields
CREATE TABLE IF NOT EXISTS `test_table` (
`bytes` Int64 NULL,
`http_version` STRING NULL,
`ip` STRING NULL,
`method` STRING NULL,
`path` STRING NULL,
`status` SMALLINT UNSIGNED NULL,
`user` STRING NULL,
`timestamp` TIMESTAMP(3) NOT NULL,
PRIMARY KEY (`user`, `path`, `status`),
TIME INDEX (`timestamp`)
)
ENGINE=mito
WITH(
append_mode = 'true'
);
-- unstructured test, build fulltext index on message column
CREATE TABLE IF NOT EXISTS `test_table` (
`message` STRING NULL FULLTEXT WITH(analyzer = 'English', case_sensitive = 'false'),
`timestamp` TIMESTAMP(3) NOT NULL,
TIME INDEX (`timestamp`)
)
ENGINE=mito
WITH(
append_mode = 'true'
);
-- Clickhouse create table clause
-- structured test
CREATE TABLE IF NOT EXISTS test_table
(
bytes UInt64 NOT NULL,
http_version String NOT NULL,
ip String NOT NULL,
method String NOT NULL,
path String NOT NULL,
status UInt8 NOT NULL,
user String NOT NULL,
timestamp String NOT NULL,
)
ENGINE = MergeTree()
ORDER BY (user, path, status);
-- unstructured test
SET allow_experimental_full_text_index = true;
CREATE TABLE IF NOT EXISTS test_table
(
message String,
timestamp String,
INDEX inv_idx(message) TYPE full_text(0) GRANULARITY 1
)
ENGINE = MergeTree()
ORDER BY tuple();

View File

@@ -1,199 +0,0 @@
# Query URL and payload for Elasticsearch
## Count
URL: `http://127.0.0.1:9200/_count`
## Query by timerange
URL: `http://127.0.0.1:9200/_search`
You can use the following payload to get the full timerange first.
```JSON
{"size":0,"aggs":{"max_timestamp":{"max":{"field":"timestamp"}},"min_timestamp":{"min":{"field":"timestamp"}}}}
```
And then use this payload to query by timerange.
```JSON
{
"from": 0,
"size": 1000,
"query": {
"range": {
"timestamp": {
"gte": "2024-08-16T04:30:44.000Z",
"lte": "2024-08-16T04:51:52.000Z"
}
}
}
}
```
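All of the payloads on this page can be sent with plain curl; a sketch assuming Elasticsearch is reachable locally without authentication and the payload has been saved as `payload.json` (an arbitrary name):
```bash
# Count all documents.
curl -s http://127.0.0.1:9200/_count

# Send one of the search payloads shown on this page.
curl -s -X POST http://127.0.0.1:9200/_search \
  -H 'Content-Type: application/json' \
  -d @payload.json
```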
## Query by condition
URL: `http://127.0.0.1:9200/_search`
### Structured payload
```JSON
{
"from": 0,
"size": 10000,
"query": {
"bool": {
"must": [
{
"term": {
"user.keyword": "CrucifiX"
}
},
{
"term": {
"method.keyword": "OPTION"
}
},
{
"term": {
"path.keyword": "/user/booperbot124"
}
},
{
"term": {
"http_version.keyword": "HTTP/1.1"
}
},
{
"term": {
"status": "401"
}
}
]
}
}
}
```
### Unstructured payload
```JSON
{
"from": 0,
"size": 10000,
"query": {
"bool": {
"must": [
{
"match_phrase": {
"message": "CrucifiX"
}
},
{
"match_phrase": {
"message": "OPTION"
}
},
{
"match_phrase": {
"message": "/user/booperbot124"
}
},
{
"match_phrase": {
"message": "HTTP/1.1"
}
},
{
"match_phrase": {
"message": "401"
}
}
]
}
}
}
```
## Query by condition and timerange
URL: `http://127.0.0.1:9200/_search`
### Structured payload
```JSON
{
"size": 10000,
"query": {
"bool": {
"must": [
{
"term": {
"user.keyword": "CrucifiX"
}
},
{
"term": {
"method.keyword": "OPTION"
}
},
{
"term": {
"path.keyword": "/user/booperbot124"
}
},
{
"term": {
"http_version.keyword": "HTTP/1.1"
}
},
{
"term": {
"status": "401"
}
},
{
"range": {
"timestamp": {
"gte": "2024-08-19T07:03:37.383Z",
"lte": "2024-08-19T07:24:58.883Z"
}
}
}
]
}
}
}
```
### Unstructured payload
```JSON
{
"size": 10000,
"query": {
"bool": {
"must": [
{
"match_phrase": {
"message": "CrucifiX"
}
},
{
"match_phrase": {
"message": "OPTION"
}
},
{
"match_phrase": {
"message": "/user/booperbot124"
}
},
{
"match_phrase": {
"message": "HTTP/1.1"
}
},
{
"match_phrase": {
"message": "401"
}
},
{
"range": {
"timestamp": {
"gte": "2024-08-19T05:16:17.099Z",
"lte": "2024-08-19T05:46:02.722Z"
}
}
}
]
}
}
}
```

View File

@@ -1,50 +0,0 @@
-- Structured query for GreptimeDB and Clickhouse
-- query count
select count(*) from test_table;
-- query by timerange. Note: place the timestamp range in the where clause
-- GreptimeDB
-- you can use `select max(timestamp)::bigint from test_table;` and `select min(timestamp)::bigint from test_table;`
-- to get the full timestamp range
select * from test_table where timestamp between 1723710843619 and 1723711367588;
-- Clickhouse
-- you can use `select max(timestamp) from test_table;` and `select min(timestamp) from test_table;`
-- to get the full timestamp range
select * from test_table where timestamp between '2024-08-16T03:58:46Z' and '2024-08-16T04:03:50Z';
-- query by condition
SELECT * FROM test_table WHERE user = 'CrucifiX' and method = 'OPTION' and path = '/user/booperbot124' and http_version = 'HTTP/1.1' and status = 401;
-- query by condition and timerange
-- GreptimeDB
SELECT * FROM test_table WHERE user = "CrucifiX" and method = "OPTION" and path = "/user/booperbot124" and http_version = "HTTP/1.1" and status = 401
and timestamp between 1723774396760 and 1723774788760;
-- Clickhouse
SELECT * FROM test_table WHERE user = 'CrucifiX' and method = 'OPTION' and path = '/user/booperbot124' and http_version = 'HTTP/1.1' and status = 401
and timestamp between '2024-08-16T03:58:46Z' and '2024-08-16T04:03:50Z';
-- Unstructured query for GreptimeDB and Clickhouse
-- query by condition
-- GreptimeDB
SELECT * FROM test_table WHERE MATCHES(message, "+CrucifiX +OPTION +/user/booperbot124 +HTTP/1.1 +401");
-- Clickhouse
SELECT * FROM test_table WHERE (message LIKE '%CrucifiX%')
AND (message LIKE '%OPTION%')
AND (message LIKE '%/user/booperbot124%')
AND (message LIKE '%HTTP/1.1%')
AND (message LIKE '%401%');
-- query by condition and timerange
-- GreptimeDB
SELECT * FROM test_table WHERE MATCHES(message, "+CrucifiX +OPTION +/user/booperbot124 +HTTP/1.1 +401")
and timestamp between 1723710843619 and 1723711367588;
-- Clickhouse
SELECT * FROM test_table WHERE (message LIKE '%CrucifiX%')
AND (message LIKE '%OPTION%')
AND (message LIKE '%/user/booperbot124%')
AND (message LIKE '%HTTP/1.1%')
AND (message LIKE '%401%')
AND timestamp between '2024-08-15T10:25:26.524000000Z' AND '2024-08-15T10:31:31.746000000Z';
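One possible way to run these statements, assuming default ports and the stock clients (hosts and queries below are illustrative):
```bash
# GreptimeDB speaks the MySQL wire protocol, so its statements can be issued with the mysql client.
mysql -h 127.0.0.1 -P 4002 -e "select count(*) from test_table;"

# The Clickhouse variants can be issued with clickhouse-client.
clickhouse-client --host 127.0.0.1 --query "select count(*) from test_table;"
```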

View File

@@ -1,57 +0,0 @@
# Please note we use a patched branch to build vector
# https://github.com/shuiyisong/vector/tree/chore/greptime_log_ingester_logitem
[sources.demo_logs]
type = "demo_logs"
format = "apache_common"
# interval value = 1 / rps
# say you want to insert at 20k/s, that is 1 / 20000 = 0.00005
# set to 0 to run as fast as possible
interval = 0
# total rows to insert
count = 100000000
lines = [ "line1" ]
[transforms.parse_logs]
type = "remap"
inputs = ["demo_logs"]
source = '''
. = parse_regex!(.message, r'^(?P<ip>\S+) - (?P<user>\S+) \[(?P<timestamp>[^\]]+)\] "(?P<method>\S+) (?P<path>\S+) (?P<http_version>\S+)" (?P<status>\d+) (?P<bytes>\d+)$')
# Convert timestamp to a standard format
.timestamp = parse_timestamp!(.timestamp, format: "%d/%b/%Y:%H:%M:%S %z")
# Convert status and bytes to integers
.status = to_int!(.status)
.bytes = to_int!(.bytes)
'''
[sinks.sink_greptime_logs]
type = "greptimedb_logs"
# The table to insert into
table = "test_table"
pipeline_name = "demo_pipeline"
compression = "none"
inputs = [ "parse_logs" ]
endpoint = "http://127.0.0.1:4000"
# Batch size for each insertion
batch.max_events = 4000
[sinks.clickhouse]
type = "clickhouse"
inputs = [ "parse_logs" ]
database = "default"
endpoint = "http://127.0.0.1:8123"
format = "json_each_row"
# The table to insert into
table = "test_table"
[sinks.sink_elasticsearch]
type = "elasticsearch"
inputs = [ "parse_logs" ]
api_version = "auto"
compression = "none"
doc_type = "_doc"
endpoints = [ "http://127.0.0.1:9200" ]
id_key = "id"
mode = "bulk"

View File

@@ -1,43 +0,0 @@
# Please note we use a patched branch to build vector
# https://github.com/shuiyisong/vector/tree/chore/greptime_log_ingester_ft
[sources.demo_logs]
type = "demo_logs"
format = "apache_common"
# interval value = 1 / rps
# say you want to insert at 20k/s, that is 1 / 20000 = 0.00005
# set to 0 to run as fast as possible
interval = 0
# total rows to insert
count = 100000000
lines = [ "line1" ]
[sinks.sink_greptime_logs]
type = "greptimedb_logs"
# The table to insert into
table = "test_table"
pipeline_name = "demo_pipeline"
compression = "none"
inputs = [ "demo_logs" ]
endpoint = "http://127.0.0.1:4000"
# Batch size for each insertion
batch.max_events = 500
[sinks.clickhouse]
type = "clickhouse"
inputs = [ "demo_logs" ]
database = "default"
endpoint = "http://127.0.0.1:8123"
format = "json_each_row"
# The table to insert into
table = "test_table"
[sinks.sink_elasticsearch]
type = "elasticsearch"
inputs = [ "demo_logs" ]
api_version = "auto"
compression = "none"
doc_type = "_doc"
endpoints = [ "http://127.0.0.1:9200" ]
id_key = "id"
mode = "bulk"

Some files were not shown because too many files have changed in this diff