Compare commits

..

4 Commits

Author SHA1 Message Date
Lei, HUANG  f7d2ede4a4  fix: static assets path prefix  2023-04-18 19:29:17 +08:00
Lei, HUANG  df6ebbf934  chore: remove push uhub step  2023-04-18 18:29:54 +08:00
Lei, HUANG  719fbf2f3a  chore: bump dashboard to v0.2.1  2023-04-18 14:32:07 +08:00
Lei, HUANG  cc14dea913  chore: bump version to v0.2.0  2023-04-17 15:16:49 +08:00
2254 changed files with 84315 additions and 377950 deletions


@@ -3,3 +3,13 @@ linker = "aarch64-linux-gnu-gcc"
[alias]
sqlness = "run --bin sqlness-runner --"
[build]
rustflags = [
# lints
# TODO: use lint configuration in cargo https://github.com/rust-lang/cargo/issues/5034
"-Wclippy::print_stdout",
"-Wclippy::print_stderr",
"-Wclippy::implicit_clone",
]
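Because these warn-level clippy lints live in .cargo/config.toml, they should be picked up automatically by a plain clippy run; a minimal local sketch (assuming a checked-out workspace using the config above):
# Hedged sketch: run clippy so the rustflags above (print_stdout, print_stderr, implicit_clone) take effect.
cargo clippy --workspace --all-targets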


@@ -1,15 +0,0 @@
# yaml-language-server: $schema=https://coderabbit.ai/integrations/schema.v2.json
language: "en-US"
early_access: false
reviews:
profile: "chill"
request_changes_workflow: false
high_level_summary: true
poem: true
review_status: true
collapse_walkthrough: false
auto_review:
enabled: false
drafts: false
chat:
auto_reply: true


@@ -1,3 +1,2 @@
[profile.default]
slow-timeout = { period = "60s", terminate-after = 3, grace-period = "30s" }
retries = { backoff = "exponential", count = 3, delay = "10s", jitter = true }


@@ -20,3 +20,6 @@ out/
# Rust
target/
# Git
.git


@@ -1,10 +0,0 @@
root = true
[*]
end_of_line = lf
indent_style = space
insert_final_newline = true
trim_trailing_whitespace = true
[{Makefile,**.mk}]
indent_style = tab


@@ -3,34 +3,8 @@ GT_S3_BUCKET=S3 bucket
GT_S3_ACCESS_KEY_ID=S3 access key id
GT_S3_ACCESS_KEY=S3 secret access key
GT_S3_ENDPOINT_URL=S3 endpoint url
GT_S3_REGION=S3 region
# Settings for oss test
GT_OSS_BUCKET=OSS bucket
GT_OSS_ACCESS_KEY_ID=OSS access key id
GT_OSS_ACCESS_KEY=OSS access key
GT_OSS_ENDPOINT=OSS endpoint
# Settings for azblob test
GT_AZBLOB_CONTAINER=AZBLOB container
GT_AZBLOB_ACCOUNT_NAME=AZBLOB account name
GT_AZBLOB_ACCOUNT_KEY=AZBLOB account key
GT_AZBLOB_ENDPOINT=AZBLOB endpoint
# Settings for gcs test
GT_GCS_BUCKET = GCS bucket
GT_GCS_SCOPE = GCS scope
GT_GCS_CREDENTIAL_PATH = GCS credential path
GT_GCS_CREDENTIAL = GCS credential
GT_GCS_ENDPOINT = GCS end point
# Settings for kafka wal test
GT_KAFKA_ENDPOINTS = localhost:9092
# Setting for fuzz tests
GT_MYSQL_ADDR = localhost:4002
# Setting for unstable fuzz tests
GT_FUZZ_BINARY_PATH=/path/to/
GT_FUZZ_INSTANCE_ROOT_DIR=/tmp/unstable_greptime
GT_FUZZ_INPUT_MAX_ROWS=2048
GT_FUZZ_INPUT_MAX_TABLES=32
GT_FUZZ_INPUT_MAX_COLUMNS=32
GT_FUZZ_INPUT_MAX_ALTER_ACTIONS=256
GT_FUZZ_INPUT_MAX_INSERT_ACTIONS=8

.github/CODEOWNERS (vendored, 27 changed lines)

@@ -1,27 +0,0 @@
# GreptimeDB CODEOWNERS
# These owners will be the default owners for everything in the repo.
* @GreptimeTeam/db-approver
## [Module] Database Engine
/src/index @zhongzc
/src/mito2 @evenyag @v0y4g3r @waynexia
/src/query @evenyag
## [Module] Distributed
/src/common/meta @MichaelScofield
/src/common/procedure @MichaelScofield
/src/meta-client @MichaelScofield
/src/meta-srv @MichaelScofield
## [Module] Write Ahead Log
/src/log-store @v0y4g3r
/src/store-api @v0y4g3r
## [Module] Metrics Engine
/src/metric-engine @waynexia
/src/promql @waynexia
## [Module] Flow
/src/flow @zhongzc @waynexia


@@ -1,7 +1,7 @@
---
name: Bug report
description: Is something not working? Help us fix it!
labels: [ "C-bug" ]
labels: [ "bug" ]
body:
- type: markdown
attributes:
@@ -21,7 +21,6 @@ body:
- Locking issue
- Performance issue
- Unexpected error
- User Experience
- Other
validations:
required: true
@@ -34,40 +33,21 @@ body:
multiple: true
options:
- Standalone mode
- Distributed Cluster
- Storage Engine
- Query Engine
- Table Engine
- Write Protocols
- Metasrv
- Frontend
- Datanode
- Meta
- Other
validations:
required: true
- type: textarea
id: reproduce
id: what-happened
attributes:
label: Minimal reproduce step
label: What happened?
description: |
Please walk us through and provide steps and details on how
to reproduce the issue. If possible, provide scripts that we
can run to trigger the bug.
validations:
required: true
- type: textarea
id: expected-manner
attributes:
label: What did you expect to see?
validations:
required: true
- type: textarea
id: actual-manner
attributes:
label: What did you see instead?
Tell us what happened and also what you would have expected to
happen instead.
placeholder: "Describe the bug"
validations:
required: true
@@ -83,17 +63,6 @@ body:
validations:
required: true
- type: input
id: greptimedb
attributes:
label: What version of GreptimeDB did you use?
description: |
Please provide the version of GreptimeDB, e.g. 0.5.1.
You can get it by running `greptime --version`.
placeholder: "0.5.1"
validations:
required: true
- type: textarea
id: logs
attributes:
@@ -103,3 +72,15 @@ body:
trace. This will be automatically formatted into code, so no
need for backticks.
render: bash
- type: textarea
id: reproduce
attributes:
label: How can we reproduce the bug?
description: |
Please walk us through and provide steps and details on how
to reproduce the issue. If possible, provide scripts that we
can run to trigger the bug.
render: bash
validations:
required: true


@@ -4,5 +4,5 @@ contact_links:
url: https://greptime.com/slack
about: Get free help from the Greptime community
- name: Greptime Community Discussion
url: https://github.com/greptimeTeam/discussions
url: https://github.com/greptimeTeam/greptimedb/discussions
about: Get free help from the Greptime community


@@ -1,7 +1,7 @@
---
name: Enhancement
description: Suggest an enhancement to existing functionality
labels: [ "C-enhancement" ]
labels: [ "enhancement" ]
body:
- type: dropdown
id: type


@@ -1,7 +1,7 @@
---
name: New Feature
name: Feature request
description: Suggest a new feature for GreptimeDB
labels: [ "C-feature" ]
labels: [ "feature request" ]
body:
- type: markdown
id: info


@@ -1,18 +0,0 @@
name: Build and push CI Docker image
description: Build and push CI Docker image to local registry
inputs:
binary_path:
default: "./bin"
description: "Binary path"
runs:
using: composite
steps:
- name: Build and push to local registry
uses: docker/build-push-action@v5
with:
context: .
file: ./docker/ci/ubuntu/Dockerfile.fuzztests
push: true
tags: localhost:5001/greptime/greptimedb:latest
build-args: |
BINARY_PATH=${{ inputs.binary_path }}
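For reference, the docker/build-push-action step above corresponds roughly to the following manual invocation; this is a hedged sketch, assuming buildx is set up and the localhost:5001 registry from the kind setup script is running, with ./bin holding a prebuilt greptime binary:
# Rough local equivalent of the composite action above (example paths, not confirmed by this diff).
docker buildx build . \
  -f ./docker/ci/ubuntu/Dockerfile.fuzztests \
  --build-arg BINARY_PATH=./bin \
  -t localhost:5001/greptime/greptimedb:latest \
  --push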


@@ -1,76 +0,0 @@
name: Build and push dev-builder images
description: Build and push dev-builder images to DockerHub and ACR
inputs:
dockerhub-image-registry:
description: The dockerhub image registry to store the images
required: false
default: docker.io
dockerhub-image-registry-username:
description: The dockerhub username to login to the image registry
required: true
dockerhub-image-registry-token:
description: The dockerhub token to login to the image registry
required: true
dockerhub-image-namespace:
description: The dockerhub namespace of the image registry to store the images
required: false
default: greptime
version:
description: Version of the dev-builder
required: false
default: latest
build-dev-builder-ubuntu:
description: Build dev-builder-ubuntu image
required: false
default: "true"
build-dev-builder-centos:
description: Build dev-builder-centos image
required: false
default: "true"
build-dev-builder-android:
description: Build dev-builder-android image
required: false
default: "true"
runs:
using: composite
steps:
- name: Login to Dockerhub
uses: docker/login-action@v2
with:
registry: ${{ inputs.dockerhub-image-registry }}
username: ${{ inputs.dockerhub-image-registry-username }}
password: ${{ inputs.dockerhub-image-registry-token }}
- name: Build and push dev-builder-ubuntu image
shell: bash
if: ${{ inputs.build-dev-builder-ubuntu == 'true' }}
run: |
make dev-builder \
BASE_IMAGE=ubuntu \
BUILDX_MULTI_PLATFORM_BUILD=all \
IMAGE_REGISTRY=${{ inputs.dockerhub-image-registry }} \
IMAGE_NAMESPACE=${{ inputs.dockerhub-image-namespace }} \
DEV_BUILDER_IMAGE_TAG=${{ inputs.version }}
- name: Build and push dev-builder-centos image
shell: bash
if: ${{ inputs.build-dev-builder-centos == 'true' }}
run: |
make dev-builder \
BASE_IMAGE=centos \
BUILDX_MULTI_PLATFORM_BUILD=amd64 \
IMAGE_REGISTRY=${{ inputs.dockerhub-image-registry }} \
IMAGE_NAMESPACE=${{ inputs.dockerhub-image-namespace }} \
DEV_BUILDER_IMAGE_TAG=${{ inputs.version }}
- name: Build and push dev-builder-android image # Only build image for amd64 platform.
shell: bash
if: ${{ inputs.build-dev-builder-android == 'true' }}
run: |
make dev-builder \
BASE_IMAGE=android \
IMAGE_REGISTRY=${{ inputs.dockerhub-image-registry }} \
IMAGE_NAMESPACE=${{ inputs.dockerhub-image-namespace }} \
DEV_BUILDER_IMAGE_TAG=${{ inputs.version }} && \
docker push ${{ inputs.dockerhub-image-registry }}/${{ inputs.dockerhub-image-namespace }}/dev-builder-android:${{ inputs.version }}


@@ -1,77 +0,0 @@
name: Build greptime binary
description: Build and upload the single linux artifact
inputs:
base-image:
description: Base image to build greptime
required: true
features:
description: Cargo features to build
required: true
cargo-profile:
description: Cargo profile to build
required: true
artifacts-dir:
description: Directory to store artifacts
required: true
version:
description: Version of the artifact
required: true
working-dir:
description: Working directory to build the artifacts
required: false
default: .
build-android-artifacts:
description: Build android artifacts
required: false
default: 'false'
image-namespace:
description: Image Namespace
required: false
default: 'greptime'
image-registry:
description: Image Registry
required: false
default: 'docker.io'
runs:
using: composite
steps:
- name: Build greptime binary
shell: bash
if: ${{ inputs.build-android-artifacts == 'false' }}
run: |
cd ${{ inputs.working-dir }} && \
make build-by-dev-builder \
CARGO_PROFILE=${{ inputs.cargo-profile }} \
FEATURES=${{ inputs.features }} \
BASE_IMAGE=${{ inputs.base-image }} \
IMAGE_NAMESPACE=${{ inputs.image-namespace }} \
IMAGE_REGISTRY=${{ inputs.image-registry }}
- name: Upload artifacts
uses: ./.github/actions/upload-artifacts
if: ${{ inputs.build-android-artifacts == 'false' }}
env:
PROFILE_TARGET: ${{ inputs.cargo-profile == 'dev' && 'debug' || inputs.cargo-profile }}
with:
artifacts-dir: ${{ inputs.artifacts-dir }}
target-file: ./target/$PROFILE_TARGET/greptime
version: ${{ inputs.version }}
working-dir: ${{ inputs.working-dir }}
# TODO(zyy17): We can remove build-android-artifacts flag in the future.
- name: Build greptime binary
shell: bash
if: ${{ inputs.build-android-artifacts == 'true' }}
run: |
cd ${{ inputs.working-dir }} && make strip-android-bin \
IMAGE_NAMESPACE=${{ inputs.image-namespace }} \
IMAGE_REGISTRY=${{ inputs.image-registry }}
- name: Upload android artifacts
uses: ./.github/actions/upload-artifacts
if: ${{ inputs.build-android-artifacts == 'true' }}
with:
artifacts-dir: ${{ inputs.artifacts-dir }}
target-file: ./target/aarch64-linux-android/release/greptime
version: ${{ inputs.version }}
working-dir: ${{ inputs.working-dir }}


@@ -1,104 +0,0 @@
name: Build greptime images
description: Build and push greptime images
inputs:
image-registry:
description: The image registry to store the images
required: true
image-registry-username:
description: The username to login to the image registry
required: true
image-registry-password:
description: The password to login to the image registry
required: true
amd64-artifact-name:
description: The name of the amd64 artifact for building images
required: true
arm64-artifact-name:
description: The name of the arm64 artifact for building images
required: false
default: ""
image-namespace:
description: The namespace of the image registry to store the images
required: true
image-name:
description: The name of the image to build
required: true
image-tag:
description: The tag of the image to build
required: true
docker-file:
description: The path to the Dockerfile to build
required: true
platforms:
description: The supported platforms to build the image
required: true
push-latest-tag:
description: Whether to push the latest tag
required: false
default: 'true'
runs:
using: composite
steps:
- name: Login to image registry
uses: docker/login-action@v2
with:
registry: ${{ inputs.image-registry }}
username: ${{ inputs.image-registry-username }}
password: ${{ inputs.image-registry-password }}
- name: Set up qemu for multi-platform builds
uses: docker/setup-qemu-action@v2
- name: Set up buildx
uses: docker/setup-buildx-action@v2
- name: Download amd64 artifacts
uses: actions/download-artifact@v4
with:
name: ${{ inputs.amd64-artifact-name }}
- name: Unzip the amd64 artifacts
shell: bash
run: |
tar xvf ${{ inputs.amd64-artifact-name }}.tar.gz && \
rm ${{ inputs.amd64-artifact-name }}.tar.gz && \
rm -rf amd64 && \
mv ${{ inputs.amd64-artifact-name }} amd64
- name: Download arm64 artifacts
uses: actions/download-artifact@v4
if: ${{ inputs.arm64-artifact-name }}
with:
name: ${{ inputs.arm64-artifact-name }}
- name: Unzip the arm64 artifacts
shell: bash
if: ${{ inputs.arm64-artifact-name }}
run: |
tar xvf ${{ inputs.arm64-artifact-name }}.tar.gz && \
rm ${{ inputs.arm64-artifact-name }}.tar.gz && \
rm -rf arm64 && \
mv ${{ inputs.arm64-artifact-name }} arm64
- name: Build and push images(without latest) for amd64 and arm64
if: ${{ inputs.push-latest-tag == 'false' }}
uses: docker/build-push-action@v3
with:
context: .
file: ${{ inputs.docker-file }}
push: true
platforms: ${{ inputs.platforms }}
tags: |
${{ inputs.image-registry }}/${{ inputs.image-namespace }}/${{ inputs.image-name }}:${{ inputs.image-tag }}
- name: Build and push images for amd64 and arm64
if: ${{ inputs.push-latest-tag == 'true' }}
uses: docker/build-push-action@v3
with:
context: .
file: ${{ inputs.docker-file }}
push: true
platforms: ${{ inputs.platforms }}
tags: |
${{ inputs.image-registry }}/${{ inputs.image-namespace }}/${{ inputs.image-name }}:latest
${{ inputs.image-registry }}/${{ inputs.image-namespace }}/${{ inputs.image-name }}:${{ inputs.image-tag }}


@@ -1,62 +0,0 @@
name: Group for building greptimedb images
description: Group for building greptimedb images
inputs:
image-registry:
description: The image registry to store the images
required: true
image-namespace:
description: The namespace of the image registry to store the images
required: true
image-name:
description: The name of the image to build
required: false
default: greptimedb
image-registry-username:
description: The username to login to the image registry
required: true
image-registry-password:
description: The password to login to the image registry
required: true
version:
description: Version of the artifact
required: true
push-latest-tag:
description: Whether to push the latest tag
required: false
default: 'true'
dev-mode:
description: Enable dev mode, only build standard greptime
required: false
default: 'false'
runs:
using: composite
steps:
- name: Build and push standard images to dockerhub
uses: ./.github/actions/build-greptime-images
with: # The image will be used as '${{ inputs.image-registry }}/${{ inputs.image-namespace }}/${{ inputs.image-name }}:${{ inputs.version }}'
image-registry: ${{ inputs.image-registry }}
image-namespace: ${{ inputs.image-namespace }}
image-registry-username: ${{ inputs.image-registry-username }}
image-registry-password: ${{ inputs.image-registry-password }}
image-name: ${{ inputs.image-name }}
image-tag: ${{ inputs.version }}
docker-file: docker/ci/ubuntu/Dockerfile
amd64-artifact-name: greptime-linux-amd64-pyo3-${{ inputs.version }}
arm64-artifact-name: greptime-linux-arm64-pyo3-${{ inputs.version }}
platforms: linux/amd64,linux/arm64
push-latest-tag: ${{ inputs.push-latest-tag }}
- name: Build and push centos images to dockerhub
if: ${{ inputs.dev-mode == 'false' }}
uses: ./.github/actions/build-greptime-images
with:
image-registry: ${{ inputs.image-registry }}
image-namespace: ${{ inputs.image-namespace }}
image-registry-username: ${{ inputs.image-registry-username }}
image-registry-password: ${{ inputs.image-registry-password }}
image-name: ${{ inputs.image-name }}-centos
image-tag: ${{ inputs.version }}
docker-file: docker/ci/centos/Dockerfile
amd64-artifact-name: greptime-linux-amd64-centos-${{ inputs.version }}
platforms: linux/amd64
push-latest-tag: ${{ inputs.push-latest-tag }}


@@ -1,104 +0,0 @@
name: Build linux artifacts
description: Build linux artifacts
inputs:
arch:
description: Architecture to build
required: true
cargo-profile:
description: Cargo profile to build
required: true
version:
description: Version of the artifact
required: true
disable-run-tests:
description: Disable running integration tests
required: true
dev-mode:
description: Enable dev mode, only build standard greptime
required: false
default: "false"
image-namespace:
description: Image Namespace
required: true
image-registry:
description: Image Registry
required: true
working-dir:
description: Working directory to build the artifacts
required: false
default: .
runs:
using: composite
steps:
- name: Run integration test
if: ${{ inputs.disable-run-tests == 'false' }}
shell: bash
# NOTE: If BUILD_JOBS > 4, the build always runs out of memory on the EC2 instance.
run: |
cd ${{ inputs.working-dir }} && \
make run-it-in-container BUILD_JOBS=4 \
IMAGE_NAMESPACE=${{ inputs.image-namespace }} \
IMAGE_REGISTRY=${{ inputs.image-registry }}
- name: Upload sqlness logs
if: ${{ failure() && inputs.disable-run-tests == 'false' }} # Only upload logs when the integration tests failed.
uses: actions/upload-artifact@v4
with:
name: sqlness-logs
path: /tmp/greptime-*.log
retention-days: 3
- name: Build standard greptime
uses: ./.github/actions/build-greptime-binary
with:
base-image: ubuntu
features: pyo3_backend,servers/dashboard
cargo-profile: ${{ inputs.cargo-profile }}
artifacts-dir: greptime-linux-${{ inputs.arch }}-pyo3-${{ inputs.version }}
version: ${{ inputs.version }}
working-dir: ${{ inputs.working-dir }}
image-registry: ${{ inputs.image-registry }}
image-namespace: ${{ inputs.image-namespace }}
- name: Build greptime without pyo3
if: ${{ inputs.dev-mode == 'false' }}
uses: ./.github/actions/build-greptime-binary
with:
base-image: ubuntu
features: servers/dashboard
cargo-profile: ${{ inputs.cargo-profile }}
artifacts-dir: greptime-linux-${{ inputs.arch }}-${{ inputs.version }}
version: ${{ inputs.version }}
working-dir: ${{ inputs.working-dir }}
image-registry: ${{ inputs.image-registry }}
image-namespace: ${{ inputs.image-namespace }}
- name: Clean up the target directory # Clean up the target directory before the centos base image build, otherwise it would reuse the objects from the previous build.
shell: bash
run: |
rm -rf ./target/
- name: Build greptime on centos base image
uses: ./.github/actions/build-greptime-binary
if: ${{ inputs.arch == 'amd64' && inputs.dev-mode == 'false' }} # Builds greptime for centos if the host machine is amd64.
with:
base-image: centos
features: servers/dashboard
cargo-profile: ${{ inputs.cargo-profile }}
artifacts-dir: greptime-linux-${{ inputs.arch }}-centos-${{ inputs.version }}
version: ${{ inputs.version }}
working-dir: ${{ inputs.working-dir }}
image-registry: ${{ inputs.image-registry }}
image-namespace: ${{ inputs.image-namespace }}
- name: Build greptime on android base image
uses: ./.github/actions/build-greptime-binary
if: ${{ inputs.arch == 'amd64' && inputs.dev-mode == 'false' }} # Builds the arm64 greptime binary for android if the host machine is amd64.
with:
base-image: android
artifacts-dir: greptime-android-arm64-${{ inputs.version }}
version: ${{ inputs.version }}
working-dir: ${{ inputs.working-dir }}
build-android-artifacts: true
image-registry: ${{ inputs.image-registry }}
image-namespace: ${{ inputs.image-namespace }}


@@ -1,94 +0,0 @@
name: Build macos artifacts
description: Build macos artifacts
inputs:
arch:
description: Architecture to build
required: true
cargo-profile:
description: Cargo profile to build
required: true
features:
description: Cargo features to build
required: true
version:
description: Version of the artifact
required: true
disable-run-tests:
description: Disable running integration tests
required: true
artifacts-dir:
description: Directory to store artifacts
required: true
runs:
using: composite
steps:
- name: Cache cargo assets
id: cache
uses: actions/cache@v3
with:
path: |
~/.cargo/bin/
~/.cargo/registry/index/
~/.cargo/registry/cache/
~/.cargo/git/db/
target/
key: ${{ inputs.arch }}-build-cargo-${{ hashFiles('**/Cargo.lock') }}
- name: Install protoc
shell: bash
run: |
brew install protobuf
- name: Install rust toolchain
uses: actions-rust-lang/setup-rust-toolchain@v1
with:
target: ${{ inputs.arch }}
- name: Start etcd # For integration tests.
if: ${{ inputs.disable-run-tests == 'false' }}
shell: bash
run: |
brew install etcd && \
brew services start etcd
- name: Install latest nextest release # For integration tests.
if: ${{ inputs.disable-run-tests == 'false' }}
uses: taiki-e/install-action@nextest
# Get proper backtraces in mac Sonoma. Currently there's an issue with the new
# linker that prevents backtraces from getting printed correctly.
#
# <https://github.com/rust-lang/rust/issues/113783>
- name: Run integration tests
if: ${{ inputs.disable-run-tests == 'false' }}
shell: bash
env:
CARGO_BUILD_RUSTFLAGS: "-Clink-arg=-Wl,-ld_classic"
SQLNESS_OPTS: "--preserve-state"
run: |
make test sqlness-test
- name: Upload sqlness logs
if: ${{ failure() }} # Only upload logs when the integration tests failed.
uses: actions/upload-artifact@v4
with:
name: sqlness-logs
path: /tmp/greptime-*.log
retention-days: 3
- name: Build greptime binary
shell: bash
env:
CARGO_BUILD_RUSTFLAGS: "-Clink-arg=-Wl,-ld_classic"
run: |
make build \
CARGO_PROFILE=${{ inputs.cargo-profile }} \
FEATURES=${{ inputs.features }} \
TARGET=${{ inputs.arch }}
- name: Upload artifacts
uses: ./.github/actions/upload-artifacts
with:
artifacts-dir: ${{ inputs.artifacts-dir }}
target-file: target/${{ inputs.arch }}/${{ inputs.cargo-profile }}/greptime
version: ${{ inputs.version }}


@@ -1,80 +0,0 @@
name: Build Windows artifacts
description: Build Windows artifacts
inputs:
arch:
description: Architecture to build
required: true
cargo-profile:
description: Cargo profile to build
required: true
features:
description: Cargo features to build
required: true
version:
description: Version of the artifact
required: true
disable-run-tests:
description: Disable running integration tests
required: true
artifacts-dir:
description: Directory to store artifacts
required: true
runs:
using: composite
steps:
- uses: arduino/setup-protoc@v3
- name: Install rust toolchain
uses: actions-rust-lang/setup-rust-toolchain@v1
with:
target: ${{ inputs.arch }}
components: llvm-tools-preview
- name: Rust Cache
uses: Swatinem/rust-cache@v2
- name: Install Python
uses: actions/setup-python@v5
with:
python-version: "3.10"
- name: Install PyArrow Package
shell: pwsh
run: pip install pyarrow numpy
- name: Install WSL distribution
uses: Vampire/setup-wsl@v2
with:
distribution: Ubuntu-22.04
- name: Install latest nextest release # For integration tests.
if: ${{ inputs.disable-run-tests == 'false' }}
uses: taiki-e/install-action@nextest
- name: Run integration tests
if: ${{ inputs.disable-run-tests == 'false' }}
shell: pwsh
run: make test sqlness-test
env:
RUSTUP_WINDOWS_PATH_ADD_BIN: 1 # Workaround for https://github.com/nextest-rs/nextest/issues/1493
RUST_BACKTRACE: 1
SQLNESS_OPTS: "--preserve-state"
- name: Upload sqlness logs
if: ${{ failure() }} # Only upload logs when the integration tests failed.
uses: actions/upload-artifact@v4
with:
name: sqlness-logs
path: C:\Users\RUNNER~1\AppData\Local\Temp\sqlness*
retention-days: 3
- name: Build greptime binary
shell: pwsh
run: cargo build --profile ${{ inputs.cargo-profile }} --features ${{ inputs.features }} --target ${{ inputs.arch }} --bin greptime
- name: Upload artifacts
uses: ./.github/actions/upload-artifacts
with:
artifacts-dir: ${{ inputs.artifacts-dir }}
target-file: target/${{ inputs.arch }}/${{ inputs.cargo-profile }}/greptime
version: ${{ inputs.version }}


@@ -1,31 +0,0 @@
name: Deploy GreptimeDB cluster
description: Deploy GreptimeDB cluster on Kubernetes
inputs:
aws-ci-test-bucket:
description: 'AWS S3 bucket name for testing'
required: true
aws-region:
description: 'AWS region for testing'
required: true
data-root:
description: 'Data root for testing'
required: true
aws-access-key-id:
description: 'AWS access key id for testing'
required: true
aws-secret-access-key:
description: 'AWS secret access key for testing'
required: true
runs:
using: composite
steps:
- name: Deploy GreptimeDB by Helm
shell: bash
env:
DATA_ROOT: ${{ inputs.data-root }}
AWS_CI_TEST_BUCKET: ${{ inputs.aws-ci-test-bucket }}
AWS_REGION: ${{ inputs.aws-region }}
AWS_ACCESS_KEY_ID: ${{ inputs.aws-access-key-id }}
AWS_SECRET_ACCESS_KEY: ${{ inputs.aws-secret-access-key }}
run: |
./.github/scripts/deploy-greptimedb.sh


@@ -1,19 +0,0 @@
name: Fuzz Test
description: 'Fuzz test given setup and service'
inputs:
target:
description: "The fuzz target to test"
required: true
max-total-time:
description: "Max total time(secs)"
required: true
unstable:
default: 'false'
description: "Enable unstable feature"
runs:
using: composite
steps:
- name: Run Fuzz Test
shell: bash
run: cargo fuzz run ${{ inputs.target }} --fuzz-dir tests-fuzz -D -s none ${{ inputs.unstable == 'true' && '--features=unstable' || '' }} -- -max_total_time=${{ inputs.max-total-time }}
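Substituting concrete inputs shows what this composite step actually executes; the target name and timeout below are purely illustrative, not values taken from this diff:
# Hypothetical expansion of the fuzz step with target=fuzz_insert, max-total-time=120 and unstable=false.
cargo fuzz run fuzz_insert --fuzz-dir tests-fuzz -D -s none -- -max_total_time=120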


@@ -1,53 +0,0 @@
name: Publish GitHub release
description: Publish GitHub release
inputs:
version:
description: Version to release
required: true
runs:
using: composite
steps:
# Download artifacts from previous jobs, the artifacts will be downloaded to:
# ${WORKING_DIR}
# |- greptime-darwin-amd64-pyo3-v0.5.0/greptime-darwin-amd64-pyo3-v0.5.0.tar.gz
# |- greptime-darwin-amd64-pyo3-v0.5.0.sha256sum/greptime-darwin-amd64-pyo3-v0.5.0.sha256sum
# |- greptime-darwin-amd64-v0.5.0/greptime-darwin-amd64-v0.5.0.tar.gz
# |- greptime-darwin-amd64-v0.5.0.sha256sum/greptime-darwin-amd64-v0.5.0.sha256sum
# ...
- name: Download artifacts
uses: actions/download-artifact@v4
- name: Create git tag for release
if: ${{ github.event_name != 'push' }} # Meaning this is a scheduled or manual workflow.
shell: bash
run: |
git tag ${{ inputs.version }}
# Only publish release when the release tag is like v1.0.0, v1.0.1, v1.0.2, etc.
- name: Set release arguments
shell: bash
run: |
if [[ "${{ inputs.version }}" =~ ^v[0-9]+\.[0-9]+\.[0-9]+$ ]]; then
echo "prerelease=false" >> $GITHUB_ENV
echo "makeLatest=true" >> $GITHUB_ENV
echo "generateReleaseNotes=false" >> $GITHUB_ENV
echo "omitBody=true" >> $GITHUB_ENV
else
echo "prerelease=true" >> $GITHUB_ENV
echo "makeLatest=false" >> $GITHUB_ENV
echo "generateReleaseNotes=true" >> $GITHUB_ENV
echo "omitBody=false" >> $GITHUB_ENV
fi
- name: Publish release
uses: ncipollo/release-action@v1
with:
name: "Release ${{ inputs.version }}"
prerelease: ${{ env.prerelease }}
makeLatest: ${{ env.makeLatest }}
tag: ${{ inputs.version }}
generateReleaseNotes: ${{ env.generateReleaseNotes }}
omitBody: ${{ env.omitBody }} # omitBody is true when the release is an official release.
allowUpdates: true
artifacts: |
**/greptime-*/*
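The bash test in the "Set release arguments" step treats only bare vMAJOR.MINOR.PATCH tags as official releases; a small sketch of the same check, runnable outside CI:
# Hedged sketch of the tag classification above: plain semver tags become official releases, everything else a prerelease.
for v in v1.0.2 v0.2.0-nightly-20230313; do
  if [[ "$v" =~ ^v[0-9]+\.[0-9]+\.[0-9]+$ ]]; then
    echo "$v -> official release (prerelease=false)"
  else
    echo "$v -> prerelease=true"
  fi
done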


@@ -1,138 +0,0 @@
name: Release CN artifacts
description: Release artifacts to CN region
inputs:
src-image-registry:
description: The source image registry to store the images
required: true
default: docker.io
src-image-namespace:
description: The namespace of the source image registry to store the images
required: true
default: greptime
src-image-name:
description: The name of the source image
required: false
default: greptimedb
dst-image-registry:
description: The destination image registry to store the images
required: true
dst-image-namespace:
description: The namespace of the destination image registry to store the images
required: true
default: greptime
dst-image-registry-username:
description: The username to login to the image registry
required: true
dst-image-registry-password:
description: The password to login to the image registry
required: true
version:
description: Version of the artifact
required: true
dev-mode:
description: Enable dev mode, only push standard greptime
required: false
default: 'false'
push-latest-tag:
description: Whether to push the latest tag of the image
required: false
default: 'true'
aws-cn-s3-bucket:
description: S3 bucket to store released artifacts in CN region
required: true
aws-cn-access-key-id:
description: AWS access key id in CN region
required: true
aws-cn-secret-access-key:
description: AWS secret access key in CN region
required: true
aws-cn-region:
description: AWS region in CN
required: true
upload-to-s3:
description: Upload to S3
required: false
default: 'true'
artifacts-dir:
description: Directory to store artifacts
required: false
default: 'artifacts'
update-version-info:
description: Update the version info in S3
required: false
default: 'true'
upload-max-retry-times:
description: Max retry times for uploading artifacts to S3
required: false
default: "20"
upload-retry-timeout:
description: Timeout for uploading artifacts to S3
required: false
default: "30" # minutes
runs:
using: composite
steps:
- name: Download artifacts
uses: actions/download-artifact@v4
with:
path: ${{ inputs.artifacts-dir }}
- name: Release artifacts to cn region
uses: nick-invision/retry@v2
if: ${{ inputs.upload-to-s3 == 'true' }}
env:
AWS_ACCESS_KEY_ID: ${{ inputs.aws-cn-access-key-id }}
AWS_SECRET_ACCESS_KEY: ${{ inputs.aws-cn-secret-access-key }}
AWS_DEFAULT_REGION: ${{ inputs.aws-cn-region }}
UPDATE_VERSION_INFO: ${{ inputs.update-version-info }}
with:
max_attempts: ${{ inputs.upload-max-retry-times }}
timeout_minutes: ${{ inputs.upload-retry-timeout }}
command: |
./.github/scripts/upload-artifacts-to-s3.sh \
${{ inputs.artifacts-dir }} \
${{ inputs.version }} \
${{ inputs.aws-cn-s3-bucket }}
- name: Push greptimedb image from Dockerhub to ACR
shell: bash
env:
DST_REGISTRY_USERNAME: ${{ inputs.dst-image-registry-username }}
DST_REGISTRY_PASSWORD: ${{ inputs.dst-image-registry-password }}
run: |
./.github/scripts/copy-image.sh \
${{ inputs.src-image-registry }}/${{ inputs.src-image-namespace }}/${{ inputs.src-image-name }}:${{ inputs.version }} \
${{ inputs.dst-image-registry }}/${{ inputs.dst-image-namespace }}
- name: Push latest greptimedb image from Dockerhub to ACR
shell: bash
if: ${{ inputs.push-latest-tag == 'true' }}
env:
DST_REGISTRY_USERNAME: ${{ inputs.dst-image-registry-username }}
DST_REGISTRY_PASSWORD: ${{ inputs.dst-image-registry-password }}
run: |
./.github/scripts/copy-image.sh \
${{ inputs.src-image-registry }}/${{ inputs.src-image-namespace }}/${{ inputs.src-image-name }}:latest \
${{ inputs.dst-image-registry }}/${{ inputs.dst-image-namespace }}
- name: Push greptimedb-centos image from DockerHub to ACR
shell: bash
if: ${{ inputs.dev-mode == 'false' }}
env:
DST_REGISTRY_USERNAME: ${{ inputs.dst-image-registry-username }}
DST_REGISTRY_PASSWORD: ${{ inputs.dst-image-registry-password }}
run: |
./.github/scripts/copy-image.sh \
${{ inputs.src-image-registry }}/${{ inputs.src-image-namespace }}/${{ inputs.src-image-name }}-centos:${{ inputs.version }} \
${{ inputs.dst-image-registry }}/${{ inputs.dst-image-namespace }}
- name: Push latest greptimedb-centos image from DockerHub to ACR
shell: bash
if: ${{ inputs.dev-mode == 'false' && inputs.push-latest-tag == 'true' }}
env:
DST_REGISTRY_USERNAME: ${{ inputs.dst-image-registry-username }}
DST_REGISTRY_PASSWORD: ${{ inputs.dst-image-registry-password }}
run: |
./.github/scripts/copy-image.sh \
${{ inputs.src-image-registry }}/${{ inputs.src-image-namespace }}/${{ inputs.src-image-name }}-centos:latest \
${{ inputs.dst-image-registry }}/${{ inputs.dst-image-namespace }}


@@ -1,17 +0,0 @@
name: Setup Kind
description: Deploy Kind
runs:
using: composite
steps:
- uses: actions/checkout@v4
- name: Create kind cluster
shell: bash
run: |
helm repo add chaos-mesh https://charts.chaos-mesh.org
kubectl create ns chaos-mesh
helm install chaos-mesh chaos-mesh/chaos-mesh -n=chaos-mesh --version 2.6.3
- name: Print Chaos-mesh
if: always()
shell: bash
run: |
kubectl get po -n chaos-mesh


@@ -1,16 +0,0 @@
name: Setup cyborg environment
description: Setup cyborg environment
runs:
using: composite
steps:
- uses: actions/setup-node@v4
with:
node-version: 22
- uses: pnpm/action-setup@v3
with:
package_json_file: 'cyborg/package.json'
run_install: true
- name: Describe the Environment
working-directory: cyborg
shell: bash
run: pnpm tsx -v


@@ -1,27 +0,0 @@
name: Setup Etcd cluster
description: Deploy Etcd cluster on Kubernetes
inputs:
etcd-replicas:
default: 1
description: "Etcd replicas"
namespace:
default: "etcd-cluster"
runs:
using: composite
steps:
- name: Install Etcd cluster
shell: bash
run: |
helm upgrade \
--install etcd oci://registry-1.docker.io/bitnamicharts/etcd \
--set replicaCount=${{ inputs.etcd-replicas }} \
--set resources.requests.cpu=50m \
--set resources.requests.memory=128Mi \
--set resources.limits.cpu=1500m \
--set resources.limits.memory=2Gi \
--set auth.rbac.create=false \
--set auth.rbac.token.enabled=false \
--set persistence.size=2Gi \
--create-namespace \
-n ${{ inputs.namespace }}
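The action itself does not wait for the etcd pods; a follow-up readiness check, mirroring what deploy-greptimedb.sh does for its own etcd install (assuming the default "etcd-cluster" namespace input and the chart's default StatefulSet name), could be:
# Hedged readiness check for the etcd install above.
kubectl rollout status statefulset/etcd -n etcd-cluster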


@@ -1,95 +0,0 @@
name: Setup GreptimeDB cluster
description: Deploy GreptimeDB cluster on Kubernetes
inputs:
frontend-replicas:
default: 2
description: "Number of Frontend replicas"
datanode-replicas:
default: 2
description: "Number of Datanode replicas"
meta-replicas:
default: 3
description: "Number of Metasrv replicas"
image-registry:
default: "docker.io"
description: "Image registry"
image-repository:
default: "greptime/greptimedb"
description: "Image repository"
image-tag:
default: "latest"
description: 'Image tag'
etcd-endpoints:
default: "etcd.etcd-cluster.svc.cluster.local:2379"
description: "Etcd endpoints"
values-filename:
default: "with-minio.yaml"
enable-region-failover:
default: false
runs:
using: composite
steps:
- name: Install GreptimeDB operator
uses: nick-fields/retry@v3
with:
timeout_minutes: 3
max_attempts: 3
shell: bash
command: |
helm repo add greptime https://greptimeteam.github.io/helm-charts/
helm repo update
helm upgrade \
--install \
--create-namespace \
greptimedb-operator greptime/greptimedb-operator \
-n greptimedb-admin \
--wait \
--wait-for-jobs
- name: Install GreptimeDB cluster
shell: bash
run: |
helm upgrade \
--install my-greptimedb \
--set meta.etcdEndpoints=${{ inputs.etcd-endpoints }} \
--set meta.enableRegionFailover=${{ inputs.enable-region-failover }} \
--set image.registry=${{ inputs.image-registry }} \
--set image.repository=${{ inputs.image-repository }} \
--set image.tag=${{ inputs.image-tag }} \
--set base.podTemplate.main.resources.requests.cpu=50m \
--set base.podTemplate.main.resources.requests.memory=256Mi \
--set base.podTemplate.main.resources.limits.cpu=1000m \
--set base.podTemplate.main.resources.limits.memory=2Gi \
--set frontend.replicas=${{ inputs.frontend-replicas }} \
--set datanode.replicas=${{ inputs.datanode-replicas }} \
--set meta.replicas=${{ inputs.meta-replicas }} \
greptime/greptimedb-cluster \
--create-namespace \
-n my-greptimedb \
--values ./.github/actions/setup-greptimedb-cluster/${{ inputs.values-filename }} \
--wait \
--wait-for-jobs
- name: Wait for GreptimeDB
shell: bash
run: |
while true; do
PHASE=$(kubectl -n my-greptimedb get gtc my-greptimedb -o jsonpath='{.status.clusterPhase}')
if [ "$PHASE" == "Running" ]; then
echo "Cluster is ready"
break
else
echo "Cluster is not ready yet: Current phase: $PHASE"
kubectl get pods -n my-greptimedb
sleep 5 # wait for 5 seconds before checking again.
fi
done
- name: Print GreptimeDB info
if: always()
shell: bash
run: |
kubectl get all --show-labels -n my-greptimedb
- name: Describe Nodes
if: always()
shell: bash
run: |
kubectl describe nodes


@@ -1,13 +0,0 @@
meta:
configData: |-
[runtime]
global_rt_size = 4
datanode:
configData: |-
[runtime]
global_rt_size = 4
compact_rt_size = 2
frontend:
configData: |-
[runtime]
global_rt_size = 4


@@ -1,33 +0,0 @@
meta:
configData: |-
[runtime]
global_rt_size = 4
[datanode]
[datanode.client]
timeout = "60s"
datanode:
configData: |-
[runtime]
global_rt_size = 4
compact_rt_size = 2
[storage]
cache_path = "/data/greptimedb/s3cache"
cache_capacity = "256MB"
frontend:
configData: |-
[runtime]
global_rt_size = 4
[meta_client]
ddl_timeout = "60s"
objectStorage:
s3:
bucket: default
region: us-west-2
root: test-root
endpoint: http://minio.minio.svc.cluster.local
credentials:
accessKeyId: rootuser
secretAccessKey: rootpass123


@@ -1,29 +0,0 @@
meta:
configData: |-
[runtime]
global_rt_size = 4
[datanode]
[datanode.client]
timeout = "60s"
datanode:
configData: |-
[runtime]
global_rt_size = 4
compact_rt_size = 2
frontend:
configData: |-
[runtime]
global_rt_size = 4
[meta_client]
ddl_timeout = "60s"
objectStorage:
s3:
bucket: default
region: us-west-2
root: test-root
endpoint: http://minio.minio.svc.cluster.local
credentials:
accessKeyId: rootuser
secretAccessKey: rootpass123


@@ -1,45 +0,0 @@
meta:
configData: |-
[runtime]
global_rt_size = 4
[wal]
provider = "kafka"
broker_endpoints = ["kafka.kafka-cluster.svc.cluster.local:9092"]
num_topics = 3
[datanode]
[datanode.client]
timeout = "60s"
datanode:
configData: |-
[runtime]
global_rt_size = 4
compact_rt_size = 2
[wal]
provider = "kafka"
broker_endpoints = ["kafka.kafka-cluster.svc.cluster.local:9092"]
linger = "2ms"
frontend:
configData: |-
[runtime]
global_rt_size = 4
[meta_client]
ddl_timeout = "60s"
objectStorage:
s3:
bucket: default
region: us-west-2
root: test-root
endpoint: http://minio.minio.svc.cluster.local
credentials:
accessKeyId: rootuser
secretAccessKey: rootpass123
remoteWal:
enabled: true
kafka:
brokerEndpoints:
- "kafka.kafka-cluster.svc.cluster.local:9092"


@@ -1,24 +0,0 @@
name: Setup Kafka cluster
description: Deploy Kafka cluster on Kubernetes
inputs:
controller-replicas:
default: 3
description: "Kafka controller replicas"
namespace:
default: "kafka-cluster"
runs:
using: composite
steps:
- name: Install Kafka cluster
shell: bash
run: |
helm upgrade \
--install kafka oci://registry-1.docker.io/bitnamicharts/kafka \
--set controller.replicaCount=${{ inputs.controller-replicas }} \
--set controller.resources.requests.cpu=50m \
--set controller.resources.requests.memory=128Mi \
--set listeners.controller.protocol=PLAINTEXT \
--set listeners.client.protocol=PLAINTEXT \
--create-namespace \
-n ${{ inputs.namespace }}


@@ -1,10 +0,0 @@
name: Setup Kind
description: Deploy Kind
runs:
using: composite
steps:
- uses: actions/checkout@v4
- name: Create kind cluster
shell: bash
run: |
./.github/scripts/kind-with-registry.sh


@@ -1,24 +0,0 @@
name: Setup Minio cluster
description: Deploy Minio cluster on Kubernetes
inputs:
replicas:
default: 1
description: "replicas"
runs:
using: composite
steps:
- name: Install Minio cluster
shell: bash
run: |
helm repo add minio https://charts.min.io/
helm upgrade --install minio \
--set resources.requests.memory=128Mi \
--set replicas=${{ inputs.replicas }} \
--set mode=standalone \
--set rootUser=rootuser,rootPassword=rootpass123 \
--set buckets[0].name=default \
--set service.port=80,service.targetPort=9000 \
minio/minio \
--create-namespace \
-n minio


@@ -1,30 +0,0 @@
name: Setup PostgreSQL
description: Deploy PostgreSQL on Kubernetes
inputs:
postgres-replicas:
default: 1
description: "Number of PostgreSQL replicas"
namespace:
default: "postgres-namespace"
postgres-version:
default: "14.2"
description: "PostgreSQL version"
storage-size:
default: "1Gi"
description: "Storage size for PostgreSQL"
runs:
using: composite
steps:
- name: Install PostgreSQL
shell: bash
run: |
helm upgrade \
--install postgresql oci://registry-1.docker.io/bitnamicharts/postgresql \
--set replicaCount=${{ inputs.postgres-replicas }} \
--set image.tag=${{ inputs.postgres-version }} \
--set persistence.size=${{ inputs.storage-size }} \
--set postgresql.username=greptimedb \
--set postgresql.password=admin \
--create-namespace \
-n ${{ inputs.namespace }}


@@ -1,70 +0,0 @@
name: Run sqlness test
description: Run sqlness test on GreptimeDB
inputs:
aws-ci-test-bucket:
description: 'AWS S3 bucket name for testing'
required: true
aws-region:
description: 'AWS region for testing'
required: true
data-root:
description: 'Data root for testing'
required: true
aws-access-key-id:
description: 'AWS access key id for testing'
required: true
aws-secret-access-key:
description: 'AWS secret access key for testing'
required: true
runs:
using: composite
steps:
- name: Deploy GreptimeDB cluster by Helm
uses: ./.github/actions/deploy-greptimedb
with:
data-root: ${{ inputs.data-root }}
aws-ci-test-bucket: ${{ inputs.aws-ci-test-bucket }}
aws-region: ${{ inputs.aws-region }}
aws-access-key-id: ${{ inputs.aws-access-key-id }}
aws-secret-access-key: ${{ inputs.aws-secret-access-key }}
# TODO(zyy17): The following tests will be replaced by the real sqlness test.
- name: Run tests on greptimedb cluster
shell: bash
run: |
mysql -h 127.0.0.1 -P 14002 -e "CREATE TABLE IF NOT EXISTS system_metrics (host VARCHAR(255), idc VARCHAR(255), cpu_util DOUBLE, memory_util DOUBLE, disk_util DOUBLE, ts TIMESTAMP DEFAULT CURRENT_TIMESTAMP, PRIMARY KEY(host, idc), TIME INDEX(ts));" && \
mysql -h 127.0.0.1 -P 14002 -e "SHOW TABLES;"
- name: Run tests on greptimedb cluster that uses S3
shell: bash
run: |
mysql -h 127.0.0.1 -P 24002 -e "CREATE TABLE IF NOT EXISTS system_metrics (host VARCHAR(255), idc VARCHAR(255), cpu_util DOUBLE, memory_util DOUBLE, disk_util DOUBLE, ts TIMESTAMP DEFAULT CURRENT_TIMESTAMP, PRIMARY KEY(host, idc), TIME INDEX(ts));" && \
mysql -h 127.0.0.1 -P 24002 -e "SHOW TABLES;"
- name: Run tests on standalone greptimedb
shell: bash
run: |
mysql -h 127.0.0.1 -P 34002 -e "CREATE TABLE IF NOT EXISTS system_metrics (host VARCHAR(255), idc VARCHAR(255), cpu_util DOUBLE, memory_util DOUBLE, disk_util DOUBLE, ts TIMESTAMP DEFAULT CURRENT_TIMESTAMP, PRIMARY KEY(host, idc), TIME INDEX(ts));" && \
mysql -h 127.0.0.1 -P 34002 -e "SHOW TABLES;"
- name: Clean S3 data
shell: bash
env:
AWS_DEFAULT_REGION: ${{ inputs.aws-region }}
AWS_ACCESS_KEY_ID: ${{ inputs.aws-access-key-id }}
AWS_SECRET_ACCESS_KEY: ${{ inputs.aws-secret-access-key }}
run: |
aws s3 rm s3://${{ inputs.aws-ci-test-bucket }}/${{ inputs.data-root }} --recursive
- name: Export kind logs
if: failure()
shell: bash
run: kind export logs -n greptimedb-operator-e2e /tmp/kind
- name: Upload logs
if: failure()
uses: actions/upload-artifact@v4
with:
name: kind-logs
path: /tmp/kind
retention-days: 3


@@ -1,67 +0,0 @@
name: Start EC2 runner
description: Start EC2 runner
inputs:
runner:
description: The linux runner name
required: true
aws-access-key-id:
description: AWS access key id
required: true
aws-secret-access-key:
description: AWS secret access key
required: true
aws-region:
description: AWS region
required: true
github-token:
description: The GitHub token to clone private repository
required: false
default: ""
image-id:
description: The EC2 image id
required: true
security-group-id:
description: The EC2 security group id
required: true
subnet-id:
description: The EC2 subnet id
required: true
outputs:
label:
description: "label"
value: ${{ steps.start-linux-arm64-ec2-runner.outputs.label || inputs.runner }}
ec2-instance-id:
description: "ec2-instance-id"
value: ${{ steps.start-linux-arm64-ec2-runner.outputs.ec2-instance-id }}
runs:
using: composite
steps:
- name: Configure AWS credentials
if: startsWith(inputs.runner, 'ec2')
uses: aws-actions/configure-aws-credentials@v4
with:
aws-access-key-id: ${{ inputs.aws-access-key-id }}
aws-secret-access-key: ${{ inputs.aws-secret-access-key }}
aws-region: ${{ inputs.aws-region }}
# The EC2 runner will use the following format:
# <vm-type>-<instance-type>-<arch>
# like 'ec2-c6a.4xlarge-amd64'.
- name: Get EC2 instance type
if: startsWith(inputs.runner, 'ec2')
id: get-ec2-instance-type
shell: bash
run: |
echo "instance-type=$(echo ${{ inputs.runner }} | cut -d'-' -f2)" >> $GITHUB_OUTPUT
- name: Start EC2 runner
if: startsWith(inputs.runner, 'ec2')
uses: machulav/ec2-github-runner@v2
id: start-linux-arm64-ec2-runner
with:
mode: start
ec2-image-id: ${{ inputs.image-id }}
ec2-instance-type: ${{ steps.get-ec2-instance-type.outputs.instance-type }}
subnet-id: ${{ inputs.subnet-id }}
security-group-id: ${{ inputs.security-group-id }}
github-token: ${{ inputs.github-token }}
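The cut -d'-' -f2 in the "Get EC2 instance type" step relies on the runner naming convention documented above; a quick illustration of the parsing, run in a plain shell outside CI:
# For a runner named 'ec2-c6a.4xlarge-amd64', the step extracts the instance type:
echo "ec2-c6a.4xlarge-amd64" | cut -d'-' -f2   # prints: c6a.4xlarge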


@@ -1,41 +0,0 @@
name: Stop EC2 runner
description: Stop EC2 runner
inputs:
label:
description: The linux runner name
required: true
ec2-instance-id:
description: The EC2 instance id
required: true
aws-access-key-id:
description: AWS access key id
required: true
aws-secret-access-key:
description: AWS secret access key
required: true
aws-region:
description: AWS region
required: true
github-token:
description: The GitHub token to clone private repository
required: false
default: ""
runs:
using: composite
steps:
- name: Configure AWS credentials
if: ${{ inputs.label && inputs.ec2-instance-id }}
uses: aws-actions/configure-aws-credentials@v4
with:
aws-access-key-id: ${{ inputs.aws-access-key-id }}
aws-secret-access-key: ${{ inputs.aws-secret-access-key }}
aws-region: ${{ inputs.aws-region }}
- name: Stop EC2 runner
if: ${{ inputs.label && inputs.ec2-instance-id }}
uses: machulav/ec2-github-runner@v2
with:
mode: stop
label: ${{ inputs.label }}
ec2-instance-id: ${{ inputs.ec2-instance-id }}
github-token: ${{ inputs.github-token }}


@@ -1,64 +0,0 @@
name: Upload artifacts
description: Upload artifacts
inputs:
artifacts-dir:
description: Directory to store artifacts
required: true
target-file:
description: The path of the target artifact
required: false
version:
description: Version of the artifact
required: true
working-dir:
description: Working directory to upload the artifacts
required: false
default: .
runs:
using: composite
steps:
- name: Create artifacts directory
if: ${{ inputs.target-file != '' }}
working-directory: ${{ inputs.working-dir }}
shell: bash
run: |
mkdir -p ${{ inputs.artifacts-dir }} && \
cp ${{ inputs.target-file }} ${{ inputs.artifacts-dir }}
# The compressed artifacts will use the following layout:
# greptime-linux-amd64-pyo3-v0.3.0.sha256sum
# greptime-linux-amd64-pyo3-v0.3.0.tar.gz
# greptime-linux-amd64-pyo3-v0.3.0
# └── greptime
- name: Compress artifacts and calculate checksum
working-directory: ${{ inputs.working-dir }}
shell: bash
run: |
tar -zcvf ${{ inputs.artifacts-dir }}.tar.gz ${{ inputs.artifacts-dir }}
- name: Calculate checksum
if: runner.os != 'Windows'
working-directory: ${{ inputs.working-dir }}
shell: bash
run: |
echo $(shasum -a 256 ${{ inputs.artifacts-dir }}.tar.gz | cut -f1 -d' ') > ${{ inputs.artifacts-dir }}.sha256sum
- name: Calculate checksum on Windows
if: runner.os == 'Windows'
working-directory: ${{ inputs.working-dir }}
shell: pwsh
run: Get-FileHash ${{ inputs.artifacts-dir }}.tar.gz -Algorithm SHA256 | select -ExpandProperty Hash > ${{ inputs.artifacts-dir }}.sha256sum
# Note: The artifacts will be double zip-compressed (related issue: https://github.com/actions/upload-artifact/issues/39).
# However, when we use 'actions/download-artifact' to download the artifacts, it will be automatically unzipped.
- name: Upload artifacts
uses: actions/upload-artifact@v4
with:
name: ${{ inputs.artifacts-dir }}
path: ${{ inputs.working-dir }}/${{ inputs.artifacts-dir }}.tar.gz
- name: Upload checksum
uses: actions/upload-artifact@v4
with:
name: ${{ inputs.artifacts-dir }}.sha256sum
path: ${{ inputs.working-dir }}/${{ inputs.artifacts-dir }}.sha256sum
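Because the checksum file contains only the bare hex digest (not the "<hash>  <file>" format that shasum -c expects), verifying a downloaded artifact means comparing digests directly; a hedged sketch, with the artifact name below used purely as an example following the layout above:
# Verify a downloaded artifact against its companion .sha256sum (example file names).
ARTIFACT=greptime-linux-amd64-pyo3-v0.3.0
EXPECTED=$(cat "${ARTIFACT}.sha256sum")
ACTUAL=$(shasum -a 256 "${ARTIFACT}.tar.gz" | cut -f1 -d' ')
[ "$EXPECTED" = "$ACTUAL" ] && echo "checksum OK" || echo "checksum mismatch"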


@@ -0,0 +1,13 @@
{
"LABEL": {
"name": "breaking change",
"color": "D93F0B"
},
"CHECKS": {
"regexp": "^(?:(?!!:).)*$",
"ignoreLabels": [
"ignore-title"
],
"alwaysPassCI": true
}
}

.github/pr-title-checker-config.json (vendored, new file, 12 lines)

@@ -0,0 +1,12 @@
{
"LABEL": {
"name": "Invalid PR Title",
"color": "B60205"
},
"CHECKS": {
"regexp": "^(feat|fix|test|refactor|chore|style|docs|perf|build|ci|revert)(\\(.*\\))?\\!?:.*",
"ignoreLabels": [
"ignore-title"
]
}
}
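A rough local approximation of the title check, under the assumption that grep -E is close enough to the action's JavaScript regex for a quick smoke test before pushing:
# Hedged sketch: test a candidate PR title against the pattern above (example title only).
title='feat(mito2): example change'
if echo "$title" | grep -Eq '^(feat|fix|test|refactor|chore|style|docs|perf|build|ci|revert)(\(.*\))?!?:.*'; then
  echo "title passes"
else
  echo "title would get the 'Invalid PR Title' label"
fi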


@@ -1,10 +1,8 @@
I hereby agree to the terms of the [GreptimeDB CLA](https://github.com/GreptimeTeam/.github/blob/main/CLA.md).
## Refer to a related PR or issue link (optional)
I hereby agree to the terms of the [GreptimeDB CLA](https://gist.github.com/xtang/6378857777706e568c1949c7578592cc)
## What's changed and what's your intention?
__!!! DO NOT LEAVE THIS BLOCK EMPTY !!!__
_PLEASE DO NOT LEAVE THIS EMPTY !!!_
Please explain IN DETAIL what the changes are in this PR and why they are needed:
@@ -15,6 +13,7 @@ Please explain IN DETAIL what the changes are in this PR and why they are needed
## Checklist
- [ ] I have written the necessary rustdoc comments.
- [ ] I have added the necessary unit tests and integration tests.
- [ ] This PR requires documentation updates.
- [ ] I have written the necessary rustdoc comments.
- [ ] I have added the necessary unit tests and integration tests.
## Refer to a related PR or issue link (optional)


@@ -1,47 +0,0 @@
#!/usr/bin/env bash
set -e
set -o pipefail
SRC_IMAGE=$1
DST_REGISTRY=$2
SKOPEO_STABLE_IMAGE="quay.io/skopeo/stable:latest"
# Check if necessary variables are set.
function check_vars() {
for var in DST_REGISTRY_USERNAME DST_REGISTRY_PASSWORD DST_REGISTRY SRC_IMAGE; do
if [ -z "${!var}" ]; then
echo "$var is not set or empty."
echo "Usage: DST_REGISTRY_USERNAME=<your-dst-registry-username> DST_REGISTRY_PASSWORD=<your-dst-registry-password> $0 <dst-registry> <src-image>"
exit 1
fi
done
}
# Copies images from DockerHub to the destination registry.
function copy_images_from_dockerhub() {
# Check if docker is installed.
if ! command -v docker &> /dev/null; then
echo "docker is not installed. Please install docker to continue."
exit 1
fi
# Extract the name and tag of the source image.
IMAGE_NAME=$(echo "$SRC_IMAGE" | sed "s/.*\///")
echo "Copying $SRC_IMAGE to $DST_REGISTRY/$IMAGE_NAME"
docker run "$SKOPEO_STABLE_IMAGE" copy -a docker://"$SRC_IMAGE" \
--dest-creds "$DST_REGISTRY_USERNAME":"$DST_REGISTRY_PASSWORD" \
docker://"$DST_REGISTRY/$IMAGE_NAME"
}
function main() {
check_vars
copy_images_from_dockerhub
}
# Usage example:
# DST_REGISTRY_USERNAME=123 DST_REGISTRY_PASSWORD=456 \
# ./copy-image.sh greptime/greptimedb:v0.4.0 greptime-registry.cn-hangzhou.cr.aliyuncs.com
main


@@ -1,68 +0,0 @@
#!/usr/bin/env bash
set -e
# - If it's a tag push release, the version is the tag name(${{ github.ref_name }});
# - If it's a scheduled release, the version is '${{ env.NEXT_RELEASE_VERSION }}-nightly-$buildTime', like 'v0.2.0-nightly-20230313';
# - If it's a manual release, the version is '${{ env.NEXT_RELEASE_VERSION }}-$(git rev-parse --short HEAD)-YYYYMMDDSS', like 'v0.2.0-e5b243c-2023071245';
# - If it's a nightly build, the version is 'nightly-YYYYMMDD-$(git rev-parse --short HEAD)', like 'nightly-20230712-e5b243c'.
# create_version ${GITHUB_EVENT_NAME} ${NEXT_RELEASE_VERSION} ${NIGHTLY_RELEASE_PREFIX}
function create_version() {
# Read from environment variables.
if [ -z "$GITHUB_EVENT_NAME" ]; then
echo "GITHUB_EVENT_NAME is empty"
exit 1
fi
if [ -z "$NEXT_RELEASE_VERSION" ]; then
echo "NEXT_RELEASE_VERSION is empty"
exit 1
fi
if [ -z "$NIGHTLY_RELEASE_PREFIX" ]; then
echo "NIGHTLY_RELEASE_PREFIX is empty"
exit 1
fi
# Reuse $NEXT_RELEASE_VERSION to identify whether it's a nightly build.
# It will be like 'nightly-20230808-7d0d8dc6'.
if [ "$NEXT_RELEASE_VERSION" = nightly ]; then
echo "$NIGHTLY_RELEASE_PREFIX-$(date "+%Y%m%d")-$(git rev-parse --short HEAD)"
exit 0
fi
# Reuse $NEXT_RELEASE_VERSION to identify whether it's a dev build.
# It will be like 'dev-2023080819-f0e7216c'.
if [ "$NEXT_RELEASE_VERSION" = dev ]; then
if [ -z "$COMMIT_SHA" ]; then
echo "COMMIT_SHA is empty in dev build"
exit 1
fi
echo "dev-$(date "+%Y%m%d-%s")-$(echo "$COMMIT_SHA" | cut -c1-8)"
exit 0
fi
# Note: Only output 'version=xxx' to stdout when everything is ok, so that it can be used in GitHub Actions Outputs.
if [ "$GITHUB_EVENT_NAME" = push ]; then
if [ -z "$GITHUB_REF_NAME" ]; then
echo "GITHUB_REF_NAME is empty in push event"
exit 1
fi
echo "$GITHUB_REF_NAME"
elif [ "$GITHUB_EVENT_NAME" = workflow_dispatch ]; then
echo "$NEXT_RELEASE_VERSION-$(git rev-parse --short HEAD)-$(date "+%Y%m%d-%s")"
elif [ "$GITHUB_EVENT_NAME" = schedule ]; then
echo "$NEXT_RELEASE_VERSION-$NIGHTLY_RELEASE_PREFIX-$(date "+%Y%m%d")"
else
echo "Unsupported GITHUB_EVENT_NAME: $GITHUB_EVENT_NAME"
exit 1
fi
}
# You can run as following examples:
# GITHUB_EVENT_NAME=push NEXT_RELEASE_VERSION=v0.4.0 NIGHTLY_RELEASE_PREFIX=nightly GITHUB_REF_NAME=v0.3.0 ./create-version.sh
# GITHUB_EVENT_NAME=workflow_dispatch NEXT_RELEASE_VERSION=v0.4.0 NIGHTLY_RELEASE_PREFIX=nightly ./create-version.sh
# GITHUB_EVENT_NAME=schedule NEXT_RELEASE_VERSION=v0.4.0 NIGHTLY_RELEASE_PREFIX=nightly ./create-version.sh
# GITHUB_EVENT_NAME=schedule NEXT_RELEASE_VERSION=nightly NIGHTLY_RELEASE_PREFIX=nightly ./create-version.sh
# GITHUB_EVENT_NAME=workflow_dispatch COMMIT_SHA=f0e7216c4bb6acce9b29a21ec2d683be2e3f984a NEXT_RELEASE_VERSION=dev NIGHTLY_RELEASE_PREFIX=nightly ./create-version.sh
create_version


@@ -1,169 +0,0 @@
#!/usr/bin/env bash
set -e
set -o pipefail
KUBERNETES_VERSION="${KUBERNETES_VERSION:-v1.24.0}"
ENABLE_STANDALONE_MODE="${ENABLE_STANDALONE_MODE:-true}"
DEFAULT_INSTALL_NAMESPACE=${DEFAULT_INSTALL_NAMESPACE:-default}
GREPTIMEDB_IMAGE_TAG=${GREPTIMEDB_IMAGE_TAG:-latest}
ETCD_CHART="oci://registry-1.docker.io/bitnamicharts/etcd"
GREPTIME_CHART="https://greptimeteam.github.io/helm-charts/"
# Create a cluster with 1 control-plane node and 5 workers.
function create_kind_cluster() {
cat <<EOF | kind create cluster --name "${CLUSTER}" --image kindest/node:"$KUBERNETES_VERSION" --config=-
kind: Cluster
apiVersion: kind.x-k8s.io/v1alpha4
nodes:
- role: control-plane
- role: worker
- role: worker
- role: worker
- role: worker
- role: worker
EOF
}
# Add greptime Helm chart repo.
function add_greptime_chart() {
helm repo add greptime "$GREPTIME_CHART"
helm repo update
}
# Deploy an etcd cluster with 3 members.
function deploy_etcd_cluster() {
local namespace="$1"
helm install etcd "$ETCD_CHART" \
--set replicaCount=3 \
--set auth.rbac.create=false \
--set auth.rbac.token.enabled=false \
-n "$namespace"
# Wait for etcd cluster to be ready.
kubectl rollout status statefulset/etcd -n "$namespace"
}
# Deploy greptimedb-operator.
function deploy_greptimedb_operator() {
# Use the latest chart and image.
helm install greptimedb-operator greptime/greptimedb-operator \
--set image.tag=latest \
-n "$DEFAULT_INSTALL_NAMESPACE"
# Wait for greptimedb-operator to be ready.
kubectl rollout status deployment/greptimedb-operator -n "$DEFAULT_INSTALL_NAMESPACE"
}
# Deploy greptimedb cluster by using local storage.
# It will expose cluster service ports as '14000', '14001', '14002', '14003' to local access.
function deploy_greptimedb_cluster() {
local cluster_name=$1
local install_namespace=$2
kubectl create ns "$install_namespace"
deploy_etcd_cluster "$install_namespace"
helm install "$cluster_name" greptime/greptimedb-cluster \
--set image.tag="$GREPTIMEDB_IMAGE_TAG" \
--set meta.etcdEndpoints="etcd.$install_namespace:2379" \
-n "$install_namespace"
# Wait for greptimedb cluster to be ready.
while true; do
PHASE=$(kubectl -n "$install_namespace" get gtc "$cluster_name" -o jsonpath='{.status.clusterPhase}')
if [ "$PHASE" == "Running" ]; then
echo "Cluster is ready"
break
else
echo "Cluster is not ready yet: Current phase: $PHASE"
sleep 5 # wait for 5 seconds before checking again.
fi
done
# Expose greptimedb cluster to local access.
kubectl -n "$install_namespace" port-forward svc/"$cluster_name"-frontend \
14000:4000 \
14001:4001 \
14002:4002 \
14003:4003 > /tmp/connections.out &
}
# Deploy greptimedb cluster by using S3.
# It will expose cluster service ports as '24000', '24001', '24002', '24003' to local access.
function deploy_greptimedb_cluster_with_s3_storage() {
local cluster_name=$1
local install_namespace=$2
kubectl create ns "$install_namespace"
deploy_etcd_cluster "$install_namespace"
helm install "$cluster_name" greptime/greptimedb-cluster -n "$install_namespace" \
--set image.tag="$GREPTIMEDB_IMAGE_TAG" \
--set meta.etcdEndpoints="etcd.$install_namespace:2379" \
--set storage.s3.bucket="$AWS_CI_TEST_BUCKET" \
--set storage.s3.region="$AWS_REGION" \
--set storage.s3.root="$DATA_ROOT" \
--set storage.credentials.secretName=s3-credentials \
--set storage.credentials.accessKeyId="$AWS_ACCESS_KEY_ID" \
--set storage.credentials.secretAccessKey="$AWS_SECRET_ACCESS_KEY"
# Wait for greptimedb cluster to be ready.
while true; do
PHASE=$(kubectl -n "$install_namespace" get gtc "$cluster_name" -o jsonpath='{.status.clusterPhase}')
if [ "$PHASE" == "Running" ]; then
echo "Cluster is ready"
break
else
echo "Cluster is not ready yet: Current phase: $PHASE"
sleep 5 # wait for 5 seconds before checking again.
fi
done
# Expose the greptimedb cluster for local access.
kubectl -n "$install_namespace" port-forward svc/"$cluster_name"-frontend \
24000:4000 \
24001:4001 \
24002:4002 \
24003:4003 > /tmp/connections.out &
}
# Deploy standalone greptimedb.
# It will expose the service ports as '34000', '34001', '34002' and '34003' for local access.
function deploy_standalone_greptimedb() {
helm install greptimedb-standalone greptime/greptimedb-standalone \
--set image.tag="$GREPTIMEDB_IMAGE_TAG" \
-n "$DEFAULT_INSTALL_NAMESPACE"
# Wait for the greptimedb-standalone statefulset to be ready.
kubectl rollout status statefulset/greptimedb-standalone -n "$DEFAULT_INSTALL_NAMESPACE"
# Expose greptimedb for local access.
kubectl -n "$DEFAULT_INSTALL_NAMESPACE" port-forward svc/greptimedb-standalone \
34000:4000 \
34001:4001 \
34002:4002 \
34003:4003 > /tmp/connections.out &
}
# Entrypoint of the script.
function main() {
create_kind_cluster
add_greptime_chart
# Deploy standalone greptimedb in the same K8s.
if [ "$ENABLE_STANDALONE_MODE" == "true" ]; then
deploy_standalone_greptimedb
fi
deploy_greptimedb_operator
deploy_greptimedb_cluster testcluster testcluster
deploy_greptimedb_cluster_with_s3_storage testcluster-s3 testcluster-s3
}
# Usage:
# - Deploy a greptimedb cluster: ./deploy-greptimedb.sh
main
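For reference, a minimal invocation sketch of the script above, assuming the environment variables it reads (the tag and cluster-name values below are illustrative; the S3 variant additionally expects AWS_CI_TEST_BUCKET, AWS_REGION, DATA_ROOT, AWS_ACCESS_KEY_ID and AWS_SECRET_ACCESS_KEY to be set):

# Sketch only: override the defaults read at the top of the script, then run it.
export KUBERNETES_VERSION=v1.24.0
export ENABLE_STANDALONE_MODE=false       # skip the standalone deployment
export GREPTIMEDB_IMAGE_TAG=v0.2.0        # illustrative tag
export CLUSTER=greptimedb-e2e             # kind cluster name used by create_kind_cluster
./deploy-greptimedb.sh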

View File

@@ -1,66 +0,0 @@
#!/usr/bin/env bash
set -e
set -o pipefail
# 1. Create registry container unless it already exists
reg_name='kind-registry'
reg_port='5001'
if [ "$(docker inspect -f '{{.State.Running}}' "${reg_name}" 2>/dev/null || true)" != 'true' ]; then
docker run \
-d --restart=always -p "127.0.0.1:${reg_port}:5000" --network bridge --name "${reg_name}" \
registry:2
fi
# 2. Create kind cluster with containerd registry config dir enabled
# TODO: kind will eventually enable this by default and this patch will
# be unnecessary.
#
# See:
# https://github.com/kubernetes-sigs/kind/issues/2875
# https://github.com/containerd/containerd/blob/main/docs/cri/config.md#registry-configuration
# See: https://github.com/containerd/containerd/blob/main/docs/hosts.md
cat <<EOF | kind create cluster --wait 2m --config=-
kind: Cluster
apiVersion: kind.x-k8s.io/v1alpha4
containerdConfigPatches:
- |-
[plugins."io.containerd.grpc.v1.cri".registry]
config_path = "/etc/containerd/certs.d"
EOF
# 3. Add the registry config to the nodes
#
# This is necessary because localhost resolves to loopback addresses that are
# network-namespace local.
# In other words: localhost in the container is not localhost on the host.
#
# We want a consistent name that works from both ends, so we tell containerd to
# alias localhost:${reg_port} to the registry container when pulling images
REGISTRY_DIR="/etc/containerd/certs.d/localhost:${reg_port}"
for node in $(kind get nodes); do
docker exec "${node}" mkdir -p "${REGISTRY_DIR}"
cat <<EOF | docker exec -i "${node}" cp /dev/stdin "${REGISTRY_DIR}/hosts.toml"
[host."http://${reg_name}:5000"]
EOF
done
# 4. Connect the registry to the cluster network if not already connected
# This allows kind to bootstrap the network but ensures they're on the same network
if [ "$(docker inspect -f='{{json .NetworkSettings.Networks.kind}}' "${reg_name}")" = 'null' ]; then
docker network connect "kind" "${reg_name}"
fi
# 5. Document the local registry
# https://github.com/kubernetes/enhancements/tree/master/keps/sig-cluster-lifecycle/generic/1755-communicating-a-local-registry
cat <<EOF | kubectl apply -f -
apiVersion: v1
kind: ConfigMap
metadata:
name: local-registry-hosting
namespace: kube-public
data:
localRegistryHosting.v1: |
host: "localhost:${reg_port}"
help: "https://kind.sigs.k8s.io/docs/user/local-registry/"
EOF
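As a usage sketch (not part of the script), an image pushed to the local registry created above is then pullable inside the kind cluster under the same localhost:5001 name; the image name below is purely illustrative:

# Sketch only: push an image to the local registry and reference it from the cluster.
docker build -t localhost:5001/example-app:dev .
docker push localhost:5001/example-app:dev
kubectl run example-app --image=localhost:5001/example-app:dev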

View File

@@ -1,102 +0,0 @@
#!/usr/bin/env bash
set -e
set -o pipefail
ARTIFACTS_DIR=$1
VERSION=$2
AWS_S3_BUCKET=$3
RELEASE_DIRS="releases/greptimedb"
GREPTIMEDB_REPO="GreptimeTeam/greptimedb"
# Check if necessary variables are set.
function check_vars() {
for var in AWS_S3_BUCKET VERSION ARTIFACTS_DIR; do
if [ -z "${!var}" ]; then
echo "$var is not set or empty."
echo "Usage: $0 <artifacts-dir> <version> <aws-s3-bucket>"
exit 1
fi
done
}
# Uploads artifacts to AWS S3 bucket.
function upload_artifacts() {
# The bucket layout will be:
# releases/greptimedb
# ├── latest-version.txt
# ├── latest-nightly-version.txt
# ├── v0.1.0
# │ ├── greptime-darwin-amd64-pyo3-v0.1.0.sha256sum
# │ └── greptime-darwin-amd64-pyo3-v0.1.0.tar.gz
# └── v0.2.0
# ├── greptime-darwin-amd64-pyo3-v0.2.0.sha256sum
# └── greptime-darwin-amd64-pyo3-v0.2.0.tar.gz
find "$ARTIFACTS_DIR" -type f \( -name "*.tar.gz" -o -name "*.sha256sum" \) | while IFS= read -r file; do
aws s3 cp \
"$file" "s3://$AWS_S3_BUCKET/$RELEASE_DIRS/$VERSION/$(basename "$file")"
done
}
# Updates the latest version information in AWS S3 if UPDATE_VERSION_INFO is true.
function update_version_info() {
if [ "$UPDATE_VERSION_INFO" == "true" ]; then
# If it's an official release (like v1.0.0, v1.0.1, v1.0.2, etc.), update latest-version.txt.
if [[ "$VERSION" =~ ^v[0-9]+\.[0-9]+\.[0-9]+$ ]]; then
echo "Updating latest-version.txt"
echo "$VERSION" > latest-version.txt
aws s3 cp \
latest-version.txt "s3://$AWS_S3_BUCKET/$RELEASE_DIRS/latest-version.txt"
fi
# If it's the nightly release, update latest-nightly-version.txt.
if [[ "$VERSION" == *"nightly"* ]]; then
echo "Updating latest-nightly-version.txt"
echo "$VERSION" > latest-nightly-version.txt
aws s3 cp \
latest-nightly-version.txt "s3://$AWS_S3_BUCKET/$RELEASE_DIRS/latest-nightly-version.txt"
fi
fi
}
# Downloads artifacts from GitHub if DOWNLOAD_ARTIFACTS_FROM_GITHUB is true.
function download_artifacts_from_github() {
if [ "$DOWNLOAD_ARTIFACTS_FROM_GITHUB" == "true" ]; then
# Check if jq is installed.
if ! command -v jq &> /dev/null; then
echo "jq is not installed. Please install jq to continue."
exit 1
fi
# Get the latest release API response.
RELEASES_API_RESPONSE=$(curl -s -H "Accept: application/vnd.github.v3+json" "https://api.github.com/repos/$GREPTIMEDB_REPO/releases/latest")
# Extract download URLs for the artifacts.
# Exclude source code archives, which are typically named 'greptimedb-<version>.zip' or 'greptimedb-<version>.tar.gz'.
ASSET_URLS=$(echo "$RELEASES_API_RESPONSE" | jq -r '.assets[] | select(.name | test("greptimedb-.*\\.(zip|tar\\.gz)$") | not) | .browser_download_url')
# Download each asset.
while IFS= read -r url; do
if [ -n "$url" ]; then
curl -LJO "$url"
echo "Downloaded: $url"
fi
done <<< "$ASSET_URLS"
fi
}
function main() {
check_vars
download_artifacts_from_github
upload_artifacts
update_version_info
}
# Usage example:
# AWS_ACCESS_KEY_ID=<your_access_key_id> \
# AWS_SECRET_ACCESS_KEY=<your_secret_access_key> \
# AWS_DEFAULT_REGION=<your_region> \
# UPDATE_VERSION_INFO=true \
# DOWNLOAD_ARTIFACTS_FROM_GITHUB=false \
# ./upload-artifacts-to-s3.sh <artifacts-dir> <version> <aws-s3-bucket>
main
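A quick verification sketch against the bucket layout documented in upload_artifacts (the bucket and version values are placeholders):

# Sketch only: list the uploaded artifacts for a version and print the latest-version marker.
aws s3 ls "s3://$AWS_S3_BUCKET/releases/greptimedb/v0.2.0/"
aws s3 cp "s3://$AWS_S3_BUCKET/releases/greptimedb/latest-version.txt" -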

View File

@@ -1,7 +1,7 @@
on:
push:
branches:
- main
- develop
paths-ignore:
- 'docs/**'
- 'config/**'
@@ -12,15 +12,20 @@ on:
name: Build API docs
env:
RUST_TOOLCHAIN: nightly-2023-02-26
jobs:
apidoc:
runs-on: ubuntu-20.04
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v4
- uses: arduino/setup-protoc@v3
- uses: actions/checkout@v3
- uses: arduino/setup-protoc@v1
with:
repo-token: ${{ secrets.GITHUB_TOKEN }}
- uses: actions-rust-lang/setup-rust-toolchain@v1
- uses: dtolnay/rust-toolchain@master
with:
toolchain: ${{ env.RUST_TOOLCHAIN }}
- run: cargo doc --workspace --no-deps --document-private-items
- run: |
cat <<EOF > target/doc/index.html
@@ -35,4 +40,3 @@ jobs:
uses: JamesIves/github-pages-deploy-action@v4
with:
folder: target/doc
single-commit: true

View File

@@ -1,361 +0,0 @@
# The development build only builds the debug version of the artifacts and is triggered manually.
name: GreptimeDB Development Build
on:
workflow_dispatch: # Allows you to run this workflow manually.
inputs:
repository:
description: The public repository to build
required: false
default: GreptimeTeam/greptimedb
commit: # Note: We only pull the source code and use the current workflow to build the artifacts.
description: The commit to build
required: true
linux_amd64_runner:
type: choice
description: The runner used to build linux-amd64 artifacts
default: ec2-c6i.4xlarge-amd64
options:
- ubuntu-20.04
- ubuntu-20.04-8-cores
- ubuntu-20.04-16-cores
- ubuntu-20.04-32-cores
- ubuntu-20.04-64-cores
- ec2-c6i.xlarge-amd64 # 4C8G
- ec2-c6i.2xlarge-amd64 # 8C16G
- ec2-c6i.4xlarge-amd64 # 16C32G
- ec2-c6i.8xlarge-amd64 # 32C64G
- ec2-c6i.16xlarge-amd64 # 64C128G
linux_arm64_runner:
type: choice
description: The runner used to build linux-arm64 artifacts
default: ec2-c6g.4xlarge-arm64
options:
- ec2-c6g.xlarge-arm64 # 4C8G
- ec2-c6g.2xlarge-arm64 # 8C16G
- ec2-c6g.4xlarge-arm64 # 16C32G
- ec2-c6g.8xlarge-arm64 # 32C64G
- ec2-c6g.16xlarge-arm64 # 64C128G
skip_test:
description: Do not run integration tests during the build
type: boolean
default: true
build_linux_amd64_artifacts:
type: boolean
description: Build linux-amd64 artifacts
required: false
default: true
build_linux_arm64_artifacts:
type: boolean
description: Build linux-arm64 artifacts
required: false
default: true
release_images:
type: boolean
description: Build and push images to DockerHub and ACR
required: false
default: true
cargo_profile:
type: choice
description: The cargo profile to use in building GreptimeDB.
default: nightly
options:
- dev
- release
- nightly
# Use env variables to control the whole release process.
env:
CARGO_PROFILE: ${{ inputs.cargo_profile }}
# Controls whether to run tests, including unit tests, integration tests and sqlness.
DISABLE_RUN_TESTS: ${{ inputs.skip_test || vars.DEFAULT_SKIP_TEST }}
# Always use 'dev' to indicate it's the dev build.
NEXT_RELEASE_VERSION: dev
NIGHTLY_RELEASE_PREFIX: nightly
# Use the different image name to avoid conflict with the release images.
IMAGE_NAME: greptimedb-dev
# The source code will be checked out at the following path: '${WORKING_DIR}/dev/greptime'.
CHECKOUT_GREPTIMEDB_PATH: dev/greptimedb
permissions:
issues: write
jobs:
allocate-runners:
name: Allocate runners
if: ${{ github.repository == 'GreptimeTeam/greptimedb' }}
runs-on: ubuntu-20.04
outputs:
linux-amd64-runner: ${{ steps.start-linux-amd64-runner.outputs.label }}
linux-arm64-runner: ${{ steps.start-linux-arm64-runner.outputs.label }}
# The following EC2 resource id will be used for resource releasing.
linux-amd64-ec2-runner-label: ${{ steps.start-linux-amd64-runner.outputs.label }}
linux-amd64-ec2-runner-instance-id: ${{ steps.start-linux-amd64-runner.outputs.ec2-instance-id }}
linux-arm64-ec2-runner-label: ${{ steps.start-linux-arm64-runner.outputs.label }}
linux-arm64-ec2-runner-instance-id: ${{ steps.start-linux-arm64-runner.outputs.ec2-instance-id }}
# The 'version' is used as the global tag name of the release workflow.
version: ${{ steps.create-version.outputs.version }}
steps:
- name: Checkout
uses: actions/checkout@v4
with:
fetch-depth: 0
- name: Create version
id: create-version
run: |
version=$(./.github/scripts/create-version.sh) && \
echo $version && \
echo "version=$version" >> $GITHUB_OUTPUT
env:
GITHUB_EVENT_NAME: ${{ github.event_name }}
GITHUB_REF_NAME: ${{ github.ref_name }}
COMMIT_SHA: ${{ inputs.commit }}
NEXT_RELEASE_VERSION: ${{ env.NEXT_RELEASE_VERSION }}
NIGHTLY_RELEASE_PREFIX: ${{ env.NIGHTLY_RELEASE_PREFIX }}
- name: Allocate linux-amd64 runner
if: ${{ inputs.build_linux_amd64_artifacts || github.event_name == 'schedule' }}
uses: ./.github/actions/start-runner
id: start-linux-amd64-runner
with:
runner: ${{ inputs.linux_amd64_runner || vars.DEFAULT_AMD64_RUNNER }}
aws-access-key-id: ${{ secrets.AWS_ACCESS_KEY_ID }}
aws-secret-access-key: ${{ secrets.AWS_SECRET_ACCESS_KEY }}
aws-region: ${{ vars.EC2_RUNNER_REGION }}
github-token: ${{ secrets.GH_PERSONAL_ACCESS_TOKEN }}
image-id: ${{ vars.EC2_RUNNER_LINUX_AMD64_IMAGE_ID }}
security-group-id: ${{ vars.EC2_RUNNER_SECURITY_GROUP_ID }}
subnet-id: ${{ vars.EC2_RUNNER_SUBNET_ID }}
- name: Allocate linux-arm64 runner
if: ${{ inputs.build_linux_arm64_artifacts || github.event_name == 'schedule' }}
uses: ./.github/actions/start-runner
id: start-linux-arm64-runner
with:
runner: ${{ inputs.linux_arm64_runner || vars.DEFAULT_ARM64_RUNNER }}
aws-access-key-id: ${{ secrets.AWS_ACCESS_KEY_ID }}
aws-secret-access-key: ${{ secrets.AWS_SECRET_ACCESS_KEY }}
aws-region: ${{ vars.EC2_RUNNER_REGION }}
github-token: ${{ secrets.GH_PERSONAL_ACCESS_TOKEN }}
image-id: ${{ vars.EC2_RUNNER_LINUX_ARM64_IMAGE_ID }}
security-group-id: ${{ vars.EC2_RUNNER_SECURITY_GROUP_ID }}
subnet-id: ${{ vars.EC2_RUNNER_SUBNET_ID }}
build-linux-amd64-artifacts:
name: Build linux-amd64 artifacts
if: ${{ inputs.build_linux_amd64_artifacts || github.event_name == 'schedule' }}
needs: [
allocate-runners,
]
runs-on: ${{ needs.allocate-runners.outputs.linux-amd64-runner }}
steps:
- name: Checkout
uses: actions/checkout@v4
with:
fetch-depth: 0
- name: Checkout greptimedb
uses: actions/checkout@v4
with:
repository: ${{ inputs.repository }}
ref: ${{ inputs.commit }}
path: ${{ env.CHECKOUT_GREPTIMEDB_PATH }}
- uses: ./.github/actions/build-linux-artifacts
with:
arch: amd64
cargo-profile: ${{ env.CARGO_PROFILE }}
version: ${{ needs.allocate-runners.outputs.version }}
disable-run-tests: ${{ env.DISABLE_RUN_TESTS }}
dev-mode: true # Only build the standard greptime binary.
working-dir: ${{ env.CHECKOUT_GREPTIMEDB_PATH }}
image-registry: ${{ vars.ECR_IMAGE_REGISTRY }}
image-namespace: ${{ vars.ECR_IMAGE_NAMESPACE }}
build-linux-arm64-artifacts:
name: Build linux-arm64 artifacts
if: ${{ inputs.build_linux_arm64_artifacts || github.event_name == 'schedule' }}
needs: [
allocate-runners,
]
runs-on: ${{ needs.allocate-runners.outputs.linux-arm64-runner }}
steps:
- name: Checkout
uses: actions/checkout@v4
with:
fetch-depth: 0
- name: Checkout greptimedb
uses: actions/checkout@v4
with:
repository: ${{ inputs.repository }}
ref: ${{ inputs.commit }}
path: ${{ env.CHECKOUT_GREPTIMEDB_PATH }}
- uses: ./.github/actions/build-linux-artifacts
with:
arch: arm64
cargo-profile: ${{ env.CARGO_PROFILE }}
version: ${{ needs.allocate-runners.outputs.version }}
disable-run-tests: ${{ env.DISABLE_RUN_TESTS }}
dev-mode: true # Only build the standard greptime binary.
working-dir: ${{ env.CHECKOUT_GREPTIMEDB_PATH }}
image-registry: ${{ vars.ECR_IMAGE_REGISTRY }}
image-namespace: ${{ vars.ECR_IMAGE_NAMESPACE }}
release-images-to-dockerhub:
name: Build and push images to DockerHub
if: ${{ inputs.release_images || github.event_name == 'schedule' }}
needs: [
allocate-runners,
build-linux-amd64-artifacts,
build-linux-arm64-artifacts,
]
runs-on: ubuntu-20.04
outputs:
build-result: ${{ steps.set-build-result.outputs.build-result }}
steps:
- uses: actions/checkout@v4
with:
fetch-depth: 0
- name: Build and push images to dockerhub
uses: ./.github/actions/build-images
with:
image-registry: docker.io
image-namespace: ${{ vars.IMAGE_NAMESPACE }}
image-name: ${{ env.IMAGE_NAME }}
image-registry-username: ${{ secrets.DOCKERHUB_USERNAME }}
image-registry-password: ${{ secrets.DOCKERHUB_TOKEN }}
version: ${{ needs.allocate-runners.outputs.version }}
push-latest-tag: false # Don't push the latest tag to registry.
dev-mode: true # Only build the standard images.
- name: Set build result
id: set-build-result
run: |
echo "build-result=success" >> $GITHUB_OUTPUT
release-cn-artifacts:
name: Release artifacts to CN region
if: ${{ inputs.release_images || github.event_name == 'schedule' }}
needs: [
allocate-runners,
release-images-to-dockerhub,
]
runs-on: ubuntu-20.04
continue-on-error: true
steps:
- uses: actions/checkout@v4
with:
fetch-depth: 0
- name: Release artifacts to CN region
uses: ./.github/actions/release-cn-artifacts
with:
src-image-registry: docker.io
src-image-namespace: ${{ vars.IMAGE_NAMESPACE }}
src-image-name: ${{ env.IMAGE_NAME }}
dst-image-registry-username: ${{ secrets.ALICLOUD_USERNAME }}
dst-image-registry-password: ${{ secrets.ALICLOUD_PASSWORD }}
dst-image-registry: ${{ vars.ACR_IMAGE_REGISTRY }}
dst-image-namespace: ${{ vars.IMAGE_NAMESPACE }}
version: ${{ needs.allocate-runners.outputs.version }}
aws-cn-s3-bucket: ${{ vars.AWS_RELEASE_BUCKET }}
aws-cn-access-key-id: ${{ secrets.AWS_CN_ACCESS_KEY_ID }}
aws-cn-secret-access-key: ${{ secrets.AWS_CN_SECRET_ACCESS_KEY }}
aws-cn-region: ${{ vars.AWS_RELEASE_BUCKET_REGION }}
dev-mode: true # Only build the standard images (excluding centos images).
push-latest-tag: false # Don't push the latest tag to registry.
update-version-info: false # Don't update the version info in S3.
stop-linux-amd64-runner: # It's always run as the last job in the workflow to make sure that the runner is released.
name: Stop linux-amd64 runner
# Only run this job when the runner is allocated.
if: ${{ always() }}
runs-on: ubuntu-20.04
needs: [
allocate-runners,
build-linux-amd64-artifacts,
]
steps:
- name: Checkout
uses: actions/checkout@v4
with:
fetch-depth: 0
- name: Stop EC2 runner
uses: ./.github/actions/stop-runner
with:
label: ${{ needs.allocate-runners.outputs.linux-amd64-ec2-runner-label }}
ec2-instance-id: ${{ needs.allocate-runners.outputs.linux-amd64-ec2-runner-instance-id }}
aws-access-key-id: ${{ secrets.AWS_ACCESS_KEY_ID }}
aws-secret-access-key: ${{ secrets.AWS_SECRET_ACCESS_KEY }}
aws-region: ${{ vars.EC2_RUNNER_REGION }}
github-token: ${{ secrets.GH_PERSONAL_ACCESS_TOKEN }}
stop-linux-arm64-runner: # It's always run as the last job in the workflow to make sure that the runner is released.
name: Stop linux-arm64 runner
# Only run this job when the runner is allocated.
if: ${{ always() }}
runs-on: ubuntu-20.04
needs: [
allocate-runners,
build-linux-arm64-artifacts,
]
steps:
- name: Checkout
uses: actions/checkout@v4
with:
fetch-depth: 0
- name: Stop EC2 runner
uses: ./.github/actions/stop-runner
with:
label: ${{ needs.allocate-runners.outputs.linux-arm64-ec2-runner-label }}
ec2-instance-id: ${{ needs.allocate-runners.outputs.linux-arm64-ec2-runner-instance-id }}
aws-access-key-id: ${{ secrets.AWS_ACCESS_KEY_ID }}
aws-secret-access-key: ${{ secrets.AWS_SECRET_ACCESS_KEY }}
aws-region: ${{ vars.EC2_RUNNER_REGION }}
github-token: ${{ secrets.GH_PERSONAL_ACCESS_TOKEN }}
notification:
if: ${{ github.repository == 'GreptimeTeam/greptimedb' && always() }} # Not requiring successful dependent jobs, always run.
name: Send notification to Greptime team
needs: [
release-images-to-dockerhub
]
runs-on: ubuntu-20.04
env:
SLACK_WEBHOOK_URL: ${{ secrets.SLACK_WEBHOOK_URL_DEVELOP_CHANNEL }}
steps:
- uses: actions/checkout@v4
- uses: ./.github/actions/setup-cyborg
- name: Report CI status
id: report-ci-status
working-directory: cyborg
run: pnpm tsx bin/report-ci-failure.ts
env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
CI_REPORT_STATUS: ${{ needs.release-images-to-dockerhub.outputs.build-result == 'success' }}
- name: Notify dev build successful result
uses: slackapi/slack-github-action@v1.23.0
if: ${{ needs.release-images-to-dockerhub.outputs.build-result == 'success' }}
with:
payload: |
{"text": "GreptimeDB's ${{ env.NEXT_RELEASE_VERSION }} build has completed successfully."}
- name: Notify dev build failed result
uses: slackapi/slack-github-action@v1.23.0
if: ${{ needs.release-images-to-dockerhub.outputs.build-result != 'success' }}
with:
payload: |
{"text": "GreptimeDB's ${{ env.NEXT_RELEASE_VERSION }} build has failed, please check ${{ steps.report-ci-status.outputs.html_url }}."}

View File

@@ -1,7 +1,6 @@
on:
merge_group:
pull_request:
types: [ opened, synchronize, reopened, ready_for_review ]
types: [opened, synchronize, reopened, ready_for_review]
paths-ignore:
- 'docs/**'
- 'config/**'
@@ -9,9 +8,9 @@ on:
- '.dockerignore'
- 'docker/**'
- '.gitignore'
- 'grafana/**'
push:
branches:
- develop
- main
paths-ignore:
- 'docs/**'
@@ -20,736 +19,217 @@ on:
- '.dockerignore'
- 'docker/**'
- '.gitignore'
- 'grafana/**'
workflow_dispatch:
name: CI
concurrency:
group: ${{ github.workflow }}-${{ github.head_ref || github.run_id }}
cancel-in-progress: true
env:
RUST_TOOLCHAIN: nightly-2023-02-26
jobs:
check-typos-and-docs:
name: Check typos and docs
runs-on: ubuntu-20.04
typos:
name: Spell Check with Typos
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v4
- uses: crate-ci/typos@master
- name: Check the config docs
run: |
make config-docs && \
git diff --name-only --exit-code ./config/config.md \
|| (echo "'config/config.md' is not up-to-date, please run 'make config-docs'." && exit 1)
license-header-check:
runs-on: ubuntu-20.04
name: Check License Header
steps:
- uses: actions/checkout@v4
- uses: korandoru/hawkeye@v5
- uses: actions/checkout@v3
- uses: crate-ci/typos@v1.13.10
check:
name: Check
runs-on: ${{ matrix.os }}
strategy:
matrix:
os: [ windows-2022, ubuntu-20.04 ]
if: github.event.pull_request.draft == false
runs-on: ubuntu-latest
timeout-minutes: 60
steps:
- uses: actions/checkout@v4
- uses: arduino/setup-protoc@v3
- uses: actions/checkout@v3
- uses: arduino/setup-protoc@v1
with:
repo-token: ${{ secrets.GITHUB_TOKEN }}
- uses: actions-rust-lang/setup-rust-toolchain@v1
- uses: dtolnay/rust-toolchain@master
with:
toolchain: ${{ env.RUST_TOOLCHAIN }}
- name: Rust Cache
uses: Swatinem/rust-cache@v2
with:
# Shares across multiple jobs
# Shares with `Clippy` job
shared-key: "check-lint"
- name: Run cargo check
run: cargo check --locked --workspace --all-targets
run: cargo check --workspace --all-targets
toml:
name: Toml Check
runs-on: ubuntu-20.04
if: github.event.pull_request.draft == false
runs-on: ubuntu-latest
timeout-minutes: 60
steps:
- uses: actions/checkout@v4
- uses: actions-rust-lang/setup-rust-toolchain@v1
- uses: actions/checkout@v3
- uses: dtolnay/rust-toolchain@master
with:
toolchain: ${{ env.RUST_TOOLCHAIN }}
- name: Rust Cache
uses: Swatinem/rust-cache@v2
with:
# Shares across multiple jobs
shared-key: "check-toml"
- name: Install taplo
run: cargo +stable install taplo-cli --version ^0.9 --locked
run: cargo install taplo-cli --version ^0.8 --locked
- name: Run taplo
run: taplo format --check
run: taplo format --check --option "indent_string= "
build:
name: Build GreptimeDB binaries
runs-on: ${{ matrix.os }}
strategy:
matrix:
os: [ ubuntu-20.04 ]
timeout-minutes: 60
steps:
- uses: actions/checkout@v4
- uses: arduino/setup-protoc@v3
with:
repo-token: ${{ secrets.GITHUB_TOKEN }}
- uses: actions-rust-lang/setup-rust-toolchain@v1
- uses: Swatinem/rust-cache@v2
with:
# Shares across multiple jobs
shared-key: "build-binaries"
- name: Install cargo-gc-bin
shell: bash
run: cargo install cargo-gc-bin
- name: Build greptime binaries
shell: bash
# `cargo gc` will invoke `cargo build` with specified args
run: cargo gc -- --bin greptime --bin sqlness-runner
- name: Pack greptime binaries
shell: bash
run: |
mkdir bins && \
mv ./target/debug/greptime bins && \
mv ./target/debug/sqlness-runner bins
- name: Print greptime binaries info
run: ls -lh bins
- name: Upload artifacts
uses: ./.github/actions/upload-artifacts
with:
artifacts-dir: bins
version: current
fuzztest:
name: Fuzz Test
needs: build
runs-on: ubuntu-latest
timeout-minutes: 60
strategy:
fail-fast: false
matrix:
target: [ "fuzz_create_table", "fuzz_alter_table", "fuzz_create_database", "fuzz_create_logical_table", "fuzz_alter_logical_table", "fuzz_insert", "fuzz_insert_logical_table" ]
steps:
- name: Remove unused software
run: |
echo "Disk space before:"
df -h
[[ -d /usr/share/dotnet ]] && sudo rm -rf /usr/share/dotnet
[[ -d /usr/local/lib/android ]] && sudo rm -rf /usr/local/lib/android
[[ -d /opt/ghc ]] && sudo rm -rf /opt/ghc
[[ -d /opt/hostedtoolcache/CodeQL ]] && sudo rm -rf /opt/hostedtoolcache/CodeQL
sudo docker image prune --all --force
sudo docker builder prune -a
echo "Disk space after:"
df -h
- uses: actions/checkout@v4
- uses: arduino/setup-protoc@v3
with:
repo-token: ${{ secrets.GITHUB_TOKEN }}
- uses: actions-rust-lang/setup-rust-toolchain@v1
- name: Rust Cache
uses: Swatinem/rust-cache@v2
with:
# Shares across multiple jobs
shared-key: "fuzz-test-targets"
- name: Set Rust Fuzz
shell: bash
run: |
sudo apt-get install -y libfuzzer-14-dev
rustup install nightly
cargo +nightly install cargo-fuzz cargo-gc-bin
- name: Download pre-built binaries
uses: actions/download-artifact@v4
with:
name: bins
path: .
- name: Unzip binaries
run: |
tar -xvf ./bins.tar.gz
rm ./bins.tar.gz
- name: Run GreptimeDB
run: |
./bins/greptime standalone start&
- name: Fuzz Test
uses: ./.github/actions/fuzz-test
env:
CUSTOM_LIBFUZZER_PATH: /usr/lib/llvm-14/lib/libFuzzer.a
GT_MYSQL_ADDR: 127.0.0.1:4002
with:
target: ${{ matrix.target }}
max-total-time: 120
unstable-fuzztest:
name: Unstable Fuzz Test
needs: build-greptime-ci
runs-on: ubuntu-latest
timeout-minutes: 60
strategy:
matrix:
target: [ "unstable_fuzz_create_table_standalone" ]
steps:
- name: Remove unused software
run: |
echo "Disk space before:"
df -h
[[ -d /usr/share/dotnet ]] && sudo rm -rf /usr/share/dotnet
[[ -d /usr/local/lib/android ]] && sudo rm -rf /usr/local/lib/android
[[ -d /opt/ghc ]] && sudo rm -rf /opt/ghc
[[ -d /opt/hostedtoolcache/CodeQL ]] && sudo rm -rf /opt/hostedtoolcache/CodeQL
sudo docker image prune --all --force
sudo docker builder prune -a
echo "Disk space after:"
df -h
- uses: actions/checkout@v4
- uses: arduino/setup-protoc@v3
with:
repo-token: ${{ secrets.GITHUB_TOKEN }}
- uses: actions-rust-lang/setup-rust-toolchain@v1
- name: Rust Cache
uses: Swatinem/rust-cache@v2
with:
# Shares across multiple jobs
shared-key: "fuzz-test-targets"
- name: Set Rust Fuzz
shell: bash
run: |
sudo apt update && sudo apt install -y libfuzzer-14-dev
cargo install cargo-fuzz cargo-gc-bin
- name: Download pre-built binary
uses: actions/download-artifact@v4
with:
name: bin
path: .
- name: Unzip binary
run: |
tar -xvf ./bin.tar.gz
rm ./bin.tar.gz
- name: Run Fuzz Test
uses: ./.github/actions/fuzz-test
env:
CUSTOM_LIBFUZZER_PATH: /usr/lib/llvm-14/lib/libFuzzer.a
GT_MYSQL_ADDR: 127.0.0.1:4002
GT_FUZZ_BINARY_PATH: ./bin/greptime
GT_FUZZ_INSTANCE_ROOT_DIR: /tmp/unstable-greptime/
with:
target: ${{ matrix.target }}
max-total-time: 120
unstable: 'true'
- name: Upload unstable fuzz test logs
if: failure()
uses: actions/upload-artifact@v4
with:
name: unstable-fuzz-logs
path: /tmp/unstable-greptime/
retention-days: 3
build-greptime-ci:
name: Build GreptimeDB binary (profile-CI)
runs-on: ${{ matrix.os }}
strategy:
matrix:
os: [ ubuntu-20.04 ]
timeout-minutes: 60
steps:
- uses: actions/checkout@v4
- uses: arduino/setup-protoc@v3
with:
repo-token: ${{ secrets.GITHUB_TOKEN }}
- uses: actions-rust-lang/setup-rust-toolchain@v1
- uses: Swatinem/rust-cache@v2
with:
# Shares across multiple jobs
shared-key: "build-greptime-ci"
- name: Install cargo-gc-bin
shell: bash
run: cargo install cargo-gc-bin
- name: Check aws-lc-sys will not build
shell: bash
run: |
if cargo tree -i aws-lc-sys -e features | grep -q aws-lc-sys; then
echo "Found aws-lc-sys, which has compilation problems on older gcc versions. Please replace it with ring until its building experience improves."
exit 1
fi
- name: Build greptime binary
shell: bash
# `cargo gc` will invoke `cargo build` with specified args
run: cargo gc --profile ci -- --bin greptime
- name: Pack greptime binary
shell: bash
run: |
mkdir bin && \
mv ./target/ci/greptime bin
- name: Print greptime binaries info
run: ls -lh bin
- name: Upload artifacts
uses: ./.github/actions/upload-artifacts
with:
artifacts-dir: bin
version: current
distributed-fuzztest:
name: Fuzz Test (Distributed, ${{ matrix.mode.name }}, ${{ matrix.target }})
runs-on: ubuntu-latest
needs: build-greptime-ci
timeout-minutes: 60
strategy:
matrix:
target: [ "fuzz_create_table", "fuzz_alter_table", "fuzz_create_database", "fuzz_create_logical_table", "fuzz_alter_logical_table", "fuzz_insert", "fuzz_insert_logical_table" ]
mode:
- name: "Remote WAL"
minio: true
kafka: true
values: "with-remote-wal.yaml"
steps:
- name: Remove unused software
run: |
echo "Disk space before:"
df -h
[[ -d /usr/share/dotnet ]] && sudo rm -rf /usr/share/dotnet
[[ -d /usr/local/lib/android ]] && sudo rm -rf /usr/local/lib/android
[[ -d /opt/ghc ]] && sudo rm -rf /opt/ghc
[[ -d /opt/hostedtoolcache/CodeQL ]] && sudo rm -rf /opt/hostedtoolcache/CodeQL
sudo docker image prune --all --force
sudo docker builder prune -a
echo "Disk space after:"
df -h
- uses: actions/checkout@v4
- name: Setup Kind
uses: ./.github/actions/setup-kind
- if: matrix.mode.minio
name: Setup Minio
uses: ./.github/actions/setup-minio
- if: matrix.mode.kafka
name: Setup Kafka cluster
uses: ./.github/actions/setup-kafka-cluster
- name: Setup Etcd cluster
uses: ./.github/actions/setup-etcd-cluster
- name: Setup Postgres cluster
uses: ./.github/actions/setup-postgres-cluster
# Prepares for fuzz tests
- uses: arduino/setup-protoc@v3
with:
repo-token: ${{ secrets.GITHUB_TOKEN }}
- uses: actions-rust-lang/setup-rust-toolchain@v1
- name: Rust Cache
uses: Swatinem/rust-cache@v2
with:
# Shares across multiple jobs
shared-key: "fuzz-test-targets"
- name: Set Rust Fuzz
shell: bash
run: |
sudo apt-get install -y libfuzzer-14-dev
rustup install nightly
cargo +nightly install cargo-fuzz cargo-gc-bin
# Downloads ci image
- name: Download pre-built binary
uses: actions/download-artifact@v4
with:
name: bin
path: .
- name: Unzip binary
run: |
tar -xvf ./bin.tar.gz
rm ./bin.tar.gz
- name: Build and push GreptimeDB image
uses: ./.github/actions/build-and-push-ci-image
- name: Wait for etcd
run: |
kubectl wait \
--for=condition=Ready \
pod -l app.kubernetes.io/instance=etcd \
--timeout=120s \
-n etcd-cluster
- if: matrix.mode.minio
name: Wait for minio
run: |
kubectl wait \
--for=condition=Ready \
pod -l app=minio \
--timeout=120s \
-n minio
- if: matrix.mode.kafka
name: Wait for kafka
run: |
kubectl wait \
--for=condition=Ready \
pod -l app.kubernetes.io/instance=kafka \
--timeout=120s \
-n kafka-cluster
- name: Print etcd info
shell: bash
run: kubectl get all --show-labels -n etcd-cluster
# Setup cluster for test
- name: Setup GreptimeDB cluster
uses: ./.github/actions/setup-greptimedb-cluster
with:
image-registry: localhost:5001
values-filename: ${{ matrix.mode.values }}
- name: Port forward (mysql)
run: |
kubectl port-forward service/my-greptimedb-frontend 4002:4002 -n my-greptimedb&
- name: Fuzz Test
uses: ./.github/actions/fuzz-test
env:
CUSTOM_LIBFUZZER_PATH: /usr/lib/llvm-14/lib/libFuzzer.a
GT_MYSQL_ADDR: 127.0.0.1:4002
with:
target: ${{ matrix.target }}
max-total-time: 120
- name: Describe Nodes
if: failure()
shell: bash
run: |
kubectl describe nodes
- name: Export kind logs
if: failure()
shell: bash
run: |
kind export logs /tmp/kind
- name: Upload logs
if: failure()
uses: actions/upload-artifact@v4
with:
name: fuzz-tests-kind-logs-${{ matrix.mode.name }}-${{ matrix.target }}
path: /tmp/kind
retention-days: 3
- name: Delete cluster
if: success()
shell: bash
run: |
kind delete cluster
docker stop $(docker ps -a -q)
docker rm $(docker ps -a -q)
docker system prune -f
distributed-fuzztest-with-chaos:
name: Fuzz Test with Chaos (Distributed, ${{ matrix.mode.name }}, ${{ matrix.target }})
runs-on: ubuntu-latest
needs: build-greptime-ci
timeout-minutes: 60
strategy:
matrix:
target: ["fuzz_migrate_mito_regions", "fuzz_migrate_metric_regions", "fuzz_failover_mito_regions", "fuzz_failover_metric_regions"]
mode:
- name: "Remote WAL"
minio: true
kafka: true
values: "with-remote-wal.yaml"
include:
- target: "fuzz_migrate_mito_regions"
mode:
name: "Local WAL"
minio: true
kafka: false
values: "with-minio.yaml"
- target: "fuzz_migrate_metric_regions"
mode:
name: "Local WAL"
minio: true
kafka: false
values: "with-minio.yaml"
steps:
- name: Remove unused software
run: |
echo "Disk space before:"
df -h
[[ -d /usr/share/dotnet ]] && sudo rm -rf /usr/share/dotnet
[[ -d /usr/local/lib/android ]] && sudo rm -rf /usr/local/lib/android
[[ -d /opt/ghc ]] && sudo rm -rf /opt/ghc
[[ -d /opt/hostedtoolcache/CodeQL ]] && sudo rm -rf /opt/hostedtoolcache/CodeQL
sudo docker image prune --all --force
sudo docker builder prune -a
echo "Disk space after:"
df -h
- uses: actions/checkout@v4
- name: Setup Kind
uses: ./.github/actions/setup-kind
- name: Setup Chaos Mesh
uses: ./.github/actions/setup-chaos
- if: matrix.mode.minio
name: Setup Minio
uses: ./.github/actions/setup-minio
- if: matrix.mode.kafka
name: Setup Kafka cluster
uses: ./.github/actions/setup-kafka-cluster
- name: Setup Etcd cluster
uses: ./.github/actions/setup-etcd-cluster
- name: Setup Postgres cluster
uses: ./.github/actions/setup-postgres-cluster
# Prepares for fuzz tests
- uses: arduino/setup-protoc@v3
with:
repo-token: ${{ secrets.GITHUB_TOKEN }}
- uses: actions-rust-lang/setup-rust-toolchain@v1
- name: Rust Cache
uses: Swatinem/rust-cache@v2
with:
# Shares across multiple jobs
shared-key: "fuzz-test-targets"
- name: Set Rust Fuzz
shell: bash
run: |
sudo apt-get install -y libfuzzer-14-dev
rustup install nightly
cargo +nightly install cargo-fuzz cargo-gc-bin
# Downloads ci image
- name: Download pre-built binary
uses: actions/download-artifact@v4
with:
name: bin
path: .
- name: Unzip binary
run: |
tar -xvf ./bin.tar.gz
rm ./bin.tar.gz
- name: Build and push GreptimeDB image
uses: ./.github/actions/build-and-push-ci-image
- name: Wait for etcd
run: |
kubectl wait \
--for=condition=Ready \
pod -l app.kubernetes.io/instance=etcd \
--timeout=120s \
-n etcd-cluster
- if: matrix.mode.minio
name: Wait for minio
run: |
kubectl wait \
--for=condition=Ready \
pod -l app=minio \
--timeout=120s \
-n minio
- if: matrix.mode.kafka
name: Wait for kafka
run: |
kubectl wait \
--for=condition=Ready \
pod -l app.kubernetes.io/instance=kafka \
--timeout=120s \
-n kafka-cluster
- name: Print etcd info
shell: bash
run: kubectl get all --show-labels -n etcd-cluster
# Setup cluster for test
- name: Setup GreptimeDB cluster
uses: ./.github/actions/setup-greptimedb-cluster
with:
image-registry: localhost:5001
values-filename: ${{ matrix.mode.values }}
enable-region-failover: ${{ matrix.mode.kafka }}
- name: Port forward (mysql)
run: |
kubectl port-forward service/my-greptimedb-frontend 4002:4002 -n my-greptimedb&
- name: Fuzz Test
uses: ./.github/actions/fuzz-test
env:
CUSTOM_LIBFUZZER_PATH: /usr/lib/llvm-14/lib/libFuzzer.a
GT_MYSQL_ADDR: 127.0.0.1:4002
with:
target: ${{ matrix.target }}
max-total-time: 120
- name: Describe Nodes
if: failure()
shell: bash
run: |
kubectl describe nodes
- name: Export kind logs
if: failure()
shell: bash
run: |
kind export logs /tmp/kind
- name: Upload logs
if: failure()
uses: actions/upload-artifact@v4
with:
name: fuzz-tests-kind-logs-${{ matrix.mode.name }}-${{ matrix.target }}
path: /tmp/kind
retention-days: 3
- name: Delete cluster
if: success()
shell: bash
run: |
kind delete cluster
docker stop $(docker ps -a -q)
docker rm $(docker ps -a -q)
docker system prune -f
# Use coverage to run test.
# test:
# name: Test Suite
# if: github.event.pull_request.draft == false
# runs-on: ubuntu-latest
# timeout-minutes: 60
# steps:
# - uses: actions/checkout@v3
# - name: Cache LLVM and Clang
# id: cache-llvm
# uses: actions/cache@v3
# with:
# path: ./llvm
# key: llvm
# - uses: arduino/setup-protoc@v1
# with:
# repo-token: ${{ secrets.GITHUB_TOKEN }}
# - uses: KyleMayes/install-llvm-action@v1
# with:
# version: "14.0"
# cached: ${{ steps.cache-llvm.outputs.cache-hit }}
# - uses: dtolnay/rust-toolchain@master
# with:
# toolchain: ${{ env.RUST_TOOLCHAIN }}
# - name: Rust Cache
# uses: Swatinem/rust-cache@v2
# - name: Cleanup disk
# uses: curoky/cleanup-disk-action@v2.0
# with:
# retain: 'rust,llvm'
# - name: Install latest nextest release
# uses: taiki-e/install-action@nextest
# - name: Run tests
# run: cargo nextest run
# env:
# CARGO_BUILD_RUSTFLAGS: "-C link-arg=-fuse-ld=lld"
# RUST_BACKTRACE: 1
# GT_S3_BUCKET: ${{ secrets.S3_BUCKET }}
# GT_S3_ACCESS_KEY_ID: ${{ secrets.S3_ACCESS_KEY_ID }}
# GT_S3_ACCESS_KEY: ${{ secrets.S3_ACCESS_KEY }}
# UNITTEST_LOG_DIR: "__unittest_logs"
sqlness:
name: Sqlness Test (${{ matrix.mode.name }})
needs: build
runs-on: ${{ matrix.os }}
strategy:
matrix:
os: [ ubuntu-20.04 ]
mode:
- name: "Basic"
opts: ""
kafka: false
- name: "Remote WAL"
opts: "-w kafka -k 127.0.0.1:9092"
kafka: true
name: Sqlness Test
if: github.event.pull_request.draft == false
runs-on: ubuntu-latest-8-cores
timeout-minutes: 60
needs: [clippy]
steps:
- uses: actions/checkout@v4
- if: matrix.mode.kafka
name: Setup kafka server
working-directory: tests-integration/fixtures/kafka
run: docker compose -f docker-compose-standalone.yml up -d --wait
- name: Download pre-built binaries
uses: actions/download-artifact@v4
- uses: actions/checkout@v3
- uses: arduino/setup-protoc@v1
with:
name: bins
path: .
- name: Unzip binaries
run: tar -xvf ./bins.tar.gz
repo-token: ${{ secrets.GITHUB_TOKEN }}
- uses: dtolnay/rust-toolchain@master
with:
toolchain: ${{ env.RUST_TOOLCHAIN }}
- name: Rust Cache
uses: Swatinem/rust-cache@v2
- name: Run etcd
run: |
ETCD_VER=v3.5.7
DOWNLOAD_URL=https://github.com/etcd-io/etcd/releases/download
curl -L ${DOWNLOAD_URL}/${ETCD_VER}/etcd-${ETCD_VER}-linux-amd64.tar.gz -o /tmp/etcd-${ETCD_VER}-linux-amd64.tar.gz
mkdir -p /tmp/etcd-download
tar xzvf /tmp/etcd-${ETCD_VER}-linux-amd64.tar.gz -C /tmp/etcd-download --strip-components=1
rm -f /tmp/etcd-${ETCD_VER}-linux-amd64.tar.gz
sudo cp -a /tmp/etcd-download/etcd* /usr/local/bin/
nohup etcd >/tmp/etcd.log 2>&1 &
- name: Run sqlness
run: RUST_BACKTRACE=1 ./bins/sqlness-runner ${{ matrix.mode.opts }} -c ./tests/cases --bins-dir ./bins --preserve-state
run: cargo sqlness && ls /tmp
- name: Upload sqlness logs
if: failure()
uses: actions/upload-artifact@v4
uses: actions/upload-artifact@v3
with:
name: sqlness-logs-${{ matrix.mode.name }}
path: /tmp/sqlness*
name: sqlness-logs
path: /tmp/greptime-*.log
retention-days: 3
fmt:
name: Rustfmt
runs-on: ubuntu-20.04
if: github.event.pull_request.draft == false
runs-on: ubuntu-latest
timeout-minutes: 60
steps:
- uses: actions/checkout@v4
- uses: arduino/setup-protoc@v3
- uses: actions/checkout@v3
- uses: arduino/setup-protoc@v1
with:
repo-token: ${{ secrets.GITHUB_TOKEN }}
- uses: actions-rust-lang/setup-rust-toolchain@v1
- uses: dtolnay/rust-toolchain@master
with:
toolchain: ${{ env.RUST_TOOLCHAIN }}
components: rustfmt
- name: Rust Cache
uses: Swatinem/rust-cache@v2
with:
# Shares across multiple jobs
shared-key: "check-rust-fmt"
- name: Check format
run: make fmt-check
- name: Run cargo fmt
run: cargo fmt --all -- --check
clippy:
name: Clippy
runs-on: ubuntu-20.04
if: github.event.pull_request.draft == false
runs-on: ubuntu-latest
timeout-minutes: 60
steps:
- uses: actions/checkout@v4
- uses: arduino/setup-protoc@v3
- uses: actions/checkout@v3
- uses: arduino/setup-protoc@v1
with:
repo-token: ${{ secrets.GITHUB_TOKEN }}
- uses: actions-rust-lang/setup-rust-toolchain@v1
- uses: dtolnay/rust-toolchain@master
with:
toolchain: ${{ env.RUST_TOOLCHAIN }}
components: clippy
- name: Rust Cache
uses: Swatinem/rust-cache@v2
with:
# Shares across multiple jobs
# Shares with `Check` job
shared-key: "check-lint"
- name: Run cargo clippy
run: make clippy
run: cargo clippy --workspace --all-targets -- -D warnings
coverage:
if: github.event.pull_request.draft == false
runs-on: ubuntu-20.04-8-cores
runs-on: ubuntu-latest-8-cores
timeout-minutes: 60
needs: [clippy]
steps:
- uses: actions/checkout@v4
- uses: arduino/setup-protoc@v3
- uses: actions/checkout@v3
- uses: arduino/setup-protoc@v1
with:
repo-token: ${{ secrets.GITHUB_TOKEN }}
- uses: KyleMayes/install-llvm-action@v1
with:
version: "14.0"
- name: Install toolchain
uses: actions-rust-lang/setup-rust-toolchain@v1
uses: dtolnay/rust-toolchain@master
with:
toolchain: ${{ env.RUST_TOOLCHAIN }}
components: llvm-tools-preview
- name: Rust Cache
uses: Swatinem/rust-cache@v2
with:
# Shares across multiple jobs
shared-key: "coverage-test"
- name: Docker Cache
uses: ScribeMD/docker-cache@0.3.7
with:
key: docker-${{ runner.os }}-coverage
- name: Install latest nextest release
uses: taiki-e/install-action@nextest
- name: Install cargo-llvm-cov
uses: taiki-e/install-action@cargo-llvm-cov
- name: Install Python
uses: actions/setup-python@v5
uses: actions/setup-python@v4
with:
python-version: '3.10'
- name: Install PyArrow Package
run: pip install pyarrow numpy
- name: Setup etcd server
working-directory: tests-integration/fixtures/etcd
run: docker compose -f docker-compose-standalone.yml up -d --wait
- name: Setup kafka server
working-directory: tests-integration/fixtures/kafka
run: docker compose -f docker-compose-standalone.yml up -d --wait
- name: Setup minio
working-directory: tests-integration/fixtures/minio
run: docker compose -f docker-compose-standalone.yml up -d --wait
- name: Setup postgres server
working-directory: tests-integration/fixtures/postgres
run: docker compose -f docker-compose-standalone.yml up -d --wait
- name: Run nextest cases
run: cargo llvm-cov nextest --workspace --lcov --output-path lcov.info -F pyo3_backend -F dashboard
run: pip install pyarrow
- name: Install cargo-llvm-cov
uses: taiki-e/install-action@cargo-llvm-cov
- name: Collect coverage data
run: cargo llvm-cov nextest --workspace --lcov --output-path lcov.info -F pyo3_backend
env:
CARGO_BUILD_RUSTFLAGS: "-C link-arg=-fuse-ld=lld"
RUST_BACKTRACE: 1
CARGO_INCREMENTAL: 0
GT_S3_BUCKET: ${{ vars.AWS_CI_TEST_BUCKET }}
GT_S3_ACCESS_KEY_ID: ${{ secrets.AWS_CI_TEST_ACCESS_KEY_ID }}
GT_S3_ACCESS_KEY: ${{ secrets.AWS_CI_TEST_SECRET_ACCESS_KEY }}
GT_S3_REGION: ${{ vars.AWS_CI_TEST_BUCKET_REGION }}
GT_MINIO_BUCKET: greptime
GT_MINIO_ACCESS_KEY_ID: superpower_ci_user
GT_MINIO_ACCESS_KEY: superpower_password
GT_MINIO_REGION: us-west-2
GT_MINIO_ENDPOINT_URL: http://127.0.0.1:9000
GT_ETCD_ENDPOINTS: http://127.0.0.1:2379
GT_POSTGRES_ENDPOINTS: postgres://greptimedb:admin@127.0.0.1:5432/postgres
GT_KAFKA_ENDPOINTS: 127.0.0.1:9092
GT_KAFKA_SASL_ENDPOINTS: 127.0.0.1:9093
GT_S3_BUCKET: ${{ secrets.S3_BUCKET }}
GT_S3_ACCESS_KEY_ID: ${{ secrets.S3_ACCESS_KEY_ID }}
GT_S3_ACCESS_KEY: ${{ secrets.S3_ACCESS_KEY }}
UNITTEST_LOG_DIR: "__unittest_logs"
- name: Codecov upload
uses: codecov/codecov-action@v4
uses: codecov/codecov-action@v2
with:
token: ${{ secrets.CODECOV_TOKEN }}
files: ./lcov.info
flags: rust
fail_ci_if_error: false
verbose: true
# compat:
# name: Compatibility Test
# needs: build
# runs-on: ubuntu-20.04
# timeout-minutes: 60
# steps:
# - uses: actions/checkout@v4
# - name: Download pre-built binaries
# uses: actions/download-artifact@v4
# with:
# name: bins
# path: .
# - name: Unzip binaries
# run: |
# mkdir -p ./bins/current
# tar -xvf ./bins.tar.gz --strip-components=1 -C ./bins/current
# - run: ./tests/compat/test-compat.sh 0.6.0

.github/workflows/doc-issue.yml (new file)
View File

@@ -0,0 +1,39 @@
name: Create Issue in downstream repos
on:
issues:
types:
- labeled
pull_request_target:
types:
- labeled
jobs:
doc_issue:
if: github.event.label.name == 'doc update required'
runs-on: ubuntu-latest
steps:
- name: create an issue in doc repo
uses: dacbd/create-issue-action@main
with:
owner: GreptimeTeam
repo: docs
token: ${{ secrets.DOCS_REPO_TOKEN }}
title: Update docs for ${{ github.event.issue.title || github.event.pull_request.title }}
body: |
A document change request is generated from
${{ github.event.issue.html_url || github.event.pull_request.html_url }}
cloud_issue:
if: github.event.label.name == 'cloud followup required'
runs-on: ubuntu-latest
steps:
- name: create an issue in cloud repo
uses: dacbd/create-issue-action@main
with:
owner: GreptimeTeam
repo: greptimedb-cloud
token: ${{ secrets.DOCS_REPO_TOKEN }}
title: Followup changes in ${{ github.event.issue.title || github.event.pull_request.title }}
body: |
A followup request is generated from
${{ github.event.issue.html_url || github.event.pull_request.html_url }}
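Since both jobs are label-triggered, a hedged sketch of applying the trigger labels from the command line with the GitHub CLI (the issue and PR numbers are placeholders):

# Sketch only: add the labels that trigger the doc/cloud follow-up issues.
gh issue edit 123 --add-label "doc update required"
gh pr edit 456 --add-label "cloud followup required"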

View File

@@ -1,22 +0,0 @@
name: Follow Up Docs
on:
pull_request_target:
types: [opened, edited]
permissions:
pull-requests: write
contents: read
jobs:
docbot:
runs-on: ubuntu-20.04
timeout-minutes: 10
steps:
- uses: actions/checkout@v4
- uses: ./.github/actions/setup-cyborg
- name: Maybe Follow Up Docs Issue
working-directory: cyborg
run: pnpm tsx bin/follow-up-docs-issue.ts
env:
DOCS_REPO_TOKEN: ${{ secrets.DOCS_REPO_TOKEN }}
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}

View File

@@ -1,5 +1,4 @@
on:
merge_group:
pull_request:
types: [opened, synchronize, reopened, ready_for_review]
paths:
@@ -9,9 +8,9 @@ on:
- '.dockerignore'
- 'docker/**'
- '.gitignore'
- 'grafana/**'
push:
branches:
- develop
- main
paths:
- 'docs/**'
@@ -20,7 +19,6 @@ on:
- '.dockerignore'
- 'docker/**'
- '.gitignore'
- 'grafana/**'
workflow_dispatch:
name: CI
@@ -29,51 +27,29 @@ name: CI
# https://docs.github.com/en/repositories/configuring-branches-and-merges-in-your-repository/defining-the-mergeability-of-pull-requests/troubleshooting-required-status-checks#handling-skipped-but-required-checks
jobs:
typos:
name: Spell Check with Typos
runs-on: ubuntu-20.04
steps:
- uses: actions/checkout@v4
- uses: crate-ci/typos@master
license-header-check:
runs-on: ubuntu-20.04
name: Check License Header
steps:
- uses: actions/checkout@v4
- uses: korandoru/hawkeye@v5
check:
name: Check
runs-on: ubuntu-20.04
if: github.event.pull_request.draft == false
runs-on: ubuntu-latest
steps:
- run: 'echo "No action required"'
fmt:
name: Rustfmt
runs-on: ubuntu-20.04
if: github.event.pull_request.draft == false
runs-on: ubuntu-latest
steps:
- run: 'echo "No action required"'
clippy:
name: Clippy
runs-on: ubuntu-20.04
if: github.event.pull_request.draft == false
runs-on: ubuntu-latest
steps:
- run: 'echo "No action required"'
coverage:
runs-on: ubuntu-20.04
steps:
- run: 'echo "No action required"'
sqlness:
name: Sqlness Test (${{ matrix.mode.name }})
runs-on: ${{ matrix.os }}
strategy:
matrix:
os: [ ubuntu-20.04 ]
mode:
- name: "Basic"
- name: "Remote WAL"
if: github.event.pull_request.draft == false
runs-on: ubuntu-latest
steps:
- run: 'echo "No action required"'

.github/workflows/license.yaml (new file)
View File

@@ -0,0 +1,16 @@
name: License checker
on:
push:
branches:
- develop
pull_request:
types: [opened, synchronize, reopened, ready_for_review]
jobs:
license-header-check:
runs-on: ubuntu-latest
name: license-header-check
steps:
- uses: actions/checkout@v2
- name: Check License Header
uses: apache/skywalking-eyes/header@df70871af1a8109c9a5b1dc824faaf65246c5236

View File

@@ -1,330 +0,0 @@
# The nightly build only does the following things:
# 1. Run integration tests;
# 2. Build binaries and images for the linux-amd64 and linux-arm64 platforms;
name: GreptimeDB Nightly Build
on:
schedule:
# Trigger at 00:00 (UTC) every day-of-week from Monday through Friday.
- cron: '0 0 * * 1-5'
workflow_dispatch: # Allows you to run this workflow manually.
inputs:
linux_amd64_runner:
type: choice
description: The runner used to build linux-amd64 artifacts
default: ec2-c6i.2xlarge-amd64
options:
- ubuntu-20.04
- ubuntu-20.04-8-cores
- ubuntu-20.04-16-cores
- ubuntu-20.04-32-cores
- ubuntu-20.04-64-cores
- ec2-c6i.xlarge-amd64 # 4C8G
- ec2-c6i.2xlarge-amd64 # 8C16G
- ec2-c6i.4xlarge-amd64 # 16C32G
- ec2-c6i.8xlarge-amd64 # 32C64G
- ec2-c6i.16xlarge-amd64 # 64C128G
linux_arm64_runner:
type: choice
description: The runner used to build linux-arm64 artifacts
default: ec2-c6g.2xlarge-arm64
options:
- ec2-c6g.xlarge-arm64 # 4C8G
- ec2-c6g.2xlarge-arm64 # 8C16G
- ec2-c6g.4xlarge-arm64 # 16C32G
- ec2-c6g.8xlarge-arm64 # 32C64G
- ec2-c6g.16xlarge-arm64 # 64C128G
skip_test:
description: Do not run integration tests during the build
type: boolean
default: true
build_linux_amd64_artifacts:
type: boolean
description: Build linux-amd64 artifacts
required: false
default: false
build_linux_arm64_artifacts:
type: boolean
description: Build linux-arm64 artifacts
required: false
default: false
release_images:
type: boolean
description: Build and push images to DockerHub and ACR
required: false
default: false
# Use env variables to control the whole release process.
env:
CARGO_PROFILE: nightly
# Controls whether to run tests, including unit tests, integration tests and sqlness.
DISABLE_RUN_TESTS: ${{ inputs.skip_test || vars.DEFAULT_SKIP_TEST }}
# Always use 'nightly' to indicate it's the nightly build.
NEXT_RELEASE_VERSION: nightly
NIGHTLY_RELEASE_PREFIX: nightly
# Use the different image name to avoid conflict with the release images.
# The DockerHub image will be greptime/greptimedb-nightly.
IMAGE_NAME: greptimedb-nightly
permissions:
issues: write
jobs:
allocate-runners:
name: Allocate runners
if: ${{ github.repository == 'GreptimeTeam/greptimedb' }}
runs-on: ubuntu-20.04
outputs:
linux-amd64-runner: ${{ steps.start-linux-amd64-runner.outputs.label }}
linux-arm64-runner: ${{ steps.start-linux-arm64-runner.outputs.label }}
# The following EC2 resource id will be used for resource releasing.
linux-amd64-ec2-runner-label: ${{ steps.start-linux-amd64-runner.outputs.label }}
linux-amd64-ec2-runner-instance-id: ${{ steps.start-linux-amd64-runner.outputs.ec2-instance-id }}
linux-arm64-ec2-runner-label: ${{ steps.start-linux-arm64-runner.outputs.label }}
linux-arm64-ec2-runner-instance-id: ${{ steps.start-linux-arm64-runner.outputs.ec2-instance-id }}
# The 'version' is used as the global tag name of the release workflow.
version: ${{ steps.create-version.outputs.version }}
steps:
- name: Checkout
uses: actions/checkout@v4
with:
fetch-depth: 0
- name: Create version
id: create-version
run: |
version=$(./.github/scripts/create-version.sh) && \
echo $version && \
echo "version=$version" >> $GITHUB_OUTPUT
env:
GITHUB_EVENT_NAME: ${{ github.event_name }}
GITHUB_REF_NAME: ${{ github.ref_name }}
NEXT_RELEASE_VERSION: ${{ env.NEXT_RELEASE_VERSION }}
NIGHTLY_RELEASE_PREFIX: ${{ env.NIGHTLY_RELEASE_PREFIX }}
- name: Allocate linux-amd64 runner
if: ${{ inputs.build_linux_amd64_artifacts || github.event_name == 'schedule' }}
uses: ./.github/actions/start-runner
id: start-linux-amd64-runner
with:
runner: ${{ inputs.linux_amd64_runner || vars.DEFAULT_AMD64_RUNNER }}
aws-access-key-id: ${{ secrets.AWS_ACCESS_KEY_ID }}
aws-secret-access-key: ${{ secrets.AWS_SECRET_ACCESS_KEY }}
aws-region: ${{ vars.EC2_RUNNER_REGION }}
github-token: ${{ secrets.GH_PERSONAL_ACCESS_TOKEN }}
image-id: ${{ vars.EC2_RUNNER_LINUX_AMD64_IMAGE_ID }}
security-group-id: ${{ vars.EC2_RUNNER_SECURITY_GROUP_ID }}
subnet-id: ${{ vars.EC2_RUNNER_SUBNET_ID }}
- name: Allocate linux-arm64 runner
if: ${{ inputs.build_linux_arm64_artifacts || github.event_name == 'schedule' }}
uses: ./.github/actions/start-runner
id: start-linux-arm64-runner
with:
runner: ${{ inputs.linux_arm64_runner || vars.DEFAULT_ARM64_RUNNER }}
aws-access-key-id: ${{ secrets.AWS_ACCESS_KEY_ID }}
aws-secret-access-key: ${{ secrets.AWS_SECRET_ACCESS_KEY }}
aws-region: ${{ vars.EC2_RUNNER_REGION }}
github-token: ${{ secrets.GH_PERSONAL_ACCESS_TOKEN }}
image-id: ${{ vars.EC2_RUNNER_LINUX_ARM64_IMAGE_ID }}
security-group-id: ${{ vars.EC2_RUNNER_SECURITY_GROUP_ID }}
subnet-id: ${{ vars.EC2_RUNNER_SUBNET_ID }}
build-linux-amd64-artifacts:
name: Build linux-amd64 artifacts
if: ${{ inputs.build_linux_amd64_artifacts || github.event_name == 'schedule' }}
needs: [
allocate-runners,
]
runs-on: ${{ needs.allocate-runners.outputs.linux-amd64-runner }}
steps:
- uses: actions/checkout@v4
with:
fetch-depth: 0
- uses: ./.github/actions/build-linux-artifacts
with:
arch: amd64
cargo-profile: ${{ env.CARGO_PROFILE }}
version: ${{ needs.allocate-runners.outputs.version }}
disable-run-tests: ${{ env.DISABLE_RUN_TESTS }}
image-registry: ${{ vars.ECR_IMAGE_REGISTRY }}
image-namespace: ${{ vars.ECR_IMAGE_NAMESPACE }}
build-linux-arm64-artifacts:
name: Build linux-arm64 artifacts
if: ${{ inputs.build_linux_arm64_artifacts || github.event_name == 'schedule' }}
needs: [
allocate-runners,
]
runs-on: ${{ needs.allocate-runners.outputs.linux-arm64-runner }}
steps:
- uses: actions/checkout@v4
with:
fetch-depth: 0
- uses: ./.github/actions/build-linux-artifacts
with:
arch: arm64
cargo-profile: ${{ env.CARGO_PROFILE }}
version: ${{ needs.allocate-runners.outputs.version }}
disable-run-tests: ${{ env.DISABLE_RUN_TESTS }}
image-registry: ${{ vars.ECR_IMAGE_REGISTRY }}
image-namespace: ${{ vars.ECR_IMAGE_NAMESPACE }}
release-images-to-dockerhub:
name: Build and push images to DockerHub
if: ${{ inputs.release_images || github.event_name == 'schedule' }}
needs: [
allocate-runners,
build-linux-amd64-artifacts,
build-linux-arm64-artifacts,
]
runs-on: ubuntu-20.04
outputs:
nightly-build-result: ${{ steps.set-nightly-build-result.outputs.nightly-build-result }}
steps:
- uses: actions/checkout@v4
with:
fetch-depth: 0
- name: Build and push images to dockerhub
uses: ./.github/actions/build-images
with:
image-registry: docker.io
image-namespace: ${{ vars.IMAGE_NAMESPACE }}
image-name: ${{ env.IMAGE_NAME }}
image-registry-username: ${{ secrets.DOCKERHUB_USERNAME }}
image-registry-password: ${{ secrets.DOCKERHUB_TOKEN }}
version: ${{ needs.allocate-runners.outputs.version }}
push-latest-tag: true
- name: Set nightly build result
id: set-nightly-build-result
run: |
echo "nightly-build-result=success" >> $GITHUB_OUTPUT
release-cn-artifacts:
name: Release artifacts to CN region
if: ${{ inputs.release_images || github.event_name == 'schedule' }}
needs: [
allocate-runners,
release-images-to-dockerhub,
]
runs-on: ubuntu-20.04
# Pushing to ACR can easily fail due to transient network issues.
# However, we don't want to fail the whole workflow because of this.
# The ACR has a daily sync with DockerHub, so don't worry about the image not being updated.
continue-on-error: true
steps:
- uses: actions/checkout@v4
with:
fetch-depth: 0
- name: Release artifacts to CN region
uses: ./.github/actions/release-cn-artifacts
with:
src-image-registry: docker.io
src-image-namespace: ${{ vars.IMAGE_NAMESPACE }}
src-image-name: ${{ env.IMAGE_NAME }}
dst-image-registry-username: ${{ secrets.ALICLOUD_USERNAME }}
dst-image-registry-password: ${{ secrets.ALICLOUD_PASSWORD }}
dst-image-registry: ${{ vars.ACR_IMAGE_REGISTRY }}
dst-image-namespace: ${{ vars.IMAGE_NAMESPACE }}
version: ${{ needs.allocate-runners.outputs.version }}
aws-cn-s3-bucket: ${{ vars.AWS_RELEASE_BUCKET }}
aws-cn-access-key-id: ${{ secrets.AWS_CN_ACCESS_KEY_ID }}
aws-cn-secret-access-key: ${{ secrets.AWS_CN_SECRET_ACCESS_KEY }}
aws-cn-region: ${{ vars.AWS_RELEASE_BUCKET_REGION }}
dev-mode: false
update-version-info: false # Don't update version info in S3.
push-latest-tag: true
stop-linux-amd64-runner: # It's always run as the last job in the workflow to make sure that the runner is released.
name: Stop linux-amd64 runner
# Only run this job when the runner is allocated.
if: ${{ always() }}
runs-on: ubuntu-20.04
needs: [
allocate-runners,
build-linux-amd64-artifacts,
]
steps:
- name: Checkout
uses: actions/checkout@v4
with:
fetch-depth: 0
- name: Stop EC2 runner
uses: ./.github/actions/stop-runner
with:
label: ${{ needs.allocate-runners.outputs.linux-amd64-ec2-runner-label }}
ec2-instance-id: ${{ needs.allocate-runners.outputs.linux-amd64-ec2-runner-instance-id }}
aws-access-key-id: ${{ secrets.AWS_ACCESS_KEY_ID }}
aws-secret-access-key: ${{ secrets.AWS_SECRET_ACCESS_KEY }}
aws-region: ${{ vars.EC2_RUNNER_REGION }}
github-token: ${{ secrets.GH_PERSONAL_ACCESS_TOKEN }}
stop-linux-arm64-runner: # It's always run as the last job in the workflow to make sure that the runner is released.
name: Stop linux-arm64 runner
# Only run this job when the runner is allocated.
if: ${{ always() }}
runs-on: ubuntu-20.04
needs: [
allocate-runners,
build-linux-arm64-artifacts,
]
steps:
- name: Checkout
uses: actions/checkout@v4
with:
fetch-depth: 0
- name: Stop EC2 runner
uses: ./.github/actions/stop-runner
with:
label: ${{ needs.allocate-runners.outputs.linux-arm64-ec2-runner-label }}
ec2-instance-id: ${{ needs.allocate-runners.outputs.linux-arm64-ec2-runner-instance-id }}
aws-access-key-id: ${{ secrets.AWS_ACCESS_KEY_ID }}
aws-secret-access-key: ${{ secrets.AWS_SECRET_ACCESS_KEY }}
aws-region: ${{ vars.EC2_RUNNER_REGION }}
github-token: ${{ secrets.GH_PERSONAL_ACCESS_TOKEN }}
notification:
if: ${{ github.repository == 'GreptimeTeam/greptimedb' && always() }} # Not requiring successful dependent jobs, always run.
name: Send notification to Greptime team
needs: [
release-images-to-dockerhub
]
runs-on: ubuntu-20.04
env:
SLACK_WEBHOOK_URL: ${{ secrets.SLACK_WEBHOOK_URL_DEVELOP_CHANNEL }}
steps:
- uses: actions/checkout@v4
- uses: ./.github/actions/setup-cyborg
- name: Report CI status
id: report-ci-status
working-directory: cyborg
run: pnpm tsx bin/report-ci-failure.ts
env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
CI_REPORT_STATUS: ${{ needs.release-images-to-dockerhub.outputs.nightly-build-result == 'success' }}
- name: Notify nightly build successful result
uses: slackapi/slack-github-action@v1.23.0
if: ${{ needs.release-images-to-dockerhub.outputs.nightly-build-result == 'success' }}
with:
payload: |
{"text": "GreptimeDB's ${{ env.NEXT_RELEASE_VERSION }} build has completed successfully."}
- name: Notify nightly build failed result
uses: slackapi/slack-github-action@v1.23.0
if: ${{ needs.release-images-to-dockerhub.outputs.nightly-build-result != 'success' }}
with:
payload: |
{"text": "GreptimeDB's ${{ env.NEXT_RELEASE_VERSION }} build has failed, please check ${{ steps.report-ci-status.outputs.html_url }}."}

View File

@@ -1,155 +0,0 @@
on:
schedule:
- cron: "0 23 * * 1-5"
workflow_dispatch:
name: Nightly CI
concurrency:
group: ${{ github.workflow }}-${{ github.head_ref || github.run_id }}
cancel-in-progress: true
permissions:
issues: write
jobs:
sqlness-test:
name: Run sqlness test
if: ${{ github.repository == 'GreptimeTeam/greptimedb' }}
runs-on: ubuntu-22.04
steps:
- name: Checkout
uses: actions/checkout@v4
with:
fetch-depth: 0
- name: Run sqlness test
uses: ./.github/actions/sqlness-test
with:
data-root: sqlness-test
aws-ci-test-bucket: ${{ vars.AWS_CI_TEST_BUCKET }}
aws-region: ${{ vars.AWS_CI_TEST_BUCKET_REGION }}
aws-access-key-id: ${{ secrets.AWS_CI_TEST_ACCESS_KEY_ID }}
aws-secret-access-key: ${{ secrets.AWS_CI_TEST_SECRET_ACCESS_KEY }}
- name: Upload sqlness logs
if: failure()
uses: actions/upload-artifact@v4
with:
name: sqlness-logs-kind
path: /tmp/kind/
retention-days: 3
sqlness-windows:
name: Sqlness tests on Windows
if: ${{ github.repository == 'GreptimeTeam/greptimedb' }}
runs-on: windows-2022-8-cores
timeout-minutes: 60
steps:
- uses: actions/checkout@v4
- uses: ./.github/actions/setup-cyborg
- uses: arduino/setup-protoc@v3
with:
repo-token: ${{ secrets.GITHUB_TOKEN }}
- uses: actions-rust-lang/setup-rust-toolchain@v1
- name: Rust Cache
uses: Swatinem/rust-cache@v2
- name: Run sqlness
run: make sqlness-test
env:
SQLNESS_OPTS: "--preserve-state"
- name: Upload sqlness logs
if: failure()
uses: actions/upload-artifact@v4
with:
name: sqlness-logs
path: C:\Users\RUNNER~1\AppData\Local\Temp\sqlness*
retention-days: 3
test-on-windows:
name: Run tests on Windows
if: ${{ github.repository == 'GreptimeTeam/greptimedb' }}
runs-on: windows-2022-8-cores
timeout-minutes: 60
steps:
- run: git config --global core.autocrlf false
- uses: actions/checkout@v4
- uses: ./.github/actions/setup-cyborg
- uses: arduino/setup-protoc@v3
with:
repo-token: ${{ secrets.GITHUB_TOKEN }}
- uses: KyleMayes/install-llvm-action@v1
with:
version: "14.0"
- name: Install Rust toolchain
uses: actions-rust-lang/setup-rust-toolchain@v1
with:
components: llvm-tools-preview
- name: Rust Cache
uses: Swatinem/rust-cache@v2
- name: Install Cargo Nextest
uses: taiki-e/install-action@nextest
- name: Install Python
uses: actions/setup-python@v5
with:
python-version: "3.10"
- name: Install PyArrow Package
run: pip install pyarrow numpy
- name: Install WSL distribution
uses: Vampire/setup-wsl@v2
with:
distribution: Ubuntu-22.04
- name: Running tests
run: cargo nextest run -F pyo3_backend,dashboard
env:
CARGO_BUILD_RUSTFLAGS: "-C linker=lld-link"
RUST_BACKTRACE: 1
CARGO_INCREMENTAL: 0
RUSTUP_WINDOWS_PATH_ADD_BIN: 1 # Workaround for https://github.com/nextest-rs/nextest/issues/1493
GT_S3_BUCKET: ${{ vars.AWS_CI_TEST_BUCKET }}
GT_S3_ACCESS_KEY_ID: ${{ secrets.AWS_CI_TEST_ACCESS_KEY_ID }}
GT_S3_ACCESS_KEY: ${{ secrets.AWS_CI_TEST_SECRET_ACCESS_KEY }}
GT_S3_REGION: ${{ vars.AWS_CI_TEST_BUCKET_REGION }}
UNITTEST_LOG_DIR: "__unittest_logs"
check-status:
name: Check status
needs: [sqlness-test, sqlness-windows, test-on-windows]
if: ${{ github.repository == 'GreptimeTeam/greptimedb' }}
runs-on: ubuntu-20.04
outputs:
check-result: ${{ steps.set-check-result.outputs.check-result }}
steps:
- name: Set check result
id: set-check-result
run: |
echo "check-result=success" >> $GITHUB_OUTPUT
notification:
if: ${{ github.repository == 'GreptimeTeam/greptimedb' && always() }} # Not requiring successful dependent jobs, always run.
name: Send notification to Greptime team
needs: [check-status]
runs-on: ubuntu-20.04
env:
SLACK_WEBHOOK_URL: ${{ secrets.SLACK_WEBHOOK_URL_DEVELOP_CHANNEL }}
steps:
- uses: actions/checkout@v4
- uses: ./.github/actions/setup-cyborg
- name: Report CI status
id: report-ci-status
working-directory: cyborg
run: pnpm tsx bin/report-ci-failure.ts
env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
CI_REPORT_STATUS: ${{ needs.check-status.outputs.check-result == 'success' }}
- name: Notify dev build successful result
uses: slackapi/slack-github-action@v1.23.0
if: ${{ needs.check-status.outputs.check-result == 'success' }}
with:
payload: |
{"text": "Nightly CI has completed successfully."}
- name: Notify dev build failed result
uses: slackapi/slack-github-action@v1.23.0
if: ${{ needs.check-status.outputs.check-result != 'success' }}
with:
payload: |
{"text": "Nightly CI failed has failed, please check ${{ steps.report-ci-status.outputs.html_url }}."}

29
.github/workflows/pr-title-checker.yml vendored Normal file
View File

@@ -0,0 +1,29 @@
name: "PR Title Checker"
on:
pull_request_target:
types:
- opened
- edited
- synchronize
- labeled
- unlabeled
jobs:
check:
runs-on: ubuntu-latest
timeout-minutes: 10
steps:
- uses: thehanimo/pr-title-checker@v1.3.4
with:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
pass_on_octokit_error: false
configuration_path: ".github/pr-title-checker-config.json"
breaking:
runs-on: ubuntu-latest
timeout-minutes: 10
steps:
- uses: thehanimo/pr-title-checker@v1.3.4
with:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
pass_on_octokit_error: false
configuration_path: ".github/pr-title-breaking-change-label-config.json"

View File

@@ -1,172 +0,0 @@
name: Release dev-builder images
on:
push:
branches:
- main
paths:
- rust-toolchain.toml
- 'docker/dev-builder/**'
workflow_dispatch: # Allows you to run this workflow manually.
inputs:
release_dev_builder_ubuntu_image:
type: boolean
description: Release dev-builder-ubuntu image
required: false
default: false
release_dev_builder_centos_image:
type: boolean
description: Release dev-builder-centos image
required: false
default: false
release_dev_builder_android_image:
type: boolean
description: Release dev-builder-android image
required: false
default: false
jobs:
release-dev-builder-images:
name: Release dev builder images
if: ${{ inputs.release_dev_builder_ubuntu_image || inputs.release_dev_builder_centos_image || inputs.release_dev_builder_android_image }} # Only run this job when it's manually triggered and at least one image is selected.
runs-on: ubuntu-20.04-16-cores
outputs:
version: ${{ steps.set-version.outputs.version }}
steps:
- name: Checkout
uses: actions/checkout@v4
with:
fetch-depth: 0
- name: Configure build image version
id: set-version
shell: bash
run: |
commitShortSHA=`echo ${{ github.sha }} | cut -c1-8`
buildTime=`date +%Y%m%d%H%M%S`
BUILD_VERSION="$commitShortSHA-$buildTime"
RUST_TOOLCHAIN_VERSION=$(cat rust-toolchain.toml | grep -Eo '[0-9]{4}-[0-9]{2}-[0-9]{2}')
IMAGE_VERSION="${RUST_TOOLCHAIN_VERSION}-${BUILD_VERSION}"
echo "VERSION=${IMAGE_VERSION}" >> $GITHUB_ENV
echo "version=$IMAGE_VERSION" >> $GITHUB_OUTPUT
- name: Build and push dev builder images
uses: ./.github/actions/build-dev-builder-images
with:
version: ${{ env.VERSION }}
dockerhub-image-registry-username: ${{ secrets.DOCKERHUB_USERNAME }}
dockerhub-image-registry-token: ${{ secrets.DOCKERHUB_TOKEN }}
build-dev-builder-ubuntu: ${{ inputs.release_dev_builder_ubuntu_image }}
build-dev-builder-centos: ${{ inputs.release_dev_builder_centos_image }}
build-dev-builder-android: ${{ inputs.release_dev_builder_android_image }}
release-dev-builder-images-ecr:
name: Release dev builder images to AWS ECR
runs-on: ubuntu-20.04
needs: [
release-dev-builder-images
]
steps:
- name: Configure AWS credentials
uses: aws-actions/configure-aws-credentials@v4
with:
aws-access-key-id: ${{ secrets.AWS_ECR_ACCESS_KEY_ID }}
aws-secret-access-key: ${{ secrets.AWS_ECR_SECRET_ACCESS_KEY }}
aws-region: ${{ vars.ECR_REGION }}
- name: Login to Amazon ECR
id: login-ecr-public
uses: aws-actions/amazon-ecr-login@v2
env:
AWS_REGION: ${{ vars.ECR_REGION }}
with:
registry-type: public
- name: Push dev-builder-ubuntu image
shell: bash
if: ${{ inputs.release_dev_builder_ubuntu_image }}
run: |
docker run -v "${DOCKER_CONFIG:-$HOME/.docker}:/root/.docker:ro" \
-e "REGISTRY_AUTH_FILE=/root/.docker/config.json" \
quay.io/skopeo/stable:latest \
copy -a docker://docker.io/${{ vars.IMAGE_NAMESPACE }}/dev-builder-ubuntu:${{ needs.release-dev-builder-images.outputs.version }} \
docker://${{ vars.ECR_IMAGE_REGISTRY }}/${{ vars.ECR_IMAGE_NAMESPACE }}/dev-builder-ubuntu:${{ needs.release-dev-builder-images.outputs.version }}
docker run -v "${DOCKER_CONFIG:-$HOME/.docker}:/root/.docker:ro" \
-e "REGISTRY_AUTH_FILE=/root/.docker/config.json" \
quay.io/skopeo/stable:latest \
copy -a docker://docker.io/${{ vars.IMAGE_NAMESPACE }}/dev-builder-ubuntu:latest \
docker://${{ vars.ECR_IMAGE_REGISTRY }}/${{ vars.ECR_IMAGE_NAMESPACE }}/dev-builder-ubuntu:latest
- name: Push dev-builder-centos image
shell: bash
if: ${{ inputs.release_dev_builder_centos_image }}
run: |
docker run -v "${DOCKER_CONFIG:-$HOME/.docker}:/root/.docker:ro" \
-e "REGISTRY_AUTH_FILE=/root/.docker/config.json" \
quay.io/skopeo/stable:latest \
copy -a docker://docker.io/${{ vars.IMAGE_NAMESPACE }}/dev-builder-centos:${{ needs.release-dev-builder-images.outputs.version }} \
docker://${{ vars.ECR_IMAGE_REGISTRY }}/${{ vars.ECR_IMAGE_NAMESPACE }}/dev-builder-centos:${{ needs.release-dev-builder-images.outputs.version }}
docker run -v "${DOCKER_CONFIG:-$HOME/.docker}:/root/.docker:ro" \
-e "REGISTRY_AUTH_FILE=/root/.docker/config.json" \
quay.io/skopeo/stable:latest \
copy -a docker://docker.io/${{ vars.IMAGE_NAMESPACE }}/dev-builder-centos:latest \
docker://${{ vars.ECR_IMAGE_REGISTRY }}/${{ vars.ECR_IMAGE_NAMESPACE }}/dev-builder-centos:latest
- name: Push dev-builder-android image
shell: bash
if: ${{ inputs.release_dev_builder_android_image }}
run: |
docker run -v "${DOCKER_CONFIG:-$HOME/.docker}:/root/.docker:ro" \
-e "REGISTRY_AUTH_FILE=/root/.docker/config.json" \
quay.io/skopeo/stable:latest \
copy -a docker://docker.io/${{ vars.IMAGE_NAMESPACE }}/dev-builder-android:${{ needs.release-dev-builder-images.outputs.version }} \
docker://${{ vars.ECR_IMAGE_REGISTRY }}/${{ vars.ECR_IMAGE_NAMESPACE }}/dev-builder-android:${{ needs.release-dev-builder-images.outputs.version }}
docker run -v "${DOCKER_CONFIG:-$HOME/.docker}:/root/.docker:ro" \
-e "REGISTRY_AUTH_FILE=/root/.docker/config.json" \
quay.io/skopeo/stable:latest \
copy -a docker://docker.io/${{ vars.IMAGE_NAMESPACE }}/dev-builder-android:latest \
docker://${{ vars.ECR_IMAGE_REGISTRY }}/${{ vars.ECR_IMAGE_NAMESPACE }}/dev-builder-android:latest
release-dev-builder-images-cn: # Note: beware of https://github.com/containers/skopeo/issues/1874; we decided to use the latest stable skopeo container.
name: Release dev builder images to CN region
runs-on: ubuntu-20.04
needs: [
release-dev-builder-images
]
steps:
- name: Login to AliCloud Container Registry
uses: docker/login-action@v3
with:
registry: ${{ vars.ACR_IMAGE_REGISTRY }}
username: ${{ secrets.ALICLOUD_USERNAME }}
password: ${{ secrets.ALICLOUD_PASSWORD }}
- name: Push dev-builder-ubuntu image
shell: bash
if: ${{ inputs.release_dev_builder_ubuntu_image }}
run: |
docker run -v "${DOCKER_CONFIG:-$HOME/.docker}:/root/.docker:ro" \
-e "REGISTRY_AUTH_FILE=/root/.docker/config.json" \
quay.io/skopeo/stable:latest \
copy -a docker://docker.io/${{ vars.IMAGE_NAMESPACE }}/dev-builder-ubuntu:${{ needs.release-dev-builder-images.outputs.version }} \
docker://${{ vars.ACR_IMAGE_REGISTRY }}/${{ vars.IMAGE_NAMESPACE }}/dev-builder-ubuntu:${{ needs.release-dev-builder-images.outputs.version }}
- name: Push dev-builder-centos image
shell: bash
if: ${{ inputs.release_dev_builder_centos_image }}
run: |
docker run -v "${DOCKER_CONFIG:-$HOME/.docker}:/root/.docker:ro" \
-e "REGISTRY_AUTH_FILE=/root/.docker/config.json" \
quay.io/skopeo/stable:latest \
copy -a docker://docker.io/${{ vars.IMAGE_NAMESPACE }}/dev-builder-centos:${{ needs.release-dev-builder-images.outputs.version }} \
docker://${{ vars.ACR_IMAGE_REGISTRY }}/${{ vars.IMAGE_NAMESPACE }}/dev-builder-centos:${{ needs.release-dev-builder-images.outputs.version }}
- name: Push dev-builder-android image
shell: bash
if: ${{ inputs.release_dev_builder_android_image }}
run: |
docker run -v "${DOCKER_CONFIG:-$HOME/.docker}:/root/.docker:ro" \
-e "REGISTRY_AUTH_FILE=/root/.docker/config.json" \
quay.io/skopeo/stable:latest \
copy -a docker://docker.io/${{ vars.IMAGE_NAMESPACE }}/dev-builder-android:${{ needs.release-dev-builder-images.outputs.version }} \
docker://${{ vars.ACR_IMAGE_REGISTRY }}/${{ vars.IMAGE_NAMESPACE }}/dev-builder-android:${{ needs.release-dev-builder-images.outputs.version }}
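All of the pushes above follow the same containerized skopeo pattern; a generic sketch with placeholder image references (SRC_IMAGE and DST_IMAGE are not real registries):

# Copy a multi-arch image between registries using skopeo inside a container.
# `-a` copies every architecture in the manifest list; REGISTRY_AUTH_FILE points
# skopeo at the Docker credentials mounted read-only from the host.
SRC_IMAGE=docker.io/example/dev-builder-ubuntu:latest
DST_IMAGE=registry.example.com/example/dev-builder-ubuntu:latest
docker run -v "${DOCKER_CONFIG:-$HOME/.docker}:/root/.docker:ro" \
  -e "REGISTRY_AUTH_FILE=/root/.docker/config.json" \
  quay.io/skopeo/stable:latest \
  copy -a "docker://${SRC_IMAGE}" "docker://${DST_IMAGE}"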

View File

@@ -1,8 +1,3 @@
name: Release
# There are two kinds of formal release:
# 1. The tag('v*.*.*') push release: the release workflow will be triggered by the tag push event.
# 2. The scheduled release(the version will be '${{ env.NEXT_RELEASE_VERSION }}-nightly-YYYYMMDD'): the release workflow will be triggered by the schedule event.
on:
push:
tags:
@@ -10,475 +5,339 @@ on:
schedule:
# At 00:00 on Monday.
- cron: '0 0 * * 1'
workflow_dispatch: # Allows you to run this workflow manually.
# Note: GitHub Actions only supports 10 workflow_dispatch inputs, and they are already used up.
inputs:
linux_amd64_runner:
type: choice
description: The runner uses to build linux-amd64 artifacts
default: ec2-c6i.4xlarge-amd64
options:
- ubuntu-20.04
- ubuntu-20.04-8-cores
- ubuntu-20.04-16-cores
- ubuntu-20.04-32-cores
- ubuntu-20.04-64-cores
- ec2-c6i.xlarge-amd64 # 4C8G
- ec2-c6i.2xlarge-amd64 # 8C16G
- ec2-c6i.4xlarge-amd64 # 16C32G
- ec2-c6i.8xlarge-amd64 # 32C64G
- ec2-c6i.16xlarge-amd64 # 64C128G
linux_arm64_runner:
type: choice
description: The runner uses to build linux-arm64 artifacts
default: ec2-c6g.4xlarge-arm64
options:
- ubuntu-2204-32-cores-arm
- ec2-c6g.xlarge-arm64 # 4C8G
- ec2-c6g.2xlarge-arm64 # 8C16G
- ec2-c6g.4xlarge-arm64 # 16C32G
- ec2-c6g.8xlarge-arm64 # 32C64G
- ec2-c6g.16xlarge-arm64 # 64C128G
macos_runner:
type: choice
description: The runner uses to build macOS artifacts
default: macos-latest
options:
- macos-latest
skip_test:
description: Do not run integration tests during the build
type: boolean
default: true
build_linux_amd64_artifacts:
type: boolean
description: Build linux-amd64 artifacts
required: false
default: false
build_linux_arm64_artifacts:
type: boolean
description: Build linux-arm64 artifacts
required: false
default: false
build_macos_artifacts:
type: boolean
description: Build macos artifacts
required: false
default: false
build_windows_artifacts:
type: boolean
description: Build Windows artifacts
required: false
default: false
publish_github_release:
type: boolean
description: Create GitHub release and upload artifacts
required: false
default: false
release_images:
type: boolean
description: Build and push images to DockerHub and ACR
required: false
default: false
# Manual triggering only builds binaries.
workflow_dispatch:
name: Release
# Use env variables to control all the release process.
env:
# The arguments of building greptime.
RUST_TOOLCHAIN: nightly-2023-02-26
SCHEDULED_BUILD_VERSION_PREFIX: v0.2.0
SCHEDULED_PERIOD: nightly
CARGO_PROFILE: nightly
# Controls whether to run tests, including unit tests, integration tests and sqlness.
DISABLE_RUN_TESTS: ${{ inputs.skip_test || vars.DEFAULT_SKIP_TEST }}
# The scheduled version is '${{ env.NEXT_RELEASE_VERSION }}-nightly-YYYYMMDD', like v0.2.0-nightly-20230313;
NIGHTLY_RELEASE_PREFIX: nightly
# Note: The NEXT_RELEASE_VERSION should be modified manually by every formal release.
NEXT_RELEASE_VERSION: v0.10.0
# Permission reference: https://docs.github.com/en/actions/using-jobs/assigning-permissions-to-jobs
permissions:
issues: write # Allows the action to create issues for cyborg.
contents: write # Allows the action to create a release.
## FIXME(zyy17): Enable it after the tests are stabilized.
DISABLE_RUN_TESTS: true
jobs:
allocate-runners:
name: Allocate runners
if: ${{ github.repository == 'GreptimeTeam/greptimedb' }}
runs-on: ubuntu-20.04
outputs:
linux-amd64-runner: ${{ steps.start-linux-amd64-runner.outputs.label }}
linux-arm64-runner: ${{ steps.start-linux-arm64-runner.outputs.label }}
macos-runner: ${{ inputs.macos_runner || vars.DEFAULT_MACOS_RUNNER }}
windows-runner: windows-2022-8-cores
# The following EC2 resource IDs will be used for releasing the resources.
linux-amd64-ec2-runner-label: ${{ steps.start-linux-amd64-runner.outputs.label }}
linux-amd64-ec2-runner-instance-id: ${{ steps.start-linux-amd64-runner.outputs.ec2-instance-id }}
linux-arm64-ec2-runner-label: ${{ steps.start-linux-arm64-runner.outputs.label }}
linux-arm64-ec2-runner-instance-id: ${{ steps.start-linux-arm64-runner.outputs.ec2-instance-id }}
# The 'version' is used as the global tag name of the release workflow.
version: ${{ steps.create-version.outputs.version }}
build:
name: Build binary
strategy:
matrix:
# The file format is greptime-<os>-<arch>
include:
- arch: x86_64-unknown-linux-gnu
os: ubuntu-2004-16-cores
file: greptime-linux-amd64
continue-on-error: false
opts: "-F servers/dashboard"
- arch: aarch64-unknown-linux-gnu
os: ubuntu-2004-16-cores
file: greptime-linux-arm64
continue-on-error: false
opts: "-F servers/dashboard"
- arch: aarch64-apple-darwin
os: macos-latest
file: greptime-darwin-arm64
continue-on-error: false
opts: "-F servers/dashboard"
- arch: x86_64-apple-darwin
os: macos-latest
file: greptime-darwin-amd64
continue-on-error: false
opts: "-F servers/dashboard"
- arch: x86_64-unknown-linux-gnu
os: ubuntu-2004-16-cores
file: greptime-linux-amd64-pyo3
continue-on-error: false
opts: "-F pyo3_backend,servers/dashboard"
- arch: aarch64-unknown-linux-gnu
os: ubuntu-2004-16-cores
file: greptime-linux-arm64-pyo3
continue-on-error: false
opts: "-F pyo3_backend,servers/dashboard"
- arch: aarch64-apple-darwin
os: macos-latest
file: greptime-darwin-arm64-pyo3
continue-on-error: false
opts: "-F pyo3_backend,servers/dashboard"
- arch: x86_64-apple-darwin
os: macos-latest
file: greptime-darwin-amd64-pyo3
continue-on-error: false
opts: "-F pyo3_backend,servers/dashboard"
runs-on: ${{ matrix.os }}
continue-on-error: ${{ matrix.continue-on-error }}
if: github.repository == 'GreptimeTeam/greptimedb'
steps:
- name: Checkout
uses: actions/checkout@v4
with:
fetch-depth: 0
- name: Checkout sources
uses: actions/checkout@v3
- name: Check Rust toolchain version
- name: Cache cargo assets
id: cache
uses: actions/cache@v3
with:
path: |
~/.cargo/bin/
~/.cargo/registry/index/
~/.cargo/registry/cache/
~/.cargo/git/db/
target/
key: ${{ matrix.arch }}-build-cargo-${{ hashFiles('**/Cargo.lock') }}
- name: Install Protoc for linux
if: contains(matrix.arch, 'linux') && endsWith(matrix.arch, '-gnu')
run: | # Make sure the protoc is >= 3.15
wget https://github.com/protocolbuffers/protobuf/releases/download/v21.9/protoc-21.9-linux-x86_64.zip
unzip protoc-21.9-linux-x86_64.zip -d protoc
sudo cp protoc/bin/protoc /usr/local/bin/
sudo cp -r protoc/include/google /usr/local/include/
- name: Install Protoc for macos
if: contains(matrix.arch, 'darwin')
run: |
brew install protobuf
- name: Install etcd for linux
if: contains(matrix.arch, 'linux') && endsWith(matrix.arch, '-gnu')
run: |
ETCD_VER=v3.5.7
DOWNLOAD_URL=https://github.com/etcd-io/etcd/releases/download
curl -L ${DOWNLOAD_URL}/${ETCD_VER}/etcd-${ETCD_VER}-linux-amd64.tar.gz -o /tmp/etcd-${ETCD_VER}-linux-amd64.tar.gz
mkdir -p /tmp/etcd-download
tar xzvf /tmp/etcd-${ETCD_VER}-linux-amd64.tar.gz -C /tmp/etcd-download --strip-components=1
rm -f /tmp/etcd-${ETCD_VER}-linux-amd64.tar.gz
sudo cp -a /tmp/etcd-download/etcd* /usr/local/bin/
nohup etcd >/tmp/etcd.log 2>&1 &
- name: Install etcd for macos
if: contains(matrix.arch, 'darwin')
run: |
brew install etcd
brew services start etcd
- name: Install dependencies for linux
if: contains(matrix.arch, 'linux') && endsWith(matrix.arch, '-gnu')
run: |
sudo apt-get -y update
sudo apt-get -y install libssl-dev pkg-config g++-aarch64-linux-gnu gcc-aarch64-linux-gnu binutils-aarch64-linux-gnu wget
# FIXME(zyy17): Should we specify the version of Python when building the binary for darwin?
- name: Compile Python 3.10.10 from source for linux
if: contains(matrix.arch, 'linux') && contains(matrix.opts, 'pyo3_backend')
run: |
sudo chmod +x ./docker/aarch64/compile-python.sh
sudo ./docker/aarch64/compile-python.sh ${{ matrix.arch }}
- name: Install rust toolchain
uses: dtolnay/rust-toolchain@master
with:
toolchain: ${{ env.RUST_TOOLCHAIN }}
targets: ${{ matrix.arch }}
- name: Output package versions
run: protoc --version ; cargo version ; rustc --version ; gcc --version ; g++ --version
- name: Run tests
if: env.DISABLE_RUN_TESTS == 'false'
run: make unit-test integration-test sqlness-test
- name: Run cargo build with pyo3 for aarch64-linux
if: contains(matrix.arch, 'aarch64-unknown-linux-gnu') && contains(matrix.opts, 'pyo3_backend')
run: |
# TODO(zyy17): We should make PYO3_CROSS_LIB_DIR configurable.
export PYTHON_INSTALL_PATH_AMD64=${PWD}/python-3.10.10/amd64
export LD_LIBRARY_PATH=$PYTHON_INSTALL_PATH_AMD64/lib:$LD_LIBRARY_PATH
export LIBRARY_PATH=$PYTHON_INSTALL_PATH_AMD64/lib:$LIBRARY_PATH
export PATH=$PYTHON_INSTALL_PATH_AMD64/bin:$PATH
export PYO3_CROSS_LIB_DIR=${PWD}/python-3.10.10/aarch64
echo "PYO3_CROSS_LIB_DIR: $PYO3_CROSS_LIB_DIR"
alias python=$PYTHON_INSTALL_PATH_AMD64/bin/python3
alias pip=$PYTHON_INSTALL_PATH_AMD64/bin/python3-pip
cargo build --profile ${{ env.CARGO_PROFILE }} --locked --target ${{ matrix.arch }} ${{ matrix.opts }}
- name: Run cargo build with pyo3 for amd64-linux
if: contains(matrix.arch, 'x86_64-unknown-linux-gnu') && contains(matrix.opts, 'pyo3_backend')
run: |
export PYTHON_INSTALL_PATH_AMD64=${PWD}/python-3.10.10/amd64
export LD_LIBRARY_PATH=$PYTHON_INSTALL_PATH_AMD64/lib:$LD_LIBRARY_PATH
export LIBRARY_PATH=$PYTHON_INSTALL_PATH_AMD64/lib:$LIBRARY_PATH
export PATH=$PYTHON_INSTALL_PATH_AMD64/bin:$PATH
echo "implementation=CPython" >> pyo3.config
echo "version=3.10" >> pyo3.config
echo "implementation=CPython" >> pyo3.config
echo "shared=true" >> pyo3.config
echo "abi3=true" >> pyo3.config
echo "lib_name=python3.10" >> pyo3.config
echo "lib_dir=$PYTHON_INSTALL_PATH_AMD64/lib" >> pyo3.config
echo "executable=$PYTHON_INSTALL_PATH_AMD64/bin/python3" >> pyo3.config
echo "pointer_width=64" >> pyo3.config
echo "build_flags=" >> pyo3.config
echo "suppress_build_script_link_lines=false" >> pyo3.config
cat pyo3.config
export PYO3_CONFIG_FILE=${PWD}/pyo3.config
alias python=$PYTHON_INSTALL_PATH_AMD64/bin/python3
alias pip=$PYTHON_INSTALL_PATH_AMD64/bin/python3-pip
cargo build --profile ${{ env.CARGO_PROFILE }} --locked --target ${{ matrix.arch }} ${{ matrix.opts }}
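A quick way to confirm the pyo3-enabled binary actually links against the freshly compiled Python, assuming a Linux host with ldd available (the target path follows the cargo profile and arch used above):

# Inspect the dynamic linkage of the built binary; expect libpython3.10 to resolve
# to ${PYTHON_INSTALL_PATH_AMD64}/lib rather than a system-wide installation.
ldd target/x86_64-unknown-linux-gnu/nightly/greptime | grep -i python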
- name: Run cargo build
if: contains(matrix.arch, 'darwin') || contains(matrix.opts, 'pyo3_backend') == false
run: cargo build --profile ${{ env.CARGO_PROFILE }} --locked --target ${{ matrix.arch }} ${{ matrix.opts }}
- name: Calculate checksum and rename binary
shell: bash
run: |
./scripts/check-builder-rust-version.sh
cd target/${{ matrix.arch }}/${{ env.CARGO_PROFILE }}
chmod +x greptime
tar -zcvf ${{ matrix.file }}.tgz greptime
echo $(shasum -a 256 ${{ matrix.file }}.tgz | cut -f1 -d' ') > ${{ matrix.file }}.sha256sum
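Because the .sha256sum file written here contains only the bare digest, a downloaded artifact is verified by comparing hashes rather than with `shasum -c`; a minimal sketch using the linux-amd64 artifact name:

# Verify a downloaded release artifact against its checksum file.
expected=$(cat greptime-linux-amd64.sha256sum)
actual=$(shasum -a 256 greptime-linux-amd64.tgz | cut -f1 -d' ')
[ "$expected" = "$actual" ] && echo "checksum OK" || echo "checksum MISMATCH"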
# The create-version step creates a global variable named 'version' for the whole workflow.
# - If it's a tag push release, the version is the tag name (${{ github.ref_name }});
# - If it's a scheduled release, the version is '${{ env.NEXT_RELEASE_VERSION }}-nightly-$buildTime', like v0.2.0-nightly-20230313;
# - If it's a manual release, the version is '${{ env.NEXT_RELEASE_VERSION }}-<short-git-sha>-YYYYMMDDSS', like v0.2.0-e5b243c-2023071245;
- name: Create version
id: create-version
- name: Upload artifacts
uses: actions/upload-artifact@v3
with:
name: ${{ matrix.file }}
path: target/${{ matrix.arch }}/${{ env.CARGO_PROFILE }}/${{ matrix.file }}.tgz
- name: Upload checksum of artifacts
uses: actions/upload-artifact@v3
with:
name: ${{ matrix.file }}.sha256sum
path: target/${{ matrix.arch }}/${{ env.CARGO_PROFILE }}/${{ matrix.file }}.sha256sum
docker:
name: Build docker image
needs: [build]
runs-on: ubuntu-latest
if: github.repository == 'GreptimeTeam/greptimedb' && github.event_name != 'workflow_dispatch'
steps:
- name: Checkout sources
uses: actions/checkout@v3
- name: Login to Dockerhub
uses: docker/login-action@v2
with:
username: ${{ secrets.DOCKERHUB_USERNAME }}
password: ${{ secrets.DOCKERHUB_TOKEN }}
- name: Configure scheduled build image tag # the tag would be ${SCHEDULED_BUILD_VERSION_PREFIX}-YYYYMMDD-${SCHEDULED_PERIOD}
shell: bash
if: github.event_name == 'schedule'
run: |
echo "version=$(./.github/scripts/create-version.sh)" >> $GITHUB_OUTPUT
env:
GITHUB_EVENT_NAME: ${{ github.event_name }}
GITHUB_REF_NAME: ${{ github.ref_name }}
NEXT_RELEASE_VERSION: ${{ env.NEXT_RELEASE_VERSION }}
NIGHTLY_RELEASE_PREFIX: ${{ env.NIGHTLY_RELEASE_PREFIX }}
buildTime=`date "+%Y%m%d"`
SCHEDULED_BUILD_VERSION=${{ env.SCHEDULED_BUILD_VERSION_PREFIX }}-$buildTime-${{ env.SCHEDULED_PERIOD }}
echo "IMAGE_TAG=${SCHEDULED_BUILD_VERSION:1}" >> $GITHUB_ENV
- name: Allocate linux-amd64 runner
if: ${{ inputs.build_linux_amd64_artifacts || github.event_name == 'push' || github.event_name == 'schedule' }}
uses: ./.github/actions/start-runner
id: start-linux-amd64-runner
with:
runner: ${{ inputs.linux_amd64_runner || vars.DEFAULT_AMD64_RUNNER }}
aws-access-key-id: ${{ secrets.AWS_ACCESS_KEY_ID }}
aws-secret-access-key: ${{ secrets.AWS_SECRET_ACCESS_KEY }}
aws-region: ${{ vars.EC2_RUNNER_REGION }}
github-token: ${{ secrets.GH_PERSONAL_ACCESS_TOKEN }}
image-id: ${{ vars.EC2_RUNNER_LINUX_AMD64_IMAGE_ID }}
security-group-id: ${{ vars.EC2_RUNNER_SECURITY_GROUP_ID }}
subnet-id: ${{ vars.EC2_RUNNER_SUBNET_ID }}
- name: Allocate linux-arm64 runner
if: ${{ inputs.build_linux_arm64_artifacts || github.event_name == 'push' || github.event_name == 'schedule' }}
uses: ./.github/actions/start-runner
id: start-linux-arm64-runner
with:
runner: ${{ inputs.linux_arm64_runner || vars.DEFAULT_ARM64_RUNNER }}
aws-access-key-id: ${{ secrets.AWS_ACCESS_KEY_ID }}
aws-secret-access-key: ${{ secrets.AWS_SECRET_ACCESS_KEY }}
aws-region: ${{ vars.EC2_RUNNER_REGION }}
github-token: ${{ secrets.GH_PERSONAL_ACCESS_TOKEN }}
image-id: ${{ vars.EC2_RUNNER_LINUX_ARM64_IMAGE_ID }}
security-group-id: ${{ vars.EC2_RUNNER_SECURITY_GROUP_ID }}
subnet-id: ${{ vars.EC2_RUNNER_SUBNET_ID }}
build-linux-amd64-artifacts:
name: Build linux-amd64 artifacts
if: ${{ inputs.build_linux_amd64_artifacts || github.event_name == 'push' || github.event_name == 'schedule' }}
needs: [
allocate-runners,
]
runs-on: ${{ needs.allocate-runners.outputs.linux-amd64-runner }}
steps:
- uses: actions/checkout@v4
with:
fetch-depth: 0
- uses: ./.github/actions/build-linux-artifacts
with:
arch: amd64
cargo-profile: ${{ env.CARGO_PROFILE }}
version: ${{ needs.allocate-runners.outputs.version }}
disable-run-tests: ${{ env.DISABLE_RUN_TESTS }}
image-registry: ${{ vars.ECR_IMAGE_REGISTRY }}
image-namespace: ${{ vars.ECR_IMAGE_NAMESPACE }}
build-linux-arm64-artifacts:
name: Build linux-arm64 artifacts
if: ${{ inputs.build_linux_arm64_artifacts || github.event_name == 'push' || github.event_name == 'schedule' }}
needs: [
allocate-runners,
]
runs-on: ${{ needs.allocate-runners.outputs.linux-arm64-runner }}
steps:
- uses: actions/checkout@v4
with:
fetch-depth: 0
- uses: ./.github/actions/build-linux-artifacts
with:
arch: arm64
cargo-profile: ${{ env.CARGO_PROFILE }}
version: ${{ needs.allocate-runners.outputs.version }}
disable-run-tests: ${{ env.DISABLE_RUN_TESTS }}
image-registry: ${{ vars.ECR_IMAGE_REGISTRY }}
image-namespace: ${{ vars.ECR_IMAGE_NAMESPACE }}
build-macos-artifacts:
name: Build macOS artifacts
strategy:
fail-fast: false
matrix:
include:
- os: ${{ needs.allocate-runners.outputs.macos-runner }}
arch: aarch64-apple-darwin
features: servers/dashboard
artifacts-dir-prefix: greptime-darwin-arm64
- os: ${{ needs.allocate-runners.outputs.macos-runner }}
arch: aarch64-apple-darwin
features: pyo3_backend,servers/dashboard
artifacts-dir-prefix: greptime-darwin-arm64-pyo3
- os: ${{ needs.allocate-runners.outputs.macos-runner }}
features: servers/dashboard
arch: x86_64-apple-darwin
artifacts-dir-prefix: greptime-darwin-amd64
- os: ${{ needs.allocate-runners.outputs.macos-runner }}
features: pyo3_backend,servers/dashboard
arch: x86_64-apple-darwin
artifacts-dir-prefix: greptime-darwin-amd64-pyo3
runs-on: ${{ matrix.os }}
outputs:
build-macos-result: ${{ steps.set-build-macos-result.outputs.build-macos-result }}
needs: [
allocate-runners,
]
if: ${{ inputs.build_macos_artifacts || github.event_name == 'push' || github.event_name == 'schedule' }}
steps:
- uses: actions/checkout@v4
with:
fetch-depth: 0
- uses: ./.github/actions/build-macos-artifacts
with:
arch: ${{ matrix.arch }}
cargo-profile: ${{ env.CARGO_PROFILE }}
features: ${{ matrix.features }}
version: ${{ needs.allocate-runners.outputs.version }}
# We decided to disable the integration tests on macOS because they are unnecessary and time-consuming.
disable-run-tests: true
artifacts-dir: ${{ matrix.artifacts-dir-prefix }}-${{ needs.allocate-runners.outputs.version }}
- name: Set build macos result
id: set-build-macos-result
- name: Configure tag # If the release tag is v0.1.0, then the image version tag will be 0.1.0.
shell: bash
if: github.event_name != 'schedule'
run: |
echo "build-macos-result=success" >> $GITHUB_OUTPUT
VERSION=${{ github.ref_name }}
echo "IMAGE_TAG=${VERSION:1}" >> $GITHUB_ENV
build-windows-artifacts:
name: Build Windows artifacts
strategy:
fail-fast: false
matrix:
include:
- os: ${{ needs.allocate-runners.outputs.windows-runner }}
arch: x86_64-pc-windows-msvc
features: servers/dashboard
artifacts-dir-prefix: greptime-windows-amd64
- os: ${{ needs.allocate-runners.outputs.windows-runner }}
arch: x86_64-pc-windows-msvc
features: pyo3_backend,servers/dashboard
artifacts-dir-prefix: greptime-windows-amd64-pyo3
runs-on: ${{ matrix.os }}
outputs:
build-windows-result: ${{ steps.set-build-windows-result.outputs.build-windows-result }}
needs: [
allocate-runners,
]
if: ${{ inputs.build_windows_artifacts || github.event_name == 'push' || github.event_name == 'schedule' }}
steps:
- run: git config --global core.autocrlf false
- name: Set up QEMU
uses: docker/setup-qemu-action@v2
- uses: actions/checkout@v4
- name: Set up buildx
uses: docker/setup-buildx-action@v2
- name: Download amd64 binary
uses: actions/download-artifact@v3
with:
fetch-depth: 0
name: greptime-linux-amd64-pyo3
path: amd64
- uses: ./.github/actions/build-windows-artifacts
with:
arch: ${{ matrix.arch }}
cargo-profile: ${{ env.CARGO_PROFILE }}
features: ${{ matrix.features }}
version: ${{ needs.allocate-runners.outputs.version }}
disable-run-tests: ${{ env.DISABLE_RUN_TESTS }}
artifacts-dir: ${{ matrix.artifacts-dir-prefix }}-${{ needs.allocate-runners.outputs.version }}
- name: Set build windows result
id: set-build-windows-result
- name: Unzip the amd64 artifacts
run: |
echo "build-windows-result=success" >> $Env:GITHUB_OUTPUT
tar xvf amd64/greptime-linux-amd64-pyo3.tgz -C amd64/ && rm amd64/greptime-linux-amd64-pyo3.tgz
cp -r amd64 docker/ci
release-images-to-dockerhub:
name: Build and push images to DockerHub
if: ${{ inputs.release_images || github.event_name == 'push' || github.event_name == 'schedule' }}
needs: [
allocate-runners,
build-linux-amd64-artifacts,
build-linux-arm64-artifacts,
]
runs-on: ubuntu-2004-16-cores
outputs:
build-image-result: ${{ steps.set-build-image-result.outputs.build-image-result }}
steps:
- uses: actions/checkout@v4
- name: Download arm64 binary
id: download-arm64
uses: actions/download-artifact@v3
with:
fetch-depth: 0
name: greptime-linux-arm64-pyo3
path: arm64
- name: Build and push images to dockerhub
uses: ./.github/actions/build-images
with:
image-registry: docker.io
image-namespace: ${{ vars.IMAGE_NAMESPACE }}
image-registry-username: ${{ secrets.DOCKERHUB_USERNAME }}
image-registry-password: ${{ secrets.DOCKERHUB_TOKEN }}
version: ${{ needs.allocate-runners.outputs.version }}
- name: Set build image result
id: set-build-image-result
- name: Unzip the arm64 artifacts
id: unzip-arm64
if: success() || steps.download-arm64.conclusion == 'success'
run: |
echo "build-image-result=success" >> $GITHUB_OUTPUT
tar xvf arm64/greptime-linux-arm64-pyo3.tgz -C arm64/ && rm arm64/greptime-linux-arm64-pyo3.tgz
cp -r arm64 docker/ci
release-cn-artifacts:
name: Release artifacts to CN region
if: ${{ inputs.release_images || github.event_name == 'push' || github.event_name == 'schedule' }}
needs: [ # The job has to wait until all the artifacts are built.
allocate-runners,
build-linux-amd64-artifacts,
build-linux-arm64-artifacts,
build-macos-artifacts,
build-windows-artifacts,
release-images-to-dockerhub,
]
runs-on: ubuntu-20.04
# Pushing to ACR can easily fail due to unknown network issues.
# However, we don't want to fail the whole workflow because of this.
# ACR has a daily sync with DockerHub, so don't worry about the image not being updated.
continue-on-error: true
- name: Build and push all
uses: docker/build-push-action@v3
if: success() || steps.unzip-arm64.conclusion == 'success' # Build and push all platforms if unzip-arm64 succeeds
with:
context: ./docker/ci/
file: ./docker/ci/Dockerfile
push: true
platforms: linux/amd64,linux/arm64
tags: |
greptime/greptimedb:latest
greptime/greptimedb:${{ env.IMAGE_TAG }}
- name: Build and push amd64 only
uses: docker/build-push-action@v3
if: success() || steps.download-arm64.conclusion == 'failure' # Only build and push amd64 platform if download-arm64 fails
with:
context: ./docker/ci/
file: ./docker/ci/Dockerfile
push: true
platforms: linux/amd64
tags: |
greptime/greptimedb:latest
greptime/greptimedb:${{ env.IMAGE_TAG }}
release:
name: Release artifacts
# Release artifacts only when all the artifacts are built successfully.
needs: [build,docker]
runs-on: ubuntu-latest
if: github.repository == 'GreptimeTeam/greptimedb' && github.event_name != 'workflow_dispatch'
steps:
- uses: actions/checkout@v4
with:
fetch-depth: 0
- name: Checkout sources
uses: actions/checkout@v3
- name: Release artifacts to CN region
uses: ./.github/actions/release-cn-artifacts
with:
src-image-registry: docker.io
src-image-namespace: ${{ vars.IMAGE_NAMESPACE }}
src-image-name: greptimedb
dst-image-registry-username: ${{ secrets.ALICLOUD_USERNAME }}
dst-image-registry-password: ${{ secrets.ALICLOUD_PASSWORD }}
dst-image-registry: ${{ vars.ACR_IMAGE_REGISTRY }}
dst-image-namespace: ${{ vars.IMAGE_NAMESPACE }}
version: ${{ needs.allocate-runners.outputs.version }}
aws-cn-s3-bucket: ${{ vars.AWS_RELEASE_BUCKET }}
aws-cn-access-key-id: ${{ secrets.AWS_CN_ACCESS_KEY_ID }}
aws-cn-secret-access-key: ${{ secrets.AWS_CN_SECRET_ACCESS_KEY }}
aws-cn-region: ${{ vars.AWS_RELEASE_BUCKET_REGION }}
dev-mode: false
update-version-info: true
push-latest-tag: true
- name: Download artifacts
uses: actions/download-artifact@v3
publish-github-release:
name: Create GitHub release and upload artifacts
if: ${{ inputs.publish_github_release || github.event_name == 'push' || github.event_name == 'schedule' }}
needs: [ # The job has to wait until all the artifacts are built.
allocate-runners,
build-linux-amd64-artifacts,
build-linux-arm64-artifacts,
build-macos-artifacts,
build-windows-artifacts,
release-images-to-dockerhub,
]
runs-on: ubuntu-20.04
steps:
- uses: actions/checkout@v4
with:
fetch-depth: 0
- name: Configure scheduled build version # the version would be ${SCHEDULED_BUILD_VERSION_PREFIX}-${SCHEDULED_PERIOD}-YYYYMMDD, like v0.2.0-nightly-20230313.
shell: bash
if: github.event_name == 'schedule'
run: |
buildTime=`date "+%Y%m%d"`
SCHEDULED_BUILD_VERSION=${{ env.SCHEDULED_BUILD_VERSION_PREFIX }}-${{ env.SCHEDULED_PERIOD }}-$buildTime
echo "SCHEDULED_BUILD_VERSION=${SCHEDULED_BUILD_VERSION}" >> $GITHUB_ENV
- name: Publish GitHub release
uses: ./.github/actions/publish-github-release
with:
version: ${{ needs.allocate-runners.outputs.version }}
- name: Create scheduled build git tag
if: github.event_name == 'schedule'
run: |
git tag ${{ env.SCHEDULED_BUILD_VERSION }}
### Stop runners ###
# It's necessary to split the job of releasing runners into 'stop-linux-amd64-runner' and 'stop-linux-arm64-runner',
# because we can terminate the specified EC2 instance immediately after its build job finishes, without unnecessary waiting.
stop-linux-amd64-runner: # It's always run as the last job in the workflow to make sure that the runner is released.
name: Stop linux-amd64 runner
# Only run this job when the runner is allocated.
if: ${{ always() }}
runs-on: ubuntu-20.04
needs: [
allocate-runners,
build-linux-amd64-artifacts,
]
steps:
- name: Checkout
uses: actions/checkout@v4
- name: Publish scheduled release # configure the different release title and tags.
uses: softprops/action-gh-release@v1
if: github.event_name == 'schedule'
with:
fetch-depth: 0
name: "Release ${{ env.SCHEDULED_BUILD_VERSION }}"
tag_name: ${{ env.SCHEDULED_BUILD_VERSION }}
generate_release_notes: true
files: |
**/greptime-*
- name: Stop EC2 runner
uses: ./.github/actions/stop-runner
- name: Publish release
uses: softprops/action-gh-release@v1
if: github.event_name != 'schedule'
with:
label: ${{ needs.allocate-runners.outputs.linux-amd64-ec2-runner-label }}
ec2-instance-id: ${{ needs.allocate-runners.outputs.linux-amd64-ec2-runner-instance-id }}
aws-access-key-id: ${{ secrets.AWS_ACCESS_KEY_ID }}
aws-secret-access-key: ${{ secrets.AWS_SECRET_ACCESS_KEY }}
aws-region: ${{ vars.EC2_RUNNER_REGION }}
github-token: ${{ secrets.GH_PERSONAL_ACCESS_TOKEN }}
stop-linux-arm64-runner: # It's always run as the last job in the workflow to make sure that the runner is released.
name: Stop linux-arm64 runner
# Only run this job when the runner is allocated.
if: ${{ always() }}
runs-on: ubuntu-20.04
needs: [
allocate-runners,
build-linux-arm64-artifacts,
]
steps:
- name: Checkout
uses: actions/checkout@v4
with:
fetch-depth: 0
- name: Stop EC2 runner
uses: ./.github/actions/stop-runner
with:
label: ${{ needs.allocate-runners.outputs.linux-arm64-ec2-runner-label }}
ec2-instance-id: ${{ needs.allocate-runners.outputs.linux-arm64-ec2-runner-instance-id }}
aws-access-key-id: ${{ secrets.AWS_ACCESS_KEY_ID }}
aws-secret-access-key: ${{ secrets.AWS_SECRET_ACCESS_KEY }}
aws-region: ${{ vars.EC2_RUNNER_REGION }}
github-token: ${{ secrets.GH_PERSONAL_ACCESS_TOKEN }}
notification:
if: ${{ github.repository == 'GreptimeTeam/greptimedb' && (github.event_name == 'push' || github.event_name == 'schedule') && always() }}
name: Send notification to Greptime team
needs: [
release-images-to-dockerhub,
build-macos-artifacts,
build-windows-artifacts,
]
runs-on: ubuntu-20.04
env:
SLACK_WEBHOOK_URL: ${{ secrets.SLACK_WEBHOOK_URL_DEVELOP_CHANNEL }}
steps:
- uses: actions/checkout@v4
- uses: ./.github/actions/setup-cyborg
- name: Report CI status
id: report-ci-status
working-directory: cyborg
run: pnpm tsx bin/report-ci-failure.ts
env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
CI_REPORT_STATUS: ${{ needs.release-images-to-dockerhub.outputs.build-image-result == 'success' && needs.build-windows-artifacts.outputs.build-windows-result == 'success' && needs.build-macos-artifacts.outputs.build-macos-result == 'success' }}
- name: Notify release successful result
uses: slackapi/slack-github-action@v1.25.0
if: ${{ needs.release-images-to-dockerhub.outputs.build-image-result == 'success' && needs.build-windows-artifacts.outputs.build-windows-result == 'success' && needs.build-macos-artifacts.outputs.build-macos-result == 'success' }}
with:
payload: |
{"text": "GreptimeDB's release version has completed successfully."}
- name: Notify release failed result
uses: slackapi/slack-github-action@v1.25.0
if: ${{ needs.release-images-to-dockerhub.outputs.build-image-result != 'success' || needs.build-windows-artifacts.outputs.build-windows-result != 'success' || needs.build-macos-artifacts.outputs.build-macos-result != 'success' }}
with:
payload: |
{"text": "GreptimeDB's release version has failed, please check ${{ steps.report-ci-status.outputs.html_url }}."}
name: "Release ${{ github.ref_name }}"
files: |
**/greptime-*

View File

@@ -1,24 +0,0 @@
name: Schedule Management
on:
schedule:
- cron: '4 2 * * *'
workflow_dispatch:
permissions:
contents: read
issues: write
pull-requests: write
jobs:
maintenance:
name: Periodic Maintenance
runs-on: ubuntu-latest
if: ${{ github.repository == 'GreptimeTeam/greptimedb' }}
steps:
- uses: actions/checkout@v4
- uses: ./.github/actions/setup-cyborg
- name: Do Maintenance
working-directory: cyborg
run: pnpm tsx bin/schedule.ts
env:
GITHUB_TOKEN: ${{ secrets.GH_PERSONAL_ACCESS_TOKEN }}

View File

@@ -1,21 +0,0 @@
name: "Semantic Pull Request"
on:
pull_request_target:
types:
- opened
- reopened
- edited
jobs:
check:
runs-on: ubuntu-20.04
timeout-minutes: 10
steps:
- uses: actions/checkout@v4
- uses: ./.github/actions/setup-cyborg
- name: Check Pull Request
working-directory: cyborg
run: pnpm tsx bin/check-pull-request.ts
env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}

11
.gitignore vendored
View File

@@ -1,8 +1,6 @@
# Generated by Cargo
# will have compiled files and executables
/target/
# also ignore if it's a symbolic link
/target
# Remove Cargo.lock from gitignore if creating an executable, leave it for libraries
# More information here https://doc.rust-lang.org/cargo/guide/cargo-toml-vs-cargo-lock.html
@@ -41,12 +39,3 @@ benchmarks/data
# dashboard files
!/src/servers/dashboard/VERSION
/src/servers/dashboard/*
# Vscode workspace
*.code-workspace
venv/
# Fuzz tests
tests-fuzz/artifacts/
tests-fuzz/corpus/

14
.licenserc.yaml Normal file
View File

@@ -0,0 +1,14 @@
header:
license:
spdx-id: Apache-2.0
copyright-owner: Greptime Team
paths:
- "**/*.rs"
- "**/*.py"
comment: on-failure
dependency:
files:
- Cargo.toml
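This file uses the configuration format of Apache SkyWalking Eyes (license-eye); assuming that tool is what consumes it (the workflow driving it is not shown in this diff), the headers can be checked locally:

# license-eye reads .licenserc.yaml from the repository root by default.
license-eye header check   # report files missing the Apache-2.0 header
license-eye header fix     # add the header to files that lack it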

View File

@@ -16,7 +16,6 @@ repos:
hooks:
- id: fmt
- id: clippy
args: ["--workspace", "--all-targets", "--all-features", "--", "-D", "warnings"]
args: ["--workspace", "--all-targets", "--", "-D", "warnings", "-D", "clippy::print_stdout", "-D", "clippy::print_stderr"]
stages: [push]
- id: cargo-check
args: ["--workspace", "--all-targets", "--all-features"]

View File

@@ -1,43 +0,0 @@
# GreptimeDB Authors
## Individual Committers (in alphabetical order)
* [CookiePieWw](https://github.com/CookiePieWw)
* [KKould](https://github.com/KKould)
* [NiwakaDev](https://github.com/NiwakaDev)
* [etolbakov](https://github.com/etolbakov)
* [irenjj](https://github.com/irenjj)
## Team Members (in alphabetical order)
* [Breeze-P](https://github.com/Breeze-P)
* [GrepTime](https://github.com/GrepTime)
* [MichaelScofield](https://github.com/MichaelScofield)
* [Wenjie0329](https://github.com/Wenjie0329)
* [WenyXu](https://github.com/WenyXu)
* [ZonaHex](https://github.com/ZonaHex)
* [apdong2022](https://github.com/apdong2022)
* [beryl678](https://github.com/beryl678)
* [daviderli614](https://github.com/daviderli614)
* [discord9](https://github.com/discord9)
* [evenyag](https://github.com/evenyag)
* [fengjiachun](https://github.com/fengjiachun)
* [fengys1996](https://github.com/fengys1996)
* [holalengyu](https://github.com/holalengyu)
* [killme2008](https://github.com/killme2008)
* [nicecui](https://github.com/nicecui)
* [paomian](https://github.com/paomian)
* [shuiyisong](https://github.com/shuiyisong)
* [sunchanglong](https://github.com/sunchanglong)
* [sunng87](https://github.com/sunng87)
* [tisonkun](https://github.com/tisonkun)
* [v0y4g3r](https://github.com/v0y4g3r)
* [waynexia](https://github.com/waynexia)
* [xtang](https://github.com/xtang)
* [zhaoyingnan01](https://github.com/zhaoyingnan01)
* [zhongzc](https://github.com/zhongzc)
* [zyy17](https://github.com/zyy17)
## All Contributors
[![All Contributors](https://contrib.rocks/image?repo=GreptimeTeam/greptimedb)](https://github.com/GreptimeTeam/greptimedb/graphs/contributors)

132
CODE_OF_CONDUCT.md Normal file
View File

@@ -0,0 +1,132 @@
# Contributor Covenant Code of Conduct
## Our Pledge
We as members, contributors, and leaders pledge to make participation in our
community a harassment-free experience for everyone, regardless of age, body
size, visible or invisible disability, ethnicity, sex characteristics, gender
identity and expression, level of experience, education, socio-economic status,
nationality, personal appearance, race, caste, color, religion, or sexual
identity and orientation.
We pledge to act and interact in ways that contribute to an open, welcoming,
diverse, inclusive, and healthy community.
## Our Standards
Examples of behavior that contributes to a positive environment for our
community include:
* Demonstrating empathy and kindness toward other people
* Being respectful of differing opinions, viewpoints, and experiences
* Giving and gracefully accepting constructive feedback
* Accepting responsibility and apologizing to those affected by our mistakes,
and learning from the experience
* Focusing on what is best not just for us as individuals, but for the overall
community
Examples of unacceptable behavior include:
* The use of sexualized language or imagery, and sexual attention or advances of
any kind
* Trolling, insulting or derogatory comments, and personal or political attacks
* Public or private harassment
* Publishing others' private information, such as a physical or email address,
without their explicit permission
* Other conduct which could reasonably be considered inappropriate in a
professional setting
## Enforcement Responsibilities
Community leaders are responsible for clarifying and enforcing our standards of
acceptable behavior and will take appropriate and fair corrective action in
response to any behavior that they deem inappropriate, threatening, offensive,
or harmful.
Community leaders have the right and responsibility to remove, edit, or reject
comments, commits, code, wiki edits, issues, and other contributions that are
not aligned to this Code of Conduct, and will communicate reasons for moderation
decisions when appropriate.
## Scope
This Code of Conduct applies within all community spaces, and also applies when
an individual is officially representing the community in public spaces.
Examples of representing our community include using an official e-mail address,
posting via an official social media account, or acting as an appointed
representative at an online or offline event.
## Enforcement
Instances of abusive, harassing, or otherwise unacceptable behavior may be
reported to the community leaders responsible for enforcement at
info@greptime.com.
All complaints will be reviewed and investigated promptly and fairly.
All community leaders are obligated to respect the privacy and security of the
reporter of any incident.
## Enforcement Guidelines
Community leaders will follow these Community Impact Guidelines in determining
the consequences for any action they deem in violation of this Code of Conduct:
### 1. Correction
**Community Impact**: Use of inappropriate language or other behavior deemed
unprofessional or unwelcome in the community.
**Consequence**: A private, written warning from community leaders, providing
clarity around the nature of the violation and an explanation of why the
behavior was inappropriate. A public apology may be requested.
### 2. Warning
**Community Impact**: A violation through a single incident or series of
actions.
**Consequence**: A warning with consequences for continued behavior. No
interaction with the people involved, including unsolicited interaction with
those enforcing the Code of Conduct, for a specified period of time. This
includes avoiding interactions in community spaces as well as external channels
like social media. Violating these terms may lead to a temporary or permanent
ban.
### 3. Temporary Ban
**Community Impact**: A serious violation of community standards, including
sustained inappropriate behavior.
**Consequence**: A temporary ban from any sort of interaction or public
communication with the community for a specified period of time. No public or
private interaction with the people involved, including unsolicited interaction
with those enforcing the Code of Conduct, is allowed during this period.
Violating these terms may lead to a permanent ban.
### 4. Permanent Ban
**Community Impact**: Demonstrating a pattern of violation of community
standards, including sustained inappropriate behavior, harassment of an
individual, or aggression toward or disparagement of classes of individuals.
**Consequence**: A permanent ban from any sort of public interaction within the
community.
## Attribution
This Code of Conduct is adapted from the [Contributor Covenant][homepage],
version 2.1, available at
[https://www.contributor-covenant.org/version/2/1/code_of_conduct.html][v2.1].
Community Impact Guidelines were inspired by
[Mozilla's code of conduct enforcement ladder][Mozilla CoC].
For answers to common questions about this code of conduct, see the FAQ at
[https://www.contributor-covenant.org/faq][FAQ]. Translations are available at
[https://www.contributor-covenant.org/translations][translations].
[homepage]: https://www.contributor-covenant.org
[v2.1]: https://www.contributor-covenant.org/version/2/1/code_of_conduct.html
[Mozilla CoC]: https://github.com/mozilla/diversity
[FAQ]: https://www.contributor-covenant.org/faq
[translations]: https://www.contributor-covenant.org/translations

View File

@@ -2,11 +2,7 @@
Thanks a lot for considering contributing to GreptimeDB. We believe people like you would make GreptimeDB a great product. We intend to build a community where individuals can have open talks, show respect for one another, and speak with true ❤️. Meanwhile, we are to keep transparency and make your effort count here.
You can find our contributors at https://github.com/GreptimeTeam/greptimedb/graphs/contributors. When you dedicate to GreptimeDB for a few months and keep bringing high-quality contributions (code, docs, advocate, etc.), you will be a candidate of a committer.
A committer will be granted both read & write access to GreptimeDB repos. Check the [AUTHOR.md](AUTHOR.md) file for all current individual committers.
Please read the guidelines, and they can help you get started. Communicate respectfully with the developers maintaining and developing the project. In return, they should reciprocate that respect by addressing your issue, reviewing changes, as well as helping finalize and merge your pull requests.
Read the guidelines, and they can help you get started. Communicate with respect to developers maintaining and developing the project. In return, they should reciprocate that respect by addressing your issue, reviewing changes, as well as helping finalize and merge your pull requests.
Follow our [README](https://github.com/GreptimeTeam/greptimedb#readme) to get the whole picture of the project. To learn about the design of GreptimeDB, please refer to the [design docs](https://github.com/GrepTimeTeam/docs).
@@ -14,7 +10,7 @@ Follow our [README](https://github.com/GreptimeTeam/greptimedb#readme) to get th
It can feel intimidating to contribute to a complex project, but it can also be exciting and fun. These general notes will help everyone participate in this communal activity.
- Follow the [Code of Conduct](https://github.com/GreptimeTeam/.github/blob/main/.github/CODE_OF_CONDUCT.md)
- Follow the [Code of Conduct](https://github.com/GreptimeTeam/greptimedb/blob/develop/CODE_OF_CONDUCT.md)
- Small changes make huge differences. We will happily accept a PR making a single character change if it helps move forward. Don't wait to have everything working.
- Check the closed issues before opening your issue.
- Try to follow the existing style of the code.
@@ -25,12 +21,12 @@ Pull requests are great, but we accept all kinds of other help if you like. Such
- Write tutorials or blog posts. Blog, speak about, or create tutorials about one of GreptimeDB's many features. Mention [@greptime](https://twitter.com/greptime) on Twitter and email info@greptime.com so we can give pointers and tips and help you spread the word by promoting your content on Greptime communication channels.
- Improve the documentation. [Submit documentation](http://github.com/greptimeTeam/docs/) updates, enhancements, designs, or bug fixes, and fixing any spelling or grammar errors will be very much appreciated.
- Present at meetups and conferences about your GreptimeDB projects. Your unique challenges and successes in building things with GreptimeDB can provide great speaking material. We'd love to review your talk abstract, so get in touch with us if you'd like some help!
- Submitting bug reports. To report a bug or a security issue, you can [open a new GitHub issue](https://github.com/GrepTimeTeam/greptimedb/issues/new).
- Submit bug reports. To report a bug or a security issue, you can [open a new GitHub issue](https://github.com/GrepTimeTeam/greptimedb/issues/new).
- Speak up feature requests. Send feedback is a great way for us to understand your different use cases of GreptimeDB better. If you want to share your experience with GreptimeDB, or if you want to discuss any ideas, you can start a discussion on [GitHub discussions](https://github.com/GreptimeTeam/greptimedb/discussions), chat with the Greptime team on [Slack](https://greptime.com/slack), or you can tweet [@greptime](https://twitter.com/greptime) on Twitter.
## Code of Conduct
Also, there are things that we are not looking for because they don't match the goals of the product or benefit the community. Please read [Code of Conduct](https://github.com/GreptimeTeam/.github/blob/main/.github/CODE_OF_CONDUCT.md); we hope everyone can keep good manners and become an honored member.
Also, there are things that we are not looking for because they don't match the goals of the product or benefit the community. Please read [Code of Conduct](https://github.com/GreptimeTeam/greptimedb/blob/develop/CODE_OF_CONDUCT.md); we hope everyone can keep good manners and become an honored member.
## License
@@ -53,9 +49,8 @@ GreptimeDB uses the [Apache 2.0 license](https://github.com/GreptimeTeam/greptim
### Before PR
- To ensure that community is free and confident in its ability to use your contributions, please sign the Contributor License Agreement (CLA) which will be incorporated in the pull request process.
- Make sure all files have proper license header (running `docker run --rm -v $(pwd):/github/workspace ghcr.io/korandoru/hawkeye-native:v3 format` from the project root).
- Make sure all your codes are formatted and follow the [coding style](https://pingcap.github.io/style-guide/rust/) and [style guide](docs/style-guide.md).
- Make sure all unit tests are passed using [nextest](https://nexte.st/index.html) `cargo nextest run`.
- Make sure all your codes are formatted and follow the [coding style](https://pingcap.github.io/style-guide/rust/).
- Make sure all unit tests are passed (using `cargo test --workspace` or [nextest](https://nexte.st/index.html) `cargo nextest run`).
- Make sure all clippy warnings are fixed (you can check it locally by running `cargo clippy --workspace --all-targets -- -D warnings`).
#### `pre-commit` Hooks
@@ -86,7 +81,7 @@ Now, `pre-commit` will run automatically on `git commit`.
### Title
The titles of pull requests should be prefixed with category names listed in [Conventional Commits specification](https://www.conventionalcommits.org/en/v1.0.0)
like `feat`/`fix`/`docs`, with a concise summary of code change following. AVOID using the last commit message as pull request title.
like `feat`/`fix`/`docs`, with a concise summary of code change following. DO NOT use last commit message as pull request title.
### Description
@@ -105,13 +100,13 @@ of what you were trying to do and what went wrong. You can also reach for help i
## Community
The core team will be thrilled if you would like to participate in any way you like. When you are stuck, try to ask for help by filing an issue, with a detailed description of what you were trying to do and what went wrong. If you have any questions or if you would like to get involved in our community, please check out:
The core team will be thrilled if you participate in any way you like. When you are stuck, try ask for help by filing an issue, with a detailed description of what you were trying to do and what went wrong. If you have any questions or if you would like to get involved in our community, please check out:
- [GreptimeDB Community Slack](https://greptime.com/slack)
- [GreptimeDB Github Discussions](https://github.com/GreptimeTeam/greptimedb/discussions)
Also, see some extra GreptimeDB content:
- [GreptimeDB Docs](https://docs.greptime.com/)
- [Learn GreptimeDB](https://greptime.com/product/db)
- [GreptimeDB Docs](https://greptime.com/docs)
- [Learn GreptimeDB](https://greptime.com/products/db)
- [Greptime Inc. Website](https://greptime.com)

10073
Cargo.lock generated

File diff suppressed because it is too large.

View File

@@ -1,28 +1,20 @@
[workspace]
members = [
"benchmarks",
"src/api",
"src/auth",
"src/catalog",
"src/cache",
"src/client",
"src/cmd",
"src/common/base",
"src/common/catalog",
"src/common/config",
"src/common/datasource",
"src/common/error",
"src/common/frontend",
"src/common/function",
"src/common/macro",
"src/common/greptimedb-telemetry",
"src/common/function-macro",
"src/common/grpc",
"src/common/grpc-expr",
"src/common/mem-prof",
"src/common/meta",
"src/common/plugins",
"src/common/pprof",
"src/common/procedure",
"src/common/procedure-test",
"src/common/query",
"src/common/recordbatch",
"src/common/runtime",
@@ -30,260 +22,71 @@ members = [
"src/common/telemetry",
"src/common/test-util",
"src/common/time",
"src/common/decimal",
"src/common/version",
"src/common/wal",
"src/datanode",
"src/datatypes",
"src/file-engine",
"src/flow",
"src/file-table-engine",
"src/frontend",
"src/log-store",
"src/meta-client",
"src/meta-srv",
"src/metric-engine",
"src/mito2",
"src/mito",
"src/object-store",
"src/operator",
"src/partition",
"src/pipeline",
"src/plugins",
"src/promql",
"src/puffin",
"src/query",
"src/script",
"src/servers",
"src/session",
"src/sql",
"src/storage",
"src/store-api",
"src/table",
"src/index",
"tests-fuzz",
"src/table-procedure",
"tests-integration",
"tests/runner",
]
resolver = "2"
[workspace.package]
version = "0.9.5"
version = "0.2.0"
edition = "2021"
license = "Apache-2.0"
[workspace.lints]
clippy.print_stdout = "warn"
clippy.print_stderr = "warn"
clippy.dbg_macro = "warn"
clippy.implicit_clone = "warn"
clippy.readonly_write_lock = "allow"
rust.unknown_lints = "deny"
# Remove this after https://github.com/PyO3/pyo3/issues/4094
rust.non_local_definitions = "allow"
rust.unexpected_cfgs = { level = "warn", check-cfg = ['cfg(tokio_unstable)'] }
[workspace.dependencies]
# We turn off default-features for some dependencies here so the workspaces which inherit them can
# selectively turn them on if needed, since we can override default-features = true (from false)
# for the inherited dependency but cannot do the reverse (override from true to false).
#
# See for more details: https://github.com/rust-lang/cargo/issues/11329
ahash = { version = "0.8", features = ["compile-time-rng"] }
aquamarine = "0.3"
arrow = { version = "51.0.0", features = ["prettyprint"] }
arrow-array = { version = "51.0.0", default-features = false, features = ["chrono-tz"] }
arrow-flight = "51.0"
arrow-ipc = { version = "51.0.0", default-features = false, features = ["lz4", "zstd"] }
arrow-schema = { version = "51.0", features = ["serde"] }
arrow = { version = "37.0" }
arrow-array = "37.0"
arrow-flight = "37.0"
arrow-schema = { version = "37.0", features = ["serde"] }
async-stream = "0.3"
async-trait = "0.1"
axum = { version = "0.6", features = ["headers"] }
base64 = "0.21"
bigdecimal = "0.4.2"
bitflags = "2.4.1"
bytemuck = "1.12"
bytes = { version = "1.7", features = ["serde"] }
chrono = { version = "0.4", features = ["serde"] }
clap = { version = "4.4", features = ["derive"] }
config = "0.13.0"
crossbeam-utils = "0.8"
dashmap = "5.4"
datafusion = { git = "https://github.com/waynexia/arrow-datafusion.git", rev = "7823ef2f63663907edab46af0d51359900f608d6" }
datafusion-common = { git = "https://github.com/waynexia/arrow-datafusion.git", rev = "7823ef2f63663907edab46af0d51359900f608d6" }
datafusion-expr = { git = "https://github.com/waynexia/arrow-datafusion.git", rev = "7823ef2f63663907edab46af0d51359900f608d6" }
datafusion-functions = { git = "https://github.com/waynexia/arrow-datafusion.git", rev = "7823ef2f63663907edab46af0d51359900f608d6" }
datafusion-optimizer = { git = "https://github.com/waynexia/arrow-datafusion.git", rev = "7823ef2f63663907edab46af0d51359900f608d6" }
datafusion-physical-expr = { git = "https://github.com/waynexia/arrow-datafusion.git", rev = "7823ef2f63663907edab46af0d51359900f608d6" }
datafusion-physical-plan = { git = "https://github.com/waynexia/arrow-datafusion.git", rev = "7823ef2f63663907edab46af0d51359900f608d6" }
datafusion-sql = { git = "https://github.com/waynexia/arrow-datafusion.git", rev = "7823ef2f63663907edab46af0d51359900f608d6" }
datafusion-substrait = { git = "https://github.com/waynexia/arrow-datafusion.git", rev = "7823ef2f63663907edab46af0d51359900f608d6" }
derive_builder = "0.12"
dotenv = "0.15"
etcd-client = { version = "0.13" }
fst = "0.4.7"
datafusion = { git = "https://github.com/apache/arrow-datafusion.git", rev = "74a778ca6016a853a3c3add3fa8c6f12f4fe4561" }
datafusion-common = { git = "https://github.com/apache/arrow-datafusion.git", rev = "74a778ca6016a853a3c3add3fa8c6f12f4fe4561" }
datafusion-expr = { git = "https://github.com/apache/arrow-datafusion.git", rev = "74a778ca6016a853a3c3add3fa8c6f12f4fe4561" }
datafusion-optimizer = { git = "https://github.com/apache/arrow-datafusion.git", rev = "74a778ca6016a853a3c3add3fa8c6f12f4fe4561" }
datafusion-physical-expr = { git = "https://github.com/apache/arrow-datafusion.git", rev = "74a778ca6016a853a3c3add3fa8c6f12f4fe4561" }
datafusion-sql = { git = "https://github.com/apache/arrow-datafusion.git", rev = "74a778ca6016a853a3c3add3fa8c6f12f4fe4561" }
futures = "0.3"
futures-util = "0.3"
greptime-proto = { git = "https://github.com/GreptimeTeam/greptime-proto.git", rev = "255f87a3318ace3f88a67f76995a0e14910983f4" }
humantime = "2.1"
humantime-serde = "1.1"
itertools = "0.10"
jsonb = { git = "https://github.com/databendlabs/jsonb.git", rev = "46ad50fc71cf75afbf98eec455f7892a6387c1fc", default-features = false }
lazy_static = "1.4"
meter-core = { git = "https://github.com/GreptimeTeam/greptime-meter.git", rev = "a10facb353b41460eeb98578868ebf19c2084fac" }
mockall = "0.11.4"
moka = "0.12"
notify = "6.1"
num_cpus = "1.16"
once_cell = "1.18"
opentelemetry-proto = { version = "0.5", features = [
"gen-tonic",
"metrics",
"trace",
"with-serde",
"logs",
] }
parking_lot = "0.12"
parquet = { version = "51.0.0", default-features = false, features = ["arrow", "async", "object_store"] }
parquet = "37.0"
paste = "1.0"
pin-project = "1.0"
prometheus = { version = "0.13.3", features = ["process"] }
promql-parser = { version = "0.4.3", features = ["ser"] }
prost = "0.12"
raft-engine = { version = "0.4.1", default-features = false }
prost = "0.11"
rand = "0.8"
ratelimit = "0.9"
regex = "1.8"
regex-automata = { version = "0.4" }
reqwest = { version = "0.12", default-features = false, features = [
"json",
"rustls-tls-native-roots",
"stream",
"multipart",
] }
rskafka = { git = "https://github.com/influxdata/rskafka.git", rev = "75535b5ad9bae4a5dbb582c82e44dfd81ec10105", features = [
"transport-tls",
] }
rstest = "0.21"
rstest_reuse = "0.7"
rust_decimal = "1.33"
rustc-hash = "2.0"
schemars = "0.8"
serde = { version = "1.0", features = ["derive"] }
serde_json = { version = "1.0", features = ["float_roundtrip"] }
serde_with = "3"
shadow-rs = "0.35"
similar-asserts = "1.6.0"
smallvec = { version = "1", features = ["serde"] }
snafu = "0.8"
sysinfo = "0.30"
# on branch v0.44.x
sqlparser = { git = "https://github.com/GreptimeTeam/sqlparser-rs.git", rev = "54a267ac89c09b11c0c88934690530807185d3e7", features = [
"visitor",
] }
strum = { version = "0.25", features = ["derive"] }
serde_json = "1.0"
snafu = { version = "0.7", features = ["backtraces"] }
sqlparser = "0.33"
tempfile = "3"
tokio = { version = "1.40", features = ["full"] }
tokio-postgres = "0.7"
tokio-stream = { version = "0.1" }
tokio-util = { version = "0.7", features = ["io-util", "compat"] }
toml = "0.8.8"
tonic = { version = "0.11", features = ["tls", "gzip", "zstd"] }
tower = { version = "0.4" }
tracing-appender = "0.2"
tracing-subscriber = { version = "0.3", features = ["env-filter", "json", "fmt"] }
typetag = "0.2"
uuid = { version = "1.7", features = ["serde", "v4", "fast-rng"] }
zstd = "0.13"
## workspaces members
api = { path = "src/api" }
auth = { path = "src/auth" }
cache = { path = "src/cache" }
catalog = { path = "src/catalog" }
client = { path = "src/client" }
cmd = { path = "src/cmd", default-features = false }
common-base = { path = "src/common/base" }
common-catalog = { path = "src/common/catalog" }
common-config = { path = "src/common/config" }
common-datasource = { path = "src/common/datasource" }
common-decimal = { path = "src/common/decimal" }
common-error = { path = "src/common/error" }
common-frontend = { path = "src/common/frontend" }
common-function = { path = "src/common/function" }
common-greptimedb-telemetry = { path = "src/common/greptimedb-telemetry" }
common-grpc = { path = "src/common/grpc" }
common-grpc-expr = { path = "src/common/grpc-expr" }
common-macro = { path = "src/common/macro" }
common-mem-prof = { path = "src/common/mem-prof" }
common-meta = { path = "src/common/meta" }
common-plugins = { path = "src/common/plugins" }
common-pprof = { path = "src/common/pprof" }
common-procedure = { path = "src/common/procedure" }
common-procedure-test = { path = "src/common/procedure-test" }
common-query = { path = "src/common/query" }
common-recordbatch = { path = "src/common/recordbatch" }
common-runtime = { path = "src/common/runtime" }
common-telemetry = { path = "src/common/telemetry" }
common-test-util = { path = "src/common/test-util" }
common-time = { path = "src/common/time" }
common-version = { path = "src/common/version" }
common-wal = { path = "src/common/wal" }
datanode = { path = "src/datanode" }
datatypes = { path = "src/datatypes" }
file-engine = { path = "src/file-engine" }
flow = { path = "src/flow" }
frontend = { path = "src/frontend", default-features = false }
index = { path = "src/index" }
log-store = { path = "src/log-store" }
meta-client = { path = "src/meta-client" }
meta-srv = { path = "src/meta-srv" }
metric-engine = { path = "src/metric-engine" }
mito2 = { path = "src/mito2" }
object-store = { path = "src/object-store" }
operator = { path = "src/operator" }
partition = { path = "src/partition" }
pipeline = { path = "src/pipeline" }
plugins = { path = "src/plugins" }
promql = { path = "src/promql" }
puffin = { path = "src/puffin" }
query = { path = "src/query" }
script = { path = "src/script" }
servers = { path = "src/servers" }
session = { path = "src/session" }
sql = { path = "src/sql" }
store-api = { path = "src/store-api" }
substrait = { path = "src/common/substrait" }
table = { path = "src/table" }
[patch.crates-io]
# change all rustls dependencies to use our fork to default to `ring` to make it "just work"
hyper-rustls = { git = "https://github.com/GreptimeTeam/hyper-rustls" }
rustls = { git = "https://github.com/GreptimeTeam/rustls" }
tokio-rustls = { git = "https://github.com/GreptimeTeam/tokio-rustls" }
# This is commented out since we are not using aws-lc-sys. If we need it, uncomment this line or use a release after this commit; otherwise it won't compile with gcc < 8.1.
# see https://github.com/aws/aws-lc-rs/pull/526
# aws-lc-sys = { git ="https://github.com/aws/aws-lc-rs", rev = "556558441e3494af4b156ae95ebc07ebc2fd38aa" }
[workspace.dependencies.meter-macros]
git = "https://github.com/GreptimeTeam/greptime-meter.git"
rev = "a10facb353b41460eeb98578868ebf19c2084fac"
tokio = { version = "1.24.2", features = ["full"] }
tokio-util = { version = "0.7", features = ["io-util"] }
tonic = { version = "0.9", features = ["tls"] }
uuid = { version = "1", features = ["serde", "v4", "fast-rng"] }
[profile.release]
debug = 1
debug = true
[profile.nightly]
inherits = "release"
strip = "debuginfo"
strip = true
lto = "thin"
debug = false
incremental = false
[profile.ci]
inherits = "dev"
strip = true
[profile.dev.package.sqlness-runner]
debug = false
strip = true
[profile.dev.package.tests-fuzz]
debug = false
strip = true


@@ -1,7 +0,0 @@
[build]
pre-build = [
"dpkg --add-architecture $CROSS_DEB_ARCH",
"apt update && apt install -y unzip zlib1g-dev zlib1g-dev:$CROSS_DEB_ARCH",
"curl -LO https://github.com/protocolbuffers/protobuf/releases/download/v3.15.8/protoc-3.15.8-linux-x86_64.zip && unzip protoc-3.15.8-linux-x86_64.zip -d /usr/",
"chmod a+x /usr/bin/protoc && chmod -R a+rx /usr/include/google",
]


@@ -186,7 +186,7 @@
same "printed page" as the copyright notice for easier
identification within third-party archives.
Copyright [yyyy] [name of copyright owner]
Copyright 2022 Greptime Team
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.

Makefile (209 lines changed)

@@ -1,113 +1,15 @@
# The arguments for building images.
CARGO_PROFILE ?=
FEATURES ?=
TARGET_DIR ?=
TARGET ?=
BUILD_BIN ?= greptime
CARGO_BUILD_OPTS := --locked
IMAGE_REGISTRY ?= docker.io
IMAGE_NAMESPACE ?= greptime
IMAGE_REGISTRY ?= greptimedb
IMAGE_TAG ?= latest
DEV_BUILDER_IMAGE_TAG ?= 2024-10-19-a5c00e85-20241024184445
BUILDX_MULTI_PLATFORM_BUILD ?= false
BUILDX_BUILDER_NAME ?= gtbuilder
BASE_IMAGE ?= ubuntu
RUST_TOOLCHAIN ?= $(shell cat rust-toolchain.toml | grep channel | cut -d'"' -f2)
CARGO_REGISTRY_CACHE ?= ${HOME}/.cargo/registry
ARCH := $(shell uname -m | sed 's/x86_64/amd64/' | sed 's/aarch64/arm64/')
OUTPUT_DIR := $(shell if [ "$(RELEASE)" = "true" ]; then echo "release"; elif [ ! -z "$(CARGO_PROFILE)" ]; then echo "$(CARGO_PROFILE)" ; else echo "debug"; fi)
SQLNESS_OPTS ?=
# The arguments for running integration tests.
ETCD_VERSION ?= v3.5.9
ETCD_IMAGE ?= quay.io/coreos/etcd:${ETCD_VERSION}
RETRY_COUNT ?= 3
NEXTEST_OPTS := --retries ${RETRY_COUNT}
BUILD_JOBS ?= $(shell which nproc 1>/dev/null && expr $$(nproc) / 2) # If nproc is not available, we don't set the build jobs.
ifeq ($(BUILD_JOBS), 0) # If the number of cores is less than 2, set the build jobs to 1.
BUILD_JOBS := 1
endif
ifneq ($(strip $(BUILD_JOBS)),)
NEXTEST_OPTS += --build-jobs=${BUILD_JOBS}
endif
ifneq ($(strip $(CARGO_PROFILE)),)
CARGO_BUILD_OPTS += --profile ${CARGO_PROFILE}
endif
ifneq ($(strip $(FEATURES)),)
CARGO_BUILD_OPTS += --features ${FEATURES}
endif
ifneq ($(strip $(TARGET_DIR)),)
CARGO_BUILD_OPTS += --target-dir ${TARGET_DIR}
endif
ifneq ($(strip $(TARGET)),)
CARGO_BUILD_OPTS += --target ${TARGET}
endif
ifneq ($(strip $(BUILD_BIN)),)
CARGO_BUILD_OPTS += --bin ${BUILD_BIN}
endif
ifneq ($(strip $(RELEASE)),)
CARGO_BUILD_OPTS += --release
endif
ifeq ($(BUILDX_MULTI_PLATFORM_BUILD), all)
BUILDX_MULTI_PLATFORM_BUILD_OPTS := --platform linux/amd64,linux/arm64 --push
else ifeq ($(BUILDX_MULTI_PLATFORM_BUILD), amd64)
BUILDX_MULTI_PLATFORM_BUILD_OPTS := --platform linux/amd64 --push
else
BUILDX_MULTI_PLATFORM_BUILD_OPTS := -o type=docker
endif
ifneq ($(strip $(CARGO_BUILD_EXTRA_OPTS)),)
CARGO_BUILD_OPTS += ${CARGO_BUILD_EXTRA_OPTS}
endif
##@ Build
.PHONY: build
build: ## Build debug version greptime.
cargo ${CARGO_EXTENSION} build ${CARGO_BUILD_OPTS}
cargo build
.PHONY: build-by-dev-builder
build-by-dev-builder: ## Build greptime by dev-builder.
docker run --network=host \
-v ${PWD}:/greptimedb -v ${CARGO_REGISTRY_CACHE}:/root/.cargo/registry \
-w /greptimedb ${IMAGE_REGISTRY}/${IMAGE_NAMESPACE}/dev-builder-${BASE_IMAGE}:${DEV_BUILDER_IMAGE_TAG} \
make build \
CARGO_EXTENSION="${CARGO_EXTENSION}" \
CARGO_PROFILE=${CARGO_PROFILE} \
FEATURES=${FEATURES} \
TARGET_DIR=${TARGET_DIR} \
TARGET=${TARGET} \
RELEASE=${RELEASE} \
CARGO_BUILD_EXTRA_OPTS="${CARGO_BUILD_EXTRA_OPTS}"
.PHONY: build-android-bin
build-android-bin: ## Build greptime binary for android.
docker run --network=host \
-v ${PWD}:/greptimedb -v ${CARGO_REGISTRY_CACHE}:/root/.cargo/registry \
-w /greptimedb ${IMAGE_REGISTRY}/${IMAGE_NAMESPACE}/dev-builder-android:${DEV_BUILDER_IMAGE_TAG} \
make build \
CARGO_EXTENSION="ndk --platform 23 -t aarch64-linux-android" \
CARGO_PROFILE=release \
FEATURES="${FEATURES}" \
TARGET_DIR="${TARGET_DIR}" \
TARGET="${TARGET}" \
RELEASE="${RELEASE}" \
CARGO_BUILD_EXTRA_OPTS="--bin greptime --no-default-features"
.PHONY: strip-android-bin
strip-android-bin: build-android-bin ## Strip greptime binary for android.
docker run --network=host \
-v ${PWD}:/greptimedb \
-w /greptimedb ${IMAGE_REGISTRY}/${IMAGE_NAMESPACE}/dev-builder-android:${DEV_BUILDER_IMAGE_TAG} \
bash -c '$${NDK_ROOT}/toolchains/llvm/prebuilt/linux-x86_64/bin/llvm-strip --strip-debug /greptimedb/target/aarch64-linux-android/release/greptime'
.PHONY: release
release: ## Build release version greptime.
cargo build --release
.PHONY: clean
clean: ## Clean the project.
@@ -119,112 +21,41 @@ fmt: ## Format all the Rust code.
.PHONY: fmt-toml
fmt-toml: ## Format all TOML files.
taplo format
taplo format --option "indent_string= "
.PHONY: check-toml
check-toml: ## Check all TOML files.
taplo format --check
taplo format --check --option "indent_string= "
.PHONY: docker-image
docker-image: build-by-dev-builder ## Build docker image.
mkdir -p ${ARCH} && \
cp ./target/${OUTPUT_DIR}/greptime ${ARCH}/greptime && \
docker build -f docker/ci/${BASE_IMAGE}/Dockerfile -t ${IMAGE_REGISTRY}/${IMAGE_NAMESPACE}/greptimedb:${IMAGE_TAG} . && \
rm -r ${ARCH}
.PHONY: docker-image-buildx
docker-image-buildx: multi-platform-buildx ## Build docker image by buildx.
docker buildx build --builder ${BUILDX_BUILDER_NAME} \
--build-arg="CARGO_PROFILE=${CARGO_PROFILE}" \
--build-arg="FEATURES=${FEATURES}" \
--build-arg="OUTPUT_DIR=${OUTPUT_DIR}" \
-f docker/buildx/${BASE_IMAGE}/Dockerfile \
-t ${IMAGE_REGISTRY}/${IMAGE_NAMESPACE}/greptimedb:${IMAGE_TAG} ${BUILDX_MULTI_PLATFORM_BUILD_OPTS} .
.PHONY: dev-builder
dev-builder: multi-platform-buildx ## Build dev-builder image.
docker buildx build --builder ${BUILDX_BUILDER_NAME} \
--build-arg="RUST_TOOLCHAIN=${RUST_TOOLCHAIN}" \
-f docker/dev-builder/${BASE_IMAGE}/Dockerfile \
-t ${IMAGE_REGISTRY}/${IMAGE_NAMESPACE}/dev-builder-${BASE_IMAGE}:${DEV_BUILDER_IMAGE_TAG} ${BUILDX_MULTI_PLATFORM_BUILD_OPTS} .
.PHONY: multi-platform-buildx
multi-platform-buildx: ## Create buildx multi-platform builder.
docker buildx inspect ${BUILDX_BUILDER_NAME} || docker buildx create --name ${BUILDX_BUILDER_NAME} --driver docker-container --bootstrap --use
docker-image: ## Build docker image.
docker build --network host -f docker/Dockerfile -t ${IMAGE_REGISTRY}:${IMAGE_TAG} .
##@ Test
.PHONY: test
test: nextest ## Run unit and integration tests.
cargo nextest run ${NEXTEST_OPTS}
.PHONY: nextest
nextest: ## Install nextest tools.
cargo --list | grep nextest || cargo install cargo-nextest --locked
.PHONY: unit-test
unit-test: ## Run unit test.
cargo test --workspace
.PHONY: integration-test
integration-test: ## Run integration test.
cargo test integration
.PHONY: sqlness-test
sqlness-test: ## Run sqlness test.
cargo sqlness ${SQLNESS_OPTS}
# Run fuzz test ${FUZZ_TARGET}.
RUNS ?= 1
FUZZ_TARGET ?= fuzz_alter_table
.PHONY: fuzz
fuzz:
cargo fuzz run ${FUZZ_TARGET} --fuzz-dir tests-fuzz -D -s none -- -runs=${RUNS}
.PHONY: fuzz-ls
fuzz-ls:
cargo fuzz list --fuzz-dir tests-fuzz
cargo sqlness
.PHONY: check
check: ## Cargo check all the targets.
cargo check --workspace --all-targets --all-features
cargo check --workspace --all-targets
.PHONY: clippy
clippy: ## Check clippy rules.
cargo clippy --workspace --all-targets --all-features -- -D warnings
.PHONY: fix-clippy
fix-clippy: ## Fix clippy violations.
cargo clippy --workspace --all-targets --all-features --fix
cargo clippy --workspace --all-targets -- -D warnings
.PHONY: fmt-check
fmt-check: ## Check code format.
cargo fmt --all -- --check
python3 scripts/check-snafu.py
.PHONY: start-etcd
start-etcd: ## Start single node etcd for testing purpose.
docker run --rm -d --network=host -p 2379-2380:2379-2380 ${ETCD_IMAGE}
.PHONY: stop-etcd
stop-etcd: ## Stop single node etcd for testing purpose.
docker stop $$(docker ps -q --filter ancestor=${ETCD_IMAGE})
.PHONY: run-it-in-container
run-it-in-container: start-etcd ## Run integration tests in dev-builder.
docker run --network=host \
-v ${PWD}:/greptimedb -v ${CARGO_REGISTRY_CACHE}:/root/.cargo/registry -v /tmp:/tmp \
-w /greptimedb ${IMAGE_REGISTRY}/${IMAGE_NAMESPACE}/dev-builder-${BASE_IMAGE}:${DEV_BUILDER_IMAGE_TAG} \
make test sqlness-test BUILD_JOBS=${BUILD_JOBS}
.PHONY: start-cluster
start-cluster: ## Start the greptimedb cluster with etcd by using docker compose.
docker compose -f ./docker/docker-compose/cluster-with-etcd.yaml up
.PHONY: stop-cluster
stop-cluster: ## Stop the greptimedb cluster that was created by docker compose.
docker compose -f ./docker/docker-compose/cluster-with-etcd.yaml stop
##@ Docs
config-docs: ## Generate configuration documentation from toml files.
docker run --rm \
-v ${PWD}:/greptimedb \
-w /greptimedb/config \
toml2docs/toml2docs:v0.1.3 \
-p '##' \
-t ./config-docs-template.md \
-o ./config.md
##@ General
@@ -241,4 +72,4 @@ config-docs: ## Generate configuration documentation from toml files.
.PHONY: help
help: ## Display help messages.
@awk 'BEGIN {FS = ":.*##"; printf "\nUsage:\n make \033[36m<target>\033[0m\n"} /^[a-zA-Z_0-9-]+:.*?##/ { printf " \033[36m%-30s\033[0m %s\n", $$1, $$2 } /^##@/ { printf "\n\033[1m%s\033[0m\n", substr($$0, 5) } ' $(MAKEFILE_LIST)
@awk 'BEGIN {FS = ":.*##"; printf "\nUsage:\n make \033[36m<target>\033[0m\n"} /^[a-zA-Z_0-9-]+:.*?##/ { printf " \033[36m%-20s\033[0m %s\n", $$1, $$2 } /^##@/ { printf "\n\033[1m%s\033[0m\n", substr($$0, 5) } ' $(MAKEFILE_LIST)

README.md (290 lines changed)

@@ -1,159 +1,183 @@
<p align="center">
<picture>
<source media="(prefers-color-scheme: light)" srcset="https://cdn.jsdelivr.net/gh/GreptimeTeam/greptimedb@main/docs/logo-text-padding.png">
<source media="(prefers-color-scheme: dark)" srcset="https://cdn.jsdelivr.net/gh/GreptimeTeam/greptimedb@main/docs/logo-text-padding-dark.png">
<img alt="GreptimeDB Logo" src="https://cdn.jsdelivr.net/gh/GreptimeTeam/greptimedb@main/docs/logo-text-padding.png" width="400px">
<source media="(prefers-color-scheme: light)" srcset="https://cdn.jsdelivr.net/gh/GreptimeTeam/greptimedb@develop/docs/logo-text-padding.png">
<source media="(prefers-color-scheme: dark)" srcset="https://cdn.jsdelivr.net/gh/GreptimeTeam/greptimedb@develop/docs/logo-text-padding-dark.png">
<img alt="GreptimeDB Logo" src="https://cdn.jsdelivr.net/gh/GreptimeTeam/greptimedb@develop/docs/logo-text-padding.png" width="400px">
</picture>
</p>
<h2 align="center">Unified Time Series Database for Metrics, Logs, and Events</h2>
<div align="center">
<h3 align="center">
<a href="https://greptime.com/product/cloud">GreptimeCloud</a> |
<a href="https://docs.greptime.com/">User Guide</a> |
<a href="https://greptimedb.rs/">API Docs</a> |
<a href="https://github.com/GreptimeTeam/greptimedb/issues/3412">Roadmap 2024</a>
</h4>
The next-generation hybrid time-series/analytics processing database in the cloud
</h3>
<a href="https://github.com/GreptimeTeam/greptimedb/releases/latest">
<img src="https://img.shields.io/github/v/release/GreptimeTeam/greptimedb.svg" alt="Version"/>
</a>
<a href="https://github.com/GreptimeTeam/greptimedb/releases/latest">
<img src="https://img.shields.io/github/release-date/GreptimeTeam/greptimedb.svg" alt="Releases"/>
</a>
<a href="https://hub.docker.com/r/greptime/greptimedb/">
<img src="https://img.shields.io/docker/pulls/greptime/greptimedb.svg" alt="Docker Pulls"/>
</a>
<a href="https://github.com/GreptimeTeam/greptimedb/actions/workflows/develop.yml">
<img src="https://github.com/GreptimeTeam/greptimedb/actions/workflows/develop.yml/badge.svg" alt="GitHub Actions"/>
</a>
<a href="https://codecov.io/gh/GrepTimeTeam/greptimedb">
<img src="https://codecov.io/gh/GrepTimeTeam/greptimedb/branch/main/graph/badge.svg?token=FITFDI3J3C" alt="Codecov"/>
</a>
<a href="https://github.com/greptimeTeam/greptimedb/blob/main/LICENSE">
<img src="https://img.shields.io/github/license/greptimeTeam/greptimedb" alt="License"/>
</a>
<p align="center">
<a href="https://codecov.io/gh/GrepTimeTeam/greptimedb"><img src="https://codecov.io/gh/GrepTimeTeam/greptimedb/branch/develop/graph/badge.svg?token=FITFDI3J3C"></img></a>
&nbsp;
<a href="https://github.com/GreptimeTeam/greptimedb/actions/workflows/develop.yml"><img src="https://github.com/GreptimeTeam/greptimedb/actions/workflows/develop.yml/badge.svg" alt="CI"></img></a>
&nbsp;
<a href="https://github.com/greptimeTeam/greptimedb/blob/develop/LICENSE"><img src="https://img.shields.io/github/license/greptimeTeam/greptimedb"></a>
</p>
<br/>
<p align="center">
<a href="https://twitter.com/greptime"><img src="https://img.shields.io/badge/twitter-follow_us-1d9bf0.svg"></a>
&nbsp;
<a href="https://www.linkedin.com/company/greptime/"><img src="https://img.shields.io/badge/linkedin-connect_with_us-0a66c2.svg"></a>
&nbsp;
<a href="https://greptime.com/slack"><img src="https://img.shields.io/badge/slack-GreptimeDB-0abd59?logo=slack" alt="slack" /></a>
</p>
<a href="https://greptime.com/slack">
<img src="https://img.shields.io/badge/slack-GreptimeDB-0abd59?logo=slack&style=for-the-badge" alt="Slack"/>
</a>
<a href="https://twitter.com/greptime">
<img src="https://img.shields.io/badge/twitter-follow_us-1d9bf0.svg?style=for-the-badge" alt="Twitter"/>
</a>
<a href="https://www.linkedin.com/company/greptime/">
<img src="https://img.shields.io/badge/linkedin-connect_with_us-0a66c2.svg?style=for-the-badge" alt="LinkedIn"/>
</a>
</div>
## What is GreptimeDB
## Introduction
GreptimeDB is an open-source time-series database with a special focus on
scalability, analytical capabilities and efficiency. It's designed to work on
infrastructure of the cloud era, and users benefit from its elasticity and commodity
storage.
**GreptimeDB** is an open-source unified time-series database for **Metrics**, **Logs**, and **Events** (with **Traces** planned). You can gain real-time insights from Edge to Cloud at any scale.
Our core developers have been building time-series data platform
for years. Based on their best-practices, GreptimeDB is born to give you:
## Why GreptimeDB
- A standalone binary that scales to a highly available distributed cluster, providing a transparent experience for cluster users
- Optimized columnar layout for handling time-series data; compacted, compressed, and stored on various storage backends
- Flexible indexes that tackle high-cardinality issues
- Distributed, parallel query execution, leveraging elastic computing resources
- Native SQL and Python scripting for advanced analytical scenarios
- Widely adopted database protocols and APIs, with native PromQL support
- Extensible table engine architecture for extensive workloads
Our core developers have been building time-series data platforms for years. Based on our best practices, GreptimeDB is born to give you:
## Quick Start
* **Unified all kinds of time series**
### Build
GreptimeDB treats all time series as contextual events with timestamp, and thus unifies the processing of metrics, logs, and events. It supports analyzing metrics, logs, and events with SQL and PromQL, and doing streaming with continuous aggregation.
#### Build from Source
* **Cloud-Edge collaboration**
To compile GreptimeDB from source, you'll need:
GreptimeDB can be deployed on ARM architecture-compatible Android/Linux systems as well as cloud environments from various vendors. Both sides run the same software, providing identical APIs and control planes, so your application can run at the edge or on the cloud without modification, and data synchronization also becomes extremely easy and efficient.
- C/C++ Toolchain: provides basic tools for compiling and linking. This is
available as `build-essential` on Ubuntu and under similar names on other platforms.
- Rust: the easiest way to install Rust is to use
[`rustup`](https://rustup.rs/), which will check our `rust-toolchain` file and
install the correct Rust version for you.
- Protobuf: `protoc` is required for compiling `.proto` files. `protobuf` is
available from the major package managers on macOS and Linux distributions. You can
find installation instructions [here](https://grpc.io/docs/protoc-installation/).
**Note that the `protoc` version needs to be >= 3.15** because we have used the `optional`
keyword. You can check it with `protoc --version`.
- python3-dev or python3-devel (optional, only needed if you want to run scripts
in CPython; you also need to enable the `pyo3_backend` feature when compiling, either with `cargo run -F pyo3_backend` or by adding `pyo3_backend` to `features.default` in src/script/Cargo.toml, e.g. `default = ["python", "pyo3_backend"]`): this installs a Python shared library required for running the Python
scripting engine (in CPython mode). It is available as `python3-dev` on
Ubuntu (install it with `sudo apt install python3-dev`), or as
`python3-devel` on RPM-based distributions (e.g. Fedora, Red Hat, SuSE). macOS's
`Python3` package should include this shared library by default. More detail on compiling with PyO3 can be found in [PyO3](https://pyo3.rs/v0.18.1/building_and_distribution#configuring-the-python-version)'s documentation.
* **Cloud-native distributed database**
#### Build with Docker
By leveraging object storage (S3 and others), separating compute from storage, and scaling stateless compute nodes arbitrarily, GreptimeDB achieves seamless scalability. It also supports cross-cloud deployment with a built-in unified data access layer over different object storages.
A docker image with necessary dependencies is provided:
* **Performance and Cost-effective**
Flexible indexing capabilities and a distributed, parallel-processing query engine tackle high-cardinality issues. An optimized columnar layout handles time-series data, compacted, compressed, and stored on various storage backends, particularly cloud object storage, with 50x cost efficiency.
* **Compatible with InfluxDB, Prometheus and more protocols**
Widely adopted database protocols and APIs, including MySQL, PostgreSQL, Prometheus Remote Storage, and more. [Read more](https://docs.greptime.com/user-guide/protocols/overview).
## Try GreptimeDB
### 1. [GreptimePlay](https://greptime.com/playground)
Try out the features of GreptimeDB right from your browser.
### 2. [GreptimeCloud](https://console.greptime.cloud/)
Start instantly with a free cluster.
### 3. Docker Image
To install GreptimeDB locally, the recommended way is via Docker:
```shell
docker pull greptime/greptimedb
```
docker build --network host -f docker/Dockerfile -t greptimedb .
```
Start a GreptimeDB container with:
### Run
Start GreptimeDB from source code, in standalone mode:
```shell
docker run --rm --name greptime --net=host greptime/greptimedb standalone start
```
Read more about [Installation](https://docs.greptime.com/getting-started/installation/overview) on docs.
## Getting Started
* [Quickstart](https://docs.greptime.com/getting-started/quick-start)
* [User Guide](https://docs.greptime.com/user-guide/overview)
* [Demos](https://github.com/GreptimeTeam/demo-scene)
* [FAQ](https://docs.greptime.com/faq-and-others/faq)
## Build
Check the prerequisites:
* [Rust toolchain](https://www.rust-lang.org/tools/install) (nightly)
* [Protobuf compiler](https://grpc.io/docs/protoc-installation/) (>= 3.15)
* Python toolchain (optional): Required only if building with the PyO3 backend. More details on compiling with PyO3 can be found in its [documentation](https://pyo3.rs/v0.18.1/building_and_distribution#configuring-the-python-version).
Build GreptimeDB binary:
```shell
make
```
Run a standalone server:
```shell
cargo run -- standalone start
```
## Extension
Or if you built from docker:
```
docker run -p 4002:4002 -v "$(pwd):/tmp/greptimedb" greptime/greptimedb standalone start
```
For more startup options, GreptimeDB's **distributed mode**, and information
about Kubernetes deployment, check our [docs](https://docs.greptime.com/).
### Connect
1. Connect to GreptimeDB via standard [MySQL
client](https://dev.mysql.com/downloads/mysql/):
```
# The standalone instance listens on port 4002 by default.
mysql -h 127.0.0.1 -P 4002
```
2. Create table:
```SQL
CREATE TABLE monitor (
host STRING,
ts TIMESTAMP,
cpu DOUBLE DEFAULT 0,
memory DOUBLE,
TIME INDEX (ts),
PRIMARY KEY(host)) ENGINE=mito WITH(regions=1);
```
3. Insert some data:
```SQL
INSERT INTO monitor(host, cpu, memory, ts) VALUES ('host1', 66.6, 1024, 1660897955000);
INSERT INTO monitor(host, cpu, memory, ts) VALUES ('host2', 77.7, 2048, 1660897956000);
INSERT INTO monitor(host, cpu, memory, ts) VALUES ('host3', 88.8, 4096, 1660897957000);
```
4. Query the data:
```SQL
SELECT * FROM monitor;
```
```TEXT
+-------+---------------------+------+--------+
| host | ts | cpu | memory |
+-------+---------------------+------+--------+
| host1 | 2022-08-19 08:32:35 | 66.6 | 1024 |
| host2 | 2022-08-19 08:32:36 | 77.7 | 2048 |
| host3 | 2022-08-19 08:32:37 | 88.8 | 4096 |
+-------+---------------------+------+--------+
3 rows in set (0.01 sec)
```
You can always cleanup test database by removing `/tmp/greptimedb`.
## Resources
### Installation
- [Pre-built Binaries](https://github.com/GreptimeTeam/greptimedb/releases):
For Linux and macOS, you can easily download pre-built binaries that are ready to use. In most cases, downloading the version without PyO3 is sufficient. However, if you plan to run scripts in CPython (and use Python packages like NumPy and Pandas), you will need to download the version with PyO3 and install a Python interpreter whose version matches the one that PyO3 build was compiled against. We recommend using virtualenv to manage multiple Python versions during installation.
- [Docker Images](https://hub.docker.com/r/greptime/greptimedb) (**recommended**): pre-built
Docker images; this is the easiest way to try GreptimeDB. By default it runs CPython scripts with `pyo3_backend` enabled.
- [`gtctl`](https://github.com/GreptimeTeam/gtctl): the command-line tool for
Kubernetes deployment
### Documentation
- GreptimeDB [User Guide](https://docs.greptime.com/user-guide/concepts.html)
- GreptimeDB [Developer
Guide](https://docs.greptime.com/developer-guide/overview.html)
- GreptimeDB [internal code document](https://greptimedb.rs)
### Dashboard
- [The dashboard UI for GreptimeDB](https://github.com/GreptimeTeam/dashboard)
### SDK
- [GreptimeDB Go Ingester](https://github.com/GreptimeTeam/greptimedb-ingester-go)
- [GreptimeDB Java Ingester](https://github.com/GreptimeTeam/greptimedb-ingester-java)
- [GreptimeDB C++ Ingester](https://github.com/GreptimeTeam/greptimedb-ingester-cpp)
- [GreptimeDB Erlang Ingester](https://github.com/GreptimeTeam/greptimedb-ingester-erl)
- [GreptimeDB Rust Ingester](https://github.com/GreptimeTeam/greptimedb-ingester-rust)
- [GreptimeDB JavaScript Ingester](https://github.com/GreptimeTeam/greptimedb-ingester-js)
### Grafana Dashboard
Our official Grafana dashboard is available at [grafana](grafana/README.md) directory.
- [GreptimeDB Java
Client](https://github.com/GreptimeTeam/greptimedb-client-java)
## Project Status
The current version has not yet reached the standards for General Availability.
According to our Greptime 2024 Roadmap, we aim to achieve a production-level version with the release of v1.0 by the end of 2024. [Join Us](https://github.com/GreptimeTeam/greptimedb/issues/3412)
This project is in its early stage and under heavy development. We move fast and
break things. Benchmarks on the development branch may not represent its potential
performance. We release pre-built binaries constantly for functional
evaluation. Do not use it in production at the moment.
We welcome you to test and use GreptimeDB. Some users have already adopted it in their production environments. If you're interested in trying it out, please use the latest stable release available.
For future plans, check out [GreptimeDB roadmap](https://github.com/GreptimeTeam/greptimedb/issues/669).
## Community
@@ -163,37 +187,29 @@ and what went wrong. If you have any questions or if you would like to get invol
community, please check out:
- GreptimeDB Community on [Slack](https://greptime.com/slack)
- GreptimeDB [GitHub Discussions forum](https://github.com/GreptimeTeam/greptimedb/discussions)
- Greptime official [website](https://greptime.com)
- GreptimeDB GitHub [Discussions](https://github.com/GreptimeTeam/greptimedb/discussions)
- Greptime official [Website](https://greptime.com)
In addition, you may:
- View our official [Blog](https://greptime.com/blogs/)
- View our official [Blog](https://greptime.com/blogs/index)
- Connect with us on [LinkedIn](https://www.linkedin.com/company/greptime/)
- Follow us on [Twitter](https://twitter.com/greptime)
## Commercial Support
If you are running GreptimeDB OSS in your organization, we offer additional
enterprise add-ons, installation services, training, and consulting. [Contact
us](https://greptime.com/contactus) and we will reach out to you with more
details of our commercial license.
## License
GreptimeDB uses the [Apache License 2.0](https://apache.org/licenses/LICENSE-2.0.txt) to strike a balance between
GreptimeDB uses the [Apache 2.0 license][1] to strike a balance between
open contributions and allowing you to use the software however you want.
[1]: <https://github.com/greptimeTeam/greptimedb/blob/develop/LICENSE>
## Contributing
Please refer to [contribution guidelines](CONTRIBUTING.md) and [internal concepts docs](https://docs.greptime.com/contributor-guide/overview.html) for more information.
Please refer to [contribution guidelines](CONTRIBUTING.md) for more information.
## Acknowledgement
Special thanks to all the contributors who have propelled GreptimeDB forward. For a complete list of contributors, please refer to [AUTHOR.md](AUTHOR.md).
- GreptimeDB uses [Apache Arrow™](https://arrow.apache.org/) as the memory model and [Apache Parquet™](https://parquet.apache.org/) as the persistent file format.
- GreptimeDB's query engine is powered by [Apache Arrow DataFusion™](https://arrow.apache.org/datafusion/).
- [Apache OpenDAL™](https://opendal.apache.org) gives GreptimeDB a very general and elegant data access abstraction layer.
- GreptimeDB's meta service is based on [etcd](https://etcd.io/).
- GreptimeDB uses [Apache Arrow](https://arrow.apache.org/) as the memory model and [Apache Parquet](https://parquet.apache.org/) as the persistent file format.
- GreptimeDB's query engine is powered by [Apache Arrow DataFusion](https://github.com/apache/arrow-datafusion).
- [OpenDAL](https://github.com/datafuselabs/opendal) from [Datafuse Labs](https://github.com/datafuselabs) gives GreptimeDB a very general and elegant data access abstraction layer.
- GreptimeDBs meta service is based on [etcd](https://etcd.io/).
- GreptimeDB uses [RustPython](https://github.com/RustPython/RustPython) for experimental embedded python scripting.

benchmarks/Cargo.toml (new file, 14 lines)

@@ -0,0 +1,14 @@
[package]
name = "benchmarks"
version.workspace = true
edition.workspace = true
license.workspace = true
[dependencies]
arrow.workspace = true
clap = { version = "4.0", features = ["derive"] }
client = { path = "../src/client" }
indicatif = "0.17.1"
itertools = "0.10.5"
parquet.workspace = true
tokio.workspace = true


@@ -0,0 +1,462 @@
// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//! Use the taxi trip records from New York City dataset to bench. You can download the dataset from
//! [here](https://www1.nyc.gov/site/tlc/about/tlc-trip-record-data.page).
#![allow(clippy::print_stdout)]
use std::collections::HashMap;
use std::path::{Path, PathBuf};
use std::time::Instant;
use arrow::array::{ArrayRef, PrimitiveArray, StringArray, TimestampMicrosecondArray};
use arrow::datatypes::{DataType, Float64Type, Int64Type};
use arrow::record_batch::RecordBatch;
use clap::Parser;
use client::api::v1::column::Values;
use client::api::v1::{Column, ColumnDataType, ColumnDef, CreateTableExpr, InsertRequest};
use client::{Client, Database, DEFAULT_CATALOG_NAME, DEFAULT_SCHEMA_NAME};
use indicatif::{MultiProgress, ProgressBar, ProgressStyle};
use parquet::arrow::arrow_reader::ParquetRecordBatchReaderBuilder;
use tokio::task::JoinSet;
const CATALOG_NAME: &str = "greptime";
const SCHEMA_NAME: &str = "public";
const TABLE_NAME: &str = "nyc_taxi";
#[derive(Parser)]
#[command(name = "NYC benchmark runner")]
struct Args {
/// Path to the dataset
#[arg(short, long)]
path: Option<String>,
/// Batch size of insert request.
#[arg(short = 's', long = "batch-size", default_value_t = 4096)]
batch_size: usize,
/// Number of client threads on write (parallel on file level)
#[arg(short = 't', long = "thread-num", default_value_t = 4)]
thread_num: usize,
/// Number of query iteration
#[arg(short = 'i', long = "iter-num", default_value_t = 3)]
iter_num: usize,
#[arg(long = "skip-write")]
skip_write: bool,
#[arg(long = "skip-read")]
skip_read: bool,
#[arg(short, long, default_value_t = String::from("127.0.0.1:4001"))]
endpoint: String,
}
fn get_file_list<P: AsRef<Path>>(path: P) -> Vec<PathBuf> {
std::fs::read_dir(path)
.unwrap()
.map(|dir| dir.unwrap().path().canonicalize().unwrap())
.collect()
}
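// Streams one Parquet file into GreptimeDB in `batch_size`-row chunks, skipping
// record batches that contain nulls, and returns the total insert RPC time in milliseconds.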
async fn write_data(
batch_size: usize,
db: &Database,
path: PathBuf,
mpb: MultiProgress,
pb_style: ProgressStyle,
) -> u128 {
let file = std::fs::File::open(&path).unwrap();
let record_batch_reader_builder = ParquetRecordBatchReaderBuilder::try_new(file).unwrap();
let row_num = record_batch_reader_builder
.metadata()
.file_metadata()
.num_rows();
let record_batch_reader = record_batch_reader_builder
.with_batch_size(batch_size)
.build()
.unwrap();
let progress_bar = mpb.add(ProgressBar::new(row_num as _));
progress_bar.set_style(pb_style);
progress_bar.set_message(format!("{path:?}"));
let mut total_rpc_elapsed_ms = 0;
for record_batch in record_batch_reader {
let record_batch = record_batch.unwrap();
if !is_record_batch_full(&record_batch) {
continue;
}
let (columns, row_count) = convert_record_batch(record_batch);
let request = InsertRequest {
table_name: TABLE_NAME.to_string(),
region_number: 0,
columns,
row_count,
};
let now = Instant::now();
db.insert(request).await.unwrap();
let elapsed = now.elapsed();
total_rpc_elapsed_ms += elapsed.as_millis();
progress_bar.inc(row_count as _);
}
progress_bar.finish_with_message(format!("file {path:?} done in {total_rpc_elapsed_ms}ms",));
total_rpc_elapsed_ms
}
fn convert_record_batch(record_batch: RecordBatch) -> (Vec<Column>, u32) {
let schema = record_batch.schema();
let fields = schema.fields();
let row_count = record_batch.num_rows();
let mut columns = vec![];
for (array, field) in record_batch.columns().iter().zip(fields.iter()) {
let (values, datatype) = build_values(array);
let column = Column {
column_name: field.name().clone(),
values: Some(values),
null_mask: array
.to_data()
.nulls()
.map(|bitmap| bitmap.buffer().as_slice().to_vec())
.unwrap_or_default(),
datatype: datatype.into(),
// datatype and semantic_type are set to default
..Default::default()
};
columns.push(column);
}
(columns, row_count as _)
}
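// Maps an Arrow array to gRPC `Values` together with the matching `ColumnDataType`;
// data types not present in the NYC taxi dataset fall through to `todo!()`.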
fn build_values(column: &ArrayRef) -> (Values, ColumnDataType) {
match column.data_type() {
DataType::Int64 => {
let array = column
.as_any()
.downcast_ref::<PrimitiveArray<Int64Type>>()
.unwrap();
let values = array.values();
(
Values {
i64_values: values.to_vec(),
..Default::default()
},
ColumnDataType::Int64,
)
}
DataType::Float64 => {
let array = column
.as_any()
.downcast_ref::<PrimitiveArray<Float64Type>>()
.unwrap();
let values = array.values();
(
Values {
f64_values: values.to_vec(),
..Default::default()
},
ColumnDataType::Float64,
)
}
DataType::Timestamp(_, _) => {
let array = column
.as_any()
.downcast_ref::<TimestampMicrosecondArray>()
.unwrap();
let values = array.values();
(
Values {
ts_microsecond_values: values.to_vec(),
..Default::default()
},
ColumnDataType::TimestampMicrosecond,
)
}
DataType::Utf8 => {
let array = column.as_any().downcast_ref::<StringArray>().unwrap();
let values = array.iter().filter_map(|s| s.map(String::from)).collect();
(
Values {
string_values: values,
..Default::default()
},
ColumnDataType::String,
)
}
DataType::Null
| DataType::Boolean
| DataType::Int8
| DataType::Int16
| DataType::Int32
| DataType::UInt8
| DataType::UInt16
| DataType::UInt32
| DataType::UInt64
| DataType::Float16
| DataType::Float32
| DataType::Date32
| DataType::Date64
| DataType::Time32(_)
| DataType::Time64(_)
| DataType::Duration(_)
| DataType::Interval(_)
| DataType::Binary
| DataType::FixedSizeBinary(_)
| DataType::LargeBinary
| DataType::LargeUtf8
| DataType::List(_)
| DataType::FixedSizeList(_, _)
| DataType::LargeList(_)
| DataType::Struct(_)
| DataType::Union(_, _)
| DataType::Dictionary(_, _)
| DataType::Decimal128(_, _)
| DataType::Decimal256(_, _)
| DataType::RunEndEncoded(_, _)
| DataType::Map(_, _) => todo!(),
}
}
fn is_record_batch_full(batch: &RecordBatch) -> bool {
batch.columns().iter().all(|col| col.null_count() == 0)
}
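// Builds the `nyc_taxi` table definition: `tpep_pickup_datetime` is the time index,
// `VendorID` is the primary key, and the table uses the mito engine.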
fn create_table_expr() -> CreateTableExpr {
CreateTableExpr {
catalog_name: CATALOG_NAME.to_string(),
schema_name: SCHEMA_NAME.to_string(),
table_name: TABLE_NAME.to_string(),
desc: "".to_string(),
column_defs: vec![
ColumnDef {
name: "VendorID".to_string(),
datatype: ColumnDataType::Int64 as i32,
is_nullable: true,
default_constraint: vec![],
},
ColumnDef {
name: "tpep_pickup_datetime".to_string(),
datatype: ColumnDataType::TimestampMicrosecond as i32,
is_nullable: true,
default_constraint: vec![],
},
ColumnDef {
name: "tpep_dropoff_datetime".to_string(),
datatype: ColumnDataType::TimestampMicrosecond as i32,
is_nullable: true,
default_constraint: vec![],
},
ColumnDef {
name: "passenger_count".to_string(),
datatype: ColumnDataType::Float64 as i32,
is_nullable: true,
default_constraint: vec![],
},
ColumnDef {
name: "trip_distance".to_string(),
datatype: ColumnDataType::Float64 as i32,
is_nullable: true,
default_constraint: vec![],
},
ColumnDef {
name: "RatecodeID".to_string(),
datatype: ColumnDataType::Float64 as i32,
is_nullable: true,
default_constraint: vec![],
},
ColumnDef {
name: "store_and_fwd_flag".to_string(),
datatype: ColumnDataType::String as i32,
is_nullable: true,
default_constraint: vec![],
},
ColumnDef {
name: "PULocationID".to_string(),
datatype: ColumnDataType::Int64 as i32,
is_nullable: true,
default_constraint: vec![],
},
ColumnDef {
name: "DOLocationID".to_string(),
datatype: ColumnDataType::Int64 as i32,
is_nullable: true,
default_constraint: vec![],
},
ColumnDef {
name: "payment_type".to_string(),
datatype: ColumnDataType::Int64 as i32,
is_nullable: true,
default_constraint: vec![],
},
ColumnDef {
name: "fare_amount".to_string(),
datatype: ColumnDataType::Float64 as i32,
is_nullable: true,
default_constraint: vec![],
},
ColumnDef {
name: "extra".to_string(),
datatype: ColumnDataType::Float64 as i32,
is_nullable: true,
default_constraint: vec![],
},
ColumnDef {
name: "mta_tax".to_string(),
datatype: ColumnDataType::Float64 as i32,
is_nullable: true,
default_constraint: vec![],
},
ColumnDef {
name: "tip_amount".to_string(),
datatype: ColumnDataType::Float64 as i32,
is_nullable: true,
default_constraint: vec![],
},
ColumnDef {
name: "tolls_amount".to_string(),
datatype: ColumnDataType::Float64 as i32,
is_nullable: true,
default_constraint: vec![],
},
ColumnDef {
name: "improvement_surcharge".to_string(),
datatype: ColumnDataType::Float64 as i32,
is_nullable: true,
default_constraint: vec![],
},
ColumnDef {
name: "total_amount".to_string(),
datatype: ColumnDataType::Float64 as i32,
is_nullable: true,
default_constraint: vec![],
},
ColumnDef {
name: "congestion_surcharge".to_string(),
datatype: ColumnDataType::Float64 as i32,
is_nullable: true,
default_constraint: vec![],
},
ColumnDef {
name: "airport_fee".to_string(),
datatype: ColumnDataType::Float64 as i32,
is_nullable: true,
default_constraint: vec![],
},
],
time_index: "tpep_pickup_datetime".to_string(),
primary_keys: vec!["VendorID".to_string()],
create_if_not_exists: false,
table_options: Default::default(),
region_ids: vec![0],
table_id: None,
engine: "mito".to_string(),
}
}
fn query_set() -> HashMap<String, String> {
let mut ret = HashMap::new();
ret.insert(
"count_all".to_string(),
format!("SELECT COUNT(*) FROM {TABLE_NAME};"),
);
ret.insert(
"fare_amt_by_passenger".to_string(),
format!("SELECT passenger_count, MIN(fare_amount), MAX(fare_amount), SUM(fare_amount) FROM {TABLE_NAME} GROUP BY passenger_count")
);
ret
}
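// Creates the target table, then writes every Parquet file while keeping at most
// `thread_num` file-level write tasks in flight, refilling the set as tasks finish.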
async fn do_write(args: &Args, db: &Database) {
let mut file_list = get_file_list(args.path.clone().expect("Specify data path in argument"));
let mut write_jobs = JoinSet::new();
let create_table_result = db.create(create_table_expr()).await;
println!("Create table result: {create_table_result:?}");
let progress_bar_style = ProgressStyle::with_template(
"[{elapsed_precise}] {bar:60.cyan/blue} {pos:>7}/{len:7} {msg}",
)
.unwrap()
.progress_chars("##-");
let multi_progress_bar = MultiProgress::new();
let file_progress = multi_progress_bar.add(ProgressBar::new(file_list.len() as _));
file_progress.inc(0);
let batch_size = args.batch_size;
for _ in 0..args.thread_num {
if let Some(path) = file_list.pop() {
let db = db.clone();
let mpb = multi_progress_bar.clone();
let pb_style = progress_bar_style.clone();
write_jobs.spawn(async move { write_data(batch_size, &db, path, mpb, pb_style).await });
}
}
while write_jobs.join_next().await.is_some() {
file_progress.inc(1);
if let Some(path) = file_list.pop() {
let db = db.clone();
let mpb = multi_progress_bar.clone();
let pb_style = progress_bar_style.clone();
write_jobs.spawn(async move { write_data(batch_size, &db, path, mpb, pb_style).await });
}
}
}
async fn do_query(num_iter: usize, db: &Database) {
for (query_name, query) in query_set() {
println!("Running query: {query}");
for i in 0..num_iter {
let now = Instant::now();
let _res = db.sql(&query).await.unwrap();
let elapsed = now.elapsed();
println!(
"query {}, iteration {}: {}ms",
query_name,
i,
elapsed.as_millis()
);
}
}
}
fn main() {
let args = Args::parse();
tokio::runtime::Builder::new_multi_thread()
.worker_threads(args.thread_num)
.enable_all()
.build()
.unwrap()
.block_on(async {
let client = Client::with_urls(vec![&args.endpoint]);
let db = Database::new(DEFAULT_CATALOG_NAME, DEFAULT_SCHEMA_NAME, client);
if !args.skip_write {
do_write(&args, &db).await;
}
if !args.skip_read {
do_query(args.iter_num, &db).await;
}
})
}


@@ -1,127 +0,0 @@
# https://git-cliff.org/docs/configuration
[remote.github]
owner = "GreptimeTeam"
repo = "greptimedb"
[changelog]
header = ""
footer = ""
# template for the changelog body
# https://keats.github.io/tera/docs/#introduction
body = """
# {{ version }}
Release date: {{ timestamp | date(format="%B %d, %Y") }}
{%- set breakings = commits | filter(attribute="breaking", value=true) -%}
{%- if breakings | length > 0 %}
## Breaking changes
{% for commit in breakings %}
* {{ commit.github.pr_title }}\
{% if commit.github.username %} by \
{% set author = commit.github.username -%}
[@{{ author }}](https://github.com/{{ author }})
{%- endif -%}
{% if commit.github.pr_number %} in \
{% set number = commit.github.pr_number -%}
[#{{ number }}]({{ self::remote_url() }}/pull/{{ number }})
{%- endif %}
{%- endfor %}
{%- endif -%}
{%- set grouped_commits = commits | filter(attribute="breaking", value=false) | group_by(attribute="group") -%}
{% for group, commits in grouped_commits %}
### {{ group | striptags | trim | upper_first }}
{% for commit in commits %}
* {{ commit.github.pr_title }}\
{% if commit.github.username %} by \
{% set author = commit.github.username -%}
[@{{ author }}](https://github.com/{{ author }})
{%- endif -%}
{% if commit.github.pr_number %} in \
{% set number = commit.github.pr_number -%}
[#{{ number }}]({{ self::remote_url() }}/pull/{{ number }})
{%- endif %}
{%- endfor -%}
{% endfor %}
{%- if github.contributors | filter(attribute="is_first_time", value=true) | length != 0 %}
{% raw %}\n{% endraw -%}
## New Contributors
{% endif -%}
{% for contributor in github.contributors | filter(attribute="is_first_time", value=true) %}
* [@{{ contributor.username }}](https://github.com/{{ contributor.username }}) made their first contribution
{%- if contributor.pr_number %} in \
[#{{ contributor.pr_number }}]({{ self::remote_url() }}/pull/{{ contributor.pr_number }}) \
{%- endif %}
{%- endfor -%}
{% if github.contributors | length != 0 %}
{% raw %}\n{% endraw -%}
## All Contributors
We would like to thank the following contributors from the GreptimeDB community:
{%- set contributors = github.contributors | sort(attribute="username") | map(attribute="username") -%}
{%- set bots = ['dependabot[bot]'] %}
{% for contributor in contributors %}
{%- if bots is containing(contributor) -%}{% continue %}{%- endif -%}
{%- if loop.first -%}
[@{{ contributor }}](https://github.com/{{ contributor }})
{%- else -%}
, [@{{ contributor }}](https://github.com/{{ contributor }})
{%- endif -%}
{%- endfor %}
{%- endif %}
{% raw %}\n{% endraw %}
{%- macro remote_url() -%}
https://github.com/{{ remote.github.owner }}/{{ remote.github.repo }}
{%- endmacro -%}
"""
trim = true
[git]
# parse the commits based on https://www.conventionalcommits.org
conventional_commits = true
# filter out the commits that are not conventional
filter_unconventional = true
# process each line of a commit as an individual commit
split_commits = false
# regex for parsing and grouping commits
commit_parsers = [
{ message = "^feat", group = "<!-- 0 -->🚀 Features" },
{ message = "^fix", group = "<!-- 1 -->🐛 Bug Fixes" },
{ message = "^doc", group = "<!-- 3 -->📚 Documentation" },
{ message = "^perf", group = "<!-- 4 -->⚡ Performance" },
{ message = "^refactor", group = "<!-- 2 -->🚜 Refactor" },
{ message = "^style", group = "<!-- 5 -->🎨 Styling" },
{ message = "^test", group = "<!-- 6 -->🧪 Testing" },
{ message = "^chore\\(release\\): prepare for", skip = true },
{ message = "^chore\\(deps.*\\)", skip = true },
{ message = "^chore\\(pr\\)", skip = true },
{ message = "^chore\\(pull\\)", skip = true },
{ message = "^chore|^ci", group = "<!-- 7 -->⚙️ Miscellaneous Tasks" },
{ body = ".*security", group = "<!-- 8 -->🛡️ Security" },
{ message = "^revert", group = "<!-- 9 -->◀️ Revert" },
]
# protect breaking changes from being skipped due to matching a skipping commit_parser
protect_breaking_commits = false
# filter out the commits that are not matched by commit parsers
filter_commits = false
# regex for matching git tags
# tag_pattern = "v[0-9].*"
# regex for skipping tags
# skip_tags = ""
# regex for ignoring tags
ignore_tags = ".*-nightly-.*"
# sort the tags topologically
topo_order = false
# sort the commits inside sections by oldest/newest order
sort_commits = "oldest"
# limit the number of commits included in the changelog.
# limit_commits = 42


@@ -8,6 +8,5 @@ coverage:
ignore:
- "**/error*.rs" # ignore all error.rs files
- "tests/runner/*.rs" # ignore integration test runner
- "tests-integration/**/*.rs" # ignore integration tests
comment: # this is a top-level key
layout: "diff"


@@ -1,31 +0,0 @@
# Configurations
- [Configurations](#configurations)
- [Standalone Mode](#standalone-mode)
- [Distributed Mode](#distributed-mode)
- [Frontend](#frontend)
- [Metasrv](#metasrv)
- [Datanode](#datanode)
- [Flownode](#flownode)
## Standalone Mode
{{ toml2docs "./standalone.example.toml" }}
## Distributed Mode
### Frontend
{{ toml2docs "./frontend.example.toml" }}
### Metasrv
{{ toml2docs "./metasrv.example.toml" }}
### Datanode
{{ toml2docs "./datanode.example.toml" }}
### Flownode
{{ toml2docs "./flownode.example.toml"}}


@@ -1,547 +0,0 @@
# Configurations
- [Configurations](#configurations)
- [Standalone Mode](#standalone-mode)
- [Distributed Mode](#distributed-mode)
- [Frontend](#frontend)
- [Metasrv](#metasrv)
- [Datanode](#datanode)
- [Flownode](#flownode)
## Standalone Mode
| Key | Type | Default | Descriptions |
| --- | -----| ------- | ----------- |
| `mode` | String | `standalone` | The running mode of the datanode. It can be `standalone` or `distributed`. |
| `enable_telemetry` | Bool | `true` | Enable telemetry to collect anonymous usage data. |
| `default_timezone` | String | Unset | The default timezone of the server. |
| `init_regions_in_background` | Bool | `false` | Initialize all regions in the background during the startup.<br/>By default, it provides services after all regions have been initialized. |
| `init_regions_parallelism` | Integer | `16` | Parallelism of initializing regions. |
| `max_concurrent_queries` | Integer | `0` | The maximum number of concurrent queries allowed to be executed. Zero means unlimited. |
| `runtime` | -- | -- | The runtime options. |
| `runtime.global_rt_size` | Integer | `8` | The number of threads to execute the runtime for global read operations. |
| `runtime.compact_rt_size` | Integer | `4` | The number of threads to execute the runtime for global write operations. |
| `http` | -- | -- | The HTTP server options. |
| `http.addr` | String | `127.0.0.1:4000` | The address to bind the HTTP server. |
| `http.timeout` | String | `30s` | HTTP request timeout. Set to 0 to disable timeout. |
| `http.body_limit` | String | `64MB` | HTTP request body limit.<br/>The following units are supported: `B`, `KB`, `KiB`, `MB`, `MiB`, `GB`, `GiB`, `TB`, `TiB`, `PB`, `PiB`.<br/>Set to 0 to disable limit. |
| `grpc` | -- | -- | The gRPC server options. |
| `grpc.addr` | String | `127.0.0.1:4001` | The address to bind the gRPC server. |
| `grpc.runtime_size` | Integer | `8` | The number of server worker threads. |
| `grpc.tls` | -- | -- | gRPC server TLS options, see `mysql.tls` section. |
| `grpc.tls.mode` | String | `disable` | TLS mode. |
| `grpc.tls.cert_path` | String | Unset | Certificate file path. |
| `grpc.tls.key_path` | String | Unset | Private key file path. |
| `grpc.tls.watch` | Bool | `false` | Watch for Certificate and key file change and auto reload.<br/>For now, gRPC tls config does not support auto reload. |
| `mysql` | -- | -- | MySQL server options. |
| `mysql.enable` | Bool | `true` | Whether to enable. |
| `mysql.addr` | String | `127.0.0.1:4002` | The addr to bind the MySQL server. |
| `mysql.runtime_size` | Integer | `2` | The number of server worker threads. |
| `mysql.tls` | -- | -- | -- |
| `mysql.tls.mode` | String | `disable` | TLS mode, refer to https://www.postgresql.org/docs/current/libpq-ssl.html<br/>- `disable` (default value)<br/>- `prefer`<br/>- `require`<br/>- `verify-ca`<br/>- `verify-full` |
| `mysql.tls.cert_path` | String | Unset | Certificate file path. |
| `mysql.tls.key_path` | String | Unset | Private key file path. |
| `mysql.tls.watch` | Bool | `false` | Watch for Certificate and key file change and auto reload |
| `postgres` | -- | -- | PostgreSQL server options. |
| `postgres.enable` | Bool | `true` | Whether to enable. |
| `postgres.addr` | String | `127.0.0.1:4003` | The addr to bind the PostgreSQL server. |
| `postgres.runtime_size` | Integer | `2` | The number of server worker threads. |
| `postgres.tls` | -- | -- | PostgreSQL server TLS options, see `mysql.tls` section. |
| `postgres.tls.mode` | String | `disable` | TLS mode. |
| `postgres.tls.cert_path` | String | Unset | Certificate file path. |
| `postgres.tls.key_path` | String | Unset | Private key file path. |
| `postgres.tls.watch` | Bool | `false` | Watch for Certificate and key file change and auto reload |
| `opentsdb` | -- | -- | OpenTSDB protocol options. |
| `opentsdb.enable` | Bool | `true` | Whether to enable OpenTSDB put in HTTP API. |
| `influxdb` | -- | -- | InfluxDB protocol options. |
| `influxdb.enable` | Bool | `true` | Whether to enable InfluxDB protocol in HTTP API. |
| `prom_store` | -- | -- | Prometheus remote storage options |
| `prom_store.enable` | Bool | `true` | Whether to enable Prometheus remote write and read in HTTP API. |
| `prom_store.with_metric_engine` | Bool | `true` | Whether to store the data from Prometheus remote write in metric engine. |
| `wal` | -- | -- | The WAL options. |
| `wal.provider` | String | `raft_engine` | The provider of the WAL.<br/>- `raft_engine`: the WAL is stored in the local file system by raft-engine.<br/>- `kafka`: remote WAL, with data stored in Kafka. |
| `wal.dir` | String | Unset | The directory to store the WAL files.<br/>**It's only used when the provider is `raft_engine`**. |
| `wal.file_size` | String | `256MB` | The size of the WAL segment file.<br/>**It's only used when the provider is `raft_engine`**. |
| `wal.purge_threshold` | String | `4GB` | The threshold of the WAL size to trigger a flush.<br/>**It's only used when the provider is `raft_engine`**. |
| `wal.purge_interval` | String | `10m` | The interval to trigger a flush.<br/>**It's only used when the provider is `raft_engine`**. |
| `wal.read_batch_size` | Integer | `128` | The read batch size.<br/>**It's only used when the provider is `raft_engine`**. |
| `wal.sync_write` | Bool | `false` | Whether to use sync write.<br/>**It's only used when the provider is `raft_engine`**. |
| `wal.enable_log_recycle` | Bool | `true` | Whether to reuse logically truncated log files.<br/>**It's only used when the provider is `raft_engine`**. |
| `wal.prefill_log_files` | Bool | `false` | Whether to pre-create log files on start up.<br/>**It's only used when the provider is `raft_engine`**. |
| `wal.sync_period` | String | `10s` | Duration for fsyncing log files.<br/>**It's only used when the provider is `raft_engine`**. |
| `wal.recovery_parallelism` | Integer | `2` | Parallelism during WAL recovery. |
| `wal.broker_endpoints` | Array | -- | The Kafka broker endpoints.<br/>**It's only used when the provider is `kafka`**. |
| `wal.auto_create_topics` | Bool | `true` | Automatically create topics for WAL.<br/>Set to `true` to automatically create topics for WAL.<br/>Otherwise, use topics named `topic_name_prefix_[0..num_topics)` |
| `wal.num_topics` | Integer | `64` | Number of topics.<br/>**It's only used when the provider is `kafka`**. |
| `wal.selector_type` | String | `round_robin` | Topic selector type.<br/>Available selector types:<br/>- `round_robin` (default)<br/>**It's only used when the provider is `kafka`**. |
| `wal.topic_name_prefix` | String | `greptimedb_wal_topic` | A Kafka topic is constructed by concatenating `topic_name_prefix` and `topic_id`.<br/>e.g., greptimedb_wal_topic_0, greptimedb_wal_topic_1.<br/>**It's only used when the provider is `kafka`**. |
| `wal.replication_factor` | Integer | `1` | Expected number of replicas of each partition.<br/>**It's only used when the provider is `kafka`**. |
| `wal.create_topic_timeout` | String | `30s` | The timeout above which a topic creation operation will be cancelled.<br/>**It's only used when the provider is `kafka`**. |
| `wal.max_batch_bytes` | String | `1MB` | The max size of a single producer batch.<br/>Warning: Kafka has a default limit of 1MB per message in a topic.<br/>**It's only used when the provider is `kafka`**. |
| `wal.consumer_wait_timeout` | String | `100ms` | The consumer wait timeout.<br/>**It's only used when the provider is `kafka`**. |
| `wal.backoff_init` | String | `500ms` | The initial backoff delay.<br/>**It's only used when the provider is `kafka`**. |
| `wal.backoff_max` | String | `10s` | The maximum backoff delay.<br/>**It's only used when the provider is `kafka`**. |
| `wal.backoff_base` | Integer | `2` | The exponential backoff rate, i.e. next backoff = base * current backoff.<br/>**It's only used when the provider is `kafka`**. |
| `wal.backoff_deadline` | String | `5mins` | The deadline of retries.<br/>**It's only used when the provider is `kafka`**. |
| `wal.overwrite_entry_start_id` | Bool | `false` | Ignore missing entries when reading the WAL.<br/>**It's only used when the provider is `kafka`**.<br/><br/>This option ensures that when Kafka messages are deleted, the system<br/>can still successfully replay memtable data without throwing an<br/>out-of-range error.<br/>However, enabling this option might lead to unexpected data loss,<br/>as the system will skip over missing entries instead of treating<br/>them as critical errors. |
| `metadata_store` | -- | -- | Metadata storage options. |
| `metadata_store.file_size` | String | `256MB` | Kv file size in bytes. |
| `metadata_store.purge_threshold` | String | `4GB` | Kv purge threshold. |
| `procedure` | -- | -- | Procedure storage options. |
| `procedure.max_retry_times` | Integer | `3` | Procedure max retry time. |
| `procedure.retry_delay` | String | `500ms` | Initial retry delay of procedures; increases exponentially. |
| `storage` | -- | -- | The data storage options. |
| `storage.data_home` | String | `/tmp/greptimedb/` | The working home directory. |
| `storage.type` | String | `File` | The storage type used to store the data.<br/>- `File`: the data is stored in the local file system.<br/>- `S3`: the data is stored in the S3 object storage.<br/>- `Gcs`: the data is stored in the Google Cloud Storage.<br/>- `Azblob`: the data is stored in the Azure Blob Storage.<br/>- `Oss`: the data is stored in the Aliyun OSS. |
| `storage.cache_path` | String | Unset | Cache configuration for object storage such as 'S3' etc.<br/>The local file cache directory. |
| `storage.cache_capacity` | String | Unset | The local file cache capacity in bytes. |
| `storage.bucket` | String | Unset | The S3 bucket name.<br/>**It's only used when the storage type is `S3`, `Oss` and `Gcs`**. |
| `storage.root` | String | Unset | The S3 data will be stored in the specified prefix, for example, `s3://${bucket}/${root}`.<br/>**It's only used when the storage type is `S3`, `Oss` and `Azblob`**. |
| `storage.access_key_id` | String | Unset | The access key id of the aws account.<br/>It's **highly recommended** to use AWS IAM roles instead of hardcoding the access key id and secret key.<br/>**It's only used when the storage type is `S3` and `Oss`**. |
| `storage.secret_access_key` | String | Unset | The secret access key of the aws account.<br/>It's **highly recommended** to use AWS IAM roles instead of hardcoding the access key id and secret key.<br/>**It's only used when the storage type is `S3`**. |
| `storage.access_key_secret` | String | Unset | The secret access key of the aliyun account.<br/>**It's only used when the storage type is `Oss`**. |
| `storage.account_name` | String | Unset | The account name of the azure account.<br/>**It's only used when the storage type is `Azblob`**. |
| `storage.account_key` | String | Unset | The account key of the azure account.<br/>**It's only used when the storage type is `Azblob`**. |
| `storage.scope` | String | Unset | The scope of the google cloud storage.<br/>**It's only used when the storage type is `Gcs`**. |
| `storage.credential_path` | String | Unset | The credential path of the google cloud storage.<br/>**It's only used when the storage type is `Gcs`**. |
| `storage.credential` | String | Unset | The credential of the google cloud storage.<br/>**It's only used when the storage type is `Gcs`**. |
| `storage.container` | String | Unset | The container of the azure account.<br/>**It's only used when the storage type is `Azblob`**. |
| `storage.sas_token` | String | Unset | The sas token of the azure account.<br/>**It's only used when the storage type is `Azblob`**. |
| `storage.endpoint` | String | Unset | The endpoint of the S3 service.<br/>**It's only used when the storage type is `S3`, `Oss`, `Gcs` and `Azblob`**. |
| `storage.region` | String | Unset | The region of the S3 service.<br/>**It's only used when the storage type is `S3`, `Oss`, `Gcs` and `Azblob`**. |
| `[[region_engine]]` | -- | -- | The region engine options. You can configure multiple region engines. |
| `region_engine.mito` | -- | -- | The Mito engine options. |
| `region_engine.mito.num_workers` | Integer | `8` | Number of region workers. |
| `region_engine.mito.worker_channel_size` | Integer | `128` | Request channel size of each worker. |
| `region_engine.mito.worker_request_batch_size` | Integer | `64` | Max batch size for a worker to handle requests. |
| `region_engine.mito.manifest_checkpoint_distance` | Integer | `10` | Number of meta action updated to trigger a new checkpoint for the manifest. |
| `region_engine.mito.compress_manifest` | Bool | `false` | Whether to compress manifest and checkpoint file by gzip (default false). |
| `region_engine.mito.max_background_flushes` | Integer | Auto | Max number of running background flush jobs (default: 1/2 of cpu cores). |
| `region_engine.mito.max_background_compactions` | Integer | Auto | Max number of running background compaction jobs (default: 1/4 of cpu cores). |
| `region_engine.mito.max_background_purges` | Integer | Auto | Max number of running background purge jobs (default: number of cpu cores). |
| `region_engine.mito.auto_flush_interval` | String | `1h` | Interval to auto flush a region if it has not flushed yet. |
| `region_engine.mito.global_write_buffer_size` | String | Auto | Global write buffer size for all regions. If not set, it defaults to 1/8 of OS memory with a maximum of 1GB. |
| `region_engine.mito.global_write_buffer_reject_size` | String | Auto | Global write buffer size threshold to reject write requests. If not set, it defaults to 2 times `global_write_buffer_size`. |
| `region_engine.mito.sst_meta_cache_size` | String | Auto | Cache size for SST metadata. Set it to 0 to disable the cache.<br/>If not set, it defaults to 1/32 of OS memory with a maximum of 128MB. |
| `region_engine.mito.vector_cache_size` | String | Auto | Cache size for vectors and arrow arrays. Set it to 0 to disable the cache.<br/>If not set, it defaults to 1/16 of OS memory with a maximum of 512MB. |
| `region_engine.mito.page_cache_size` | String | Auto | Cache size for pages of SST row groups. Set it to 0 to disable the cache.<br/>If not set, it defaults to 1/8 of OS memory. |
| `region_engine.mito.selector_result_cache_size` | String | Auto | Cache size for time series selector (e.g. `last_value()`). Set it to 0 to disable the cache.<br/>If not set, it defaults to 1/16 of OS memory with a maximum of 512MB. |
| `region_engine.mito.enable_experimental_write_cache` | Bool | `false` | Whether to enable the experimental write cache. |
| `region_engine.mito.experimental_write_cache_path` | String | `""` | File system path for write cache, defaults to `{data_home}/write_cache`. |
| `region_engine.mito.experimental_write_cache_size` | String | `512MB` | Capacity for write cache. |
| `region_engine.mito.experimental_write_cache_ttl` | String | Unset | TTL for write cache. |
| `region_engine.mito.sst_write_buffer_size` | String | `8MB` | Buffer size for SST writing. |
| `region_engine.mito.scan_parallelism` | Integer | `0` | Parallelism to scan a region (default: 1/4 of cpu cores).<br/>- `0`: use the default value (1/4 of cpu cores).<br/>- `1`: scan in the current thread.<br/>- `n`: scan with parallelism n. |
| `region_engine.mito.parallel_scan_channel_size` | Integer | `32` | Capacity of the channel to send data from parallel scan tasks to the main task. |
| `region_engine.mito.allow_stale_entries` | Bool | `false` | Whether to allow stale WAL entries read during replay. |
| `region_engine.mito.min_compaction_interval` | String | `0m` | Minimum time interval between two compactions.<br/>To align with the old behavior, the default value is 0 (no restrictions). |
| `region_engine.mito.index` | -- | -- | The options for index in Mito engine. |
| `region_engine.mito.index.aux_path` | String | `""` | Auxiliary directory path for the index in filesystem, used to store intermediate files for<br/>creating the index and staging files for searching the index, defaults to `{data_home}/index_intermediate`.<br/>The default name for this directory is `index_intermediate` for backward compatibility.<br/><br/>This path contains two subdirectories:<br/>- `__intm`: for storing intermediate files used during creating index.<br/>- `staging`: for storing staging files used during searching index. |
| `region_engine.mito.index.staging_size` | String | `2GB` | The max capacity of the staging directory. |
| `region_engine.mito.inverted_index` | -- | -- | The options for inverted index in Mito engine. |
| `region_engine.mito.inverted_index.create_on_flush` | String | `auto` | Whether to create the index on flush.<br/>- `auto`: automatically (default)<br/>- `disable`: never |
| `region_engine.mito.inverted_index.create_on_compaction` | String | `auto` | Whether to create the index on compaction.<br/>- `auto`: automatically (default)<br/>- `disable`: never |
| `region_engine.mito.inverted_index.apply_on_query` | String | `auto` | Whether to apply the index on query<br/>- `auto`: automatically (default)<br/>- `disable`: never |
| `region_engine.mito.inverted_index.mem_threshold_on_create` | String | `auto` | Memory threshold for performing an external sort during index creation.<br/>- `auto`: automatically determine the threshold based on the system memory size (default)<br/>- `unlimited`: no memory limit<br/>- `[size]` e.g. `64MB`: fixed memory threshold |
| `region_engine.mito.inverted_index.intermediate_path` | String | `""` | Deprecated, use `region_engine.mito.index.aux_path` instead. |
| `region_engine.mito.inverted_index.metadata_cache_size` | String | `64MiB` | Cache size for inverted index metadata. |
| `region_engine.mito.inverted_index.content_cache_size` | String | `128MiB` | Cache size for inverted index content. |
| `region_engine.mito.fulltext_index` | -- | -- | The options for full-text index in Mito engine. |
| `region_engine.mito.fulltext_index.create_on_flush` | String | `auto` | Whether to create the index on flush.<br/>- `auto`: automatically (default)<br/>- `disable`: never |
| `region_engine.mito.fulltext_index.create_on_compaction` | String | `auto` | Whether to create the index on compaction.<br/>- `auto`: automatically (default)<br/>- `disable`: never |
| `region_engine.mito.fulltext_index.apply_on_query` | String | `auto` | Whether to apply the index on query<br/>- `auto`: automatically (default)<br/>- `disable`: never |
| `region_engine.mito.fulltext_index.mem_threshold_on_create` | String | `auto` | Memory threshold for index creation.<br/>- `auto`: automatically determine the threshold based on the system memory size (default)<br/>- `unlimited`: no memory limit<br/>- `[size]` e.g. `64MB`: fixed memory threshold |
| `region_engine.mito.memtable` | -- | -- | -- |
| `region_engine.mito.memtable.type` | String | `time_series` | Memtable type.<br/>- `time_series`: time-series memtable<br/>- `partition_tree`: partition tree memtable (experimental) |
| `region_engine.mito.memtable.index_max_keys_per_shard` | Integer | `8192` | The max number of keys in one shard.<br/>Only available for `partition_tree` memtable. |
| `region_engine.mito.memtable.data_freeze_threshold` | Integer | `32768` | The max rows of data inside the actively writing buffer in one shard.<br/>Only available for `partition_tree` memtable. |
| `region_engine.mito.memtable.fork_dictionary_bytes` | String | `1GiB` | Max dictionary bytes.<br/>Only available for `partition_tree` memtable. |
| `region_engine.file` | -- | -- | Enable the file engine. |
| `logging` | -- | -- | The logging options. |
| `logging.dir` | String | `/tmp/greptimedb/logs` | The directory to store the log files. If set to empty, logs will not be written to files. |
| `logging.level` | String | Unset | The log level. Can be `info`/`debug`/`warn`/`error`. |
| `logging.enable_otlp_tracing` | Bool | `false` | Enable OTLP tracing. |
| `logging.otlp_endpoint` | String | `http://localhost:4317` | The OTLP tracing endpoint. |
| `logging.append_stdout` | Bool | `true` | Whether to append logs to stdout. |
| `logging.log_format` | String | `text` | The log format. Can be `text`/`json`. |
| `logging.max_log_files` | Integer | `720` | The maximum number of log files. |
| `logging.tracing_sample_ratio` | -- | -- | The percentage of traces that will be sampled and exported.<br/>Valid range `[0, 1]`: 1 means all traces are sampled, 0 means no traces are sampled; the default value is 1.<br/>Ratios > 1 are treated as 1, and ratios < 0 are treated as 0. |
| `logging.tracing_sample_ratio.default_ratio` | Float | `1.0` | -- |
| `logging.slow_query` | -- | -- | The slow query log options. |
| `logging.slow_query.enable` | Bool | `false` | Whether to enable slow query log. |
| `logging.slow_query.threshold` | String | Unset | The threshold of slow query. |
| `logging.slow_query.sample_ratio` | Float | Unset | The sampling ratio of slow query log. The value should be in the range of (0, 1]. |
| `export_metrics` | -- | -- | GreptimeDB can export its metrics and send them to a Prometheus-compatible service (e.g. `greptimedb` itself) via the remote-write API.<br/>This is only used for `greptimedb` to export its own metrics internally. It's different from Prometheus scrape. |
| `export_metrics.enable` | Bool | `false` | Whether to enable exporting metrics. |
| `export_metrics.write_interval` | String | `30s` | The interval of export metrics. |
| `export_metrics.self_import` | -- | -- | For `standalone` mode, `self_import` is recommended to collect metrics generated by itself<br/>You must create the database before enabling it. |
| `export_metrics.self_import.db` | String | Unset | -- |
| `export_metrics.remote_write` | -- | -- | -- |
| `export_metrics.remote_write.url` | String | `""` | The URL to send metrics to, for example: `http://127.0.0.1:4000/v1/prometheus/write?db=greptime_metrics`. |
| `export_metrics.remote_write.headers` | InlineTable | -- | HTTP headers of Prometheus remote-write carry. |
| `tracing` | -- | -- | The tracing options. Only effective when compiled with the `tokio-console` feature. |
| `tracing.tokio_console_addr` | String | Unset | The tokio console address. |
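The table above is the full reference. As a rough orientation only, a standalone configuration that overrides a handful of these options might look like the sketch below; all keys come from the table, while the concrete values (paths, bucket name) are illustrative assumptions rather than recommendations.

```toml
## The default timezone of the server.
default_timezone = "UTC"

[http]
addr = "0.0.0.0:4000"
timeout = "30s"

[storage]
data_home = "/var/lib/greptimedb"   # illustrative path
type = "S3"
bucket = "my-greptimedb-bucket"     # illustrative bucket name
root = "greptimedb"
# Prefer IAM roles over hardcoding access_key_id / secret_access_key, per the table above.

[[region_engine]]
[region_engine.mito]
num_workers = 8
auto_flush_interval = "1h"
```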
## Distributed Mode
### Frontend
| Key | Type | Default | Descriptions |
| --- | -----| ------- | ----------- |
| `default_timezone` | String | Unset | The default timezone of the server. |
| `runtime` | -- | -- | The runtime options. |
| `runtime.global_rt_size` | Integer | `8` | The number of threads to execute the runtime for global read operations. |
| `runtime.compact_rt_size` | Integer | `4` | The number of threads to execute the runtime for global write operations. |
| `heartbeat` | -- | -- | The heartbeat options. |
| `heartbeat.interval` | String | `18s` | Interval for sending heartbeat messages to the metasrv. |
| `heartbeat.retry_interval` | String | `3s` | Interval for retrying to send heartbeat messages to the metasrv. |
| `http` | -- | -- | The HTTP server options. |
| `http.addr` | String | `127.0.0.1:4000` | The address to bind the HTTP server. |
| `http.timeout` | String | `30s` | HTTP request timeout. Set to 0 to disable timeout. |
| `http.body_limit` | String | `64MB` | HTTP request body limit.<br/>The following units are supported: `B`, `KB`, `KiB`, `MB`, `MiB`, `GB`, `GiB`, `TB`, `TiB`, `PB`, `PiB`.<br/>Set to 0 to disable limit. |
| `grpc` | -- | -- | The gRPC server options. |
| `grpc.addr` | String | `127.0.0.1:4001` | The address to bind the gRPC server. |
| `grpc.hostname` | String | `127.0.0.1` | The hostname advertised to the metasrv,<br/>and used for connections from outside the host |
| `grpc.runtime_size` | Integer | `8` | The number of server worker threads. |
| `grpc.tls` | -- | -- | gRPC server TLS options, see `mysql.tls` section. |
| `grpc.tls.mode` | String | `disable` | TLS mode. |
| `grpc.tls.cert_path` | String | Unset | Certificate file path. |
| `grpc.tls.key_path` | String | Unset | Private key file path. |
| `grpc.tls.watch` | Bool | `false` | Watch for Certificate and key file change and auto reload.<br/>For now, gRPC tls config does not support auto reload. |
| `mysql` | -- | -- | MySQL server options. |
| `mysql.enable` | Bool | `true` | Whether to enable. |
| `mysql.addr` | String | `127.0.0.1:4002` | The addr to bind the MySQL server. |
| `mysql.runtime_size` | Integer | `2` | The number of server worker threads. |
| `mysql.tls` | -- | -- | -- |
| `mysql.tls.mode` | String | `disable` | TLS mode, refer to https://www.postgresql.org/docs/current/libpq-ssl.html<br/>- `disable` (default value)<br/>- `prefer`<br/>- `require`<br/>- `verify-ca`<br/>- `verify-full` |
| `mysql.tls.cert_path` | String | Unset | Certificate file path. |
| `mysql.tls.key_path` | String | Unset | Private key file path. |
| `mysql.tls.watch` | Bool | `false` | Watch for Certificate and key file change and auto reload |
| `postgres` | -- | -- | PostgreSQL server options. |
| `postgres.enable` | Bool | `true` | Whether to enable. |
| `postgres.addr` | String | `127.0.0.1:4003` | The addr to bind the PostgreSQL server. |
| `postgres.runtime_size` | Integer | `2` | The number of server worker threads. |
| `postgres.tls` | -- | -- | PostgreSQL server TLS options, see `mysql.tls` section. |
| `postgres.tls.mode` | String | `disable` | TLS mode. |
| `postgres.tls.cert_path` | String | Unset | Certificate file path. |
| `postgres.tls.key_path` | String | Unset | Private key file path. |
| `postgres.tls.watch` | Bool | `false` | Watch for Certificate and key file change and auto reload |
| `opentsdb` | -- | -- | OpenTSDB protocol options. |
| `opentsdb.enable` | Bool | `true` | Whether to enable OpenTSDB put in HTTP API. |
| `influxdb` | -- | -- | InfluxDB protocol options. |
| `influxdb.enable` | Bool | `true` | Whether to enable InfluxDB protocol in HTTP API. |
| `prom_store` | -- | -- | Prometheus remote storage options |
| `prom_store.enable` | Bool | `true` | Whether to enable Prometheus remote write and read in HTTP API. |
| `prom_store.with_metric_engine` | Bool | `true` | Whether to store the data from Prometheus remote write in metric engine. |
| `meta_client` | -- | -- | The metasrv client options. |
| `meta_client.metasrv_addrs` | Array | -- | The addresses of the metasrv. |
| `meta_client.timeout` | String | `3s` | Operation timeout. |
| `meta_client.heartbeat_timeout` | String | `500ms` | Heartbeat timeout. |
| `meta_client.ddl_timeout` | String | `10s` | DDL timeout. |
| `meta_client.connect_timeout` | String | `1s` | Connect server timeout. |
| `meta_client.tcp_nodelay` | Bool | `true` | `TCP_NODELAY` option for accepted connections. |
| `meta_client.metadata_cache_max_capacity` | Integer | `100000` | The maximum capacity of the metadata cache. |
| `meta_client.metadata_cache_ttl` | String | `10m` | TTL of the metadata cache. |
| `meta_client.metadata_cache_tti` | String | `5m` | -- |
| `datanode` | -- | -- | Datanode options. |
| `datanode.client` | -- | -- | Datanode client options. |
| `datanode.client.connect_timeout` | String | `10s` | -- |
| `datanode.client.tcp_nodelay` | Bool | `true` | -- |
| `logging` | -- | -- | The logging options. |
| `logging.dir` | String | `/tmp/greptimedb/logs` | The directory to store the log files. If set to empty, logs will not be written to files. |
| `logging.level` | String | Unset | The log level. Can be `info`/`debug`/`warn`/`error`. |
| `logging.enable_otlp_tracing` | Bool | `false` | Enable OTLP tracing. |
| `logging.otlp_endpoint` | String | `http://localhost:4317` | The OTLP tracing endpoint. |
| `logging.append_stdout` | Bool | `true` | Whether to append logs to stdout. |
| `logging.log_format` | String | `text` | The log format. Can be `text`/`json`. |
| `logging.max_log_files` | Integer | `720` | The maximum number of log files. |
| `logging.tracing_sample_ratio` | -- | -- | The percentage of traces that will be sampled and exported.<br/>Valid range `[0, 1]`: 1 means all traces are sampled, 0 means no traces are sampled; the default value is 1.<br/>Ratios > 1 are treated as 1, and ratios < 0 are treated as 0. |
| `logging.tracing_sample_ratio.default_ratio` | Float | `1.0` | -- |
| `logging.slow_query` | -- | -- | The slow query log options. |
| `logging.slow_query.enable` | Bool | `false` | Whether to enable slow query log. |
| `logging.slow_query.threshold` | String | Unset | The threshold of slow query. |
| `logging.slow_query.sample_ratio` | Float | Unset | The sampling ratio of slow query log. The value should be in the range of (0, 1]. |
| `export_metrics` | -- | -- | The frontend can export its metrics and send them to a Prometheus-compatible service (e.g. `greptimedb` itself) via the remote-write API.<br/>This is only used for `greptimedb` to export its own metrics internally. It's different from Prometheus scrape. |
| `export_metrics.enable` | Bool | `false` | Whether to enable exporting metrics. |
| `export_metrics.write_interval` | String | `30s` | The interval of export metrics. |
| `export_metrics.self_import` | -- | -- | For `standalone` mode, `self_import` is recommended to collect metrics generated by itself.<br/>You must create the database before enabling it. |
| `export_metrics.self_import.db` | String | Unset | -- |
| `export_metrics.remote_write` | -- | -- | -- |
| `export_metrics.remote_write.url` | String | `""` | The URL to send metrics to, for example: `http://127.0.0.1:4000/v1/prometheus/write?db=greptime_metrics`. |
| `export_metrics.remote_write.headers` | InlineTable | -- | HTTP headers of Prometheus remote-write carry. |
| `tracing` | -- | -- | The tracing options. Only effective when compiled with the `tokio-console` feature. |
| `tracing.tokio_console_addr` | String | Unset | The tokio console address. |
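For the frontend, the options above mostly mirror the standalone server protocols plus the metasrv client. A minimal sketch of a frontend configuration follows, with the metasrv address assumed to be the default `127.0.0.1:3002` from the Metasrv section below; values are illustrative.

```toml
## The default timezone of the server.
default_timezone = "UTC"

[heartbeat]
interval = "18s"
retry_interval = "3s"

[meta_client]
metasrv_addrs = ["127.0.0.1:3002"]  # assumed metasrv address (default server_addr below)
timeout = "3s"

[mysql]
enable = true
addr = "0.0.0.0:4002"
```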
### Metasrv
| Key | Type | Default | Descriptions |
| --- | -----| ------- | ----------- |
| `data_home` | String | `/tmp/metasrv/` | The working home directory. |
| `bind_addr` | String | `127.0.0.1:3002` | The bind address of metasrv. |
| `server_addr` | String | `127.0.0.1:3002` | The communication server address for frontend and datanode to connect to metasrv, "127.0.0.1:3002" by default for localhost. |
| `store_addr` | String | `127.0.0.1:2379` | The store server address; defaults to the etcd store. |
| `selector` | String | `round_robin` | Datanode selector type.<br/>- `round_robin` (default value)<br/>- `lease_based`<br/>- `load_based`<br/>For details, please see "https://docs.greptime.com/developer-guide/metasrv/selector". |
| `use_memory_store` | Bool | `false` | Store data in memory. |
| `enable_telemetry` | Bool | `true` | Whether to enable greptimedb telemetry. |
| `store_key_prefix` | String | `""` | If it's not empty, the metasrv will store all data with this key prefix. |
| `enable_region_failover` | Bool | `false` | Whether to enable region failover.<br/>This feature is only available on GreptimeDB running on cluster mode and<br/>- Using Remote WAL<br/>- Using shared storage (e.g., s3). |
| `backend` | String | `EtcdStore` | The datastore for meta server. |
| `runtime` | -- | -- | The runtime options. |
| `runtime.global_rt_size` | Integer | `8` | The number of threads to execute the runtime for global read operations. |
| `runtime.compact_rt_size` | Integer | `4` | The number of threads to execute the runtime for global write operations. |
| `procedure` | -- | -- | Procedure storage options. |
| `procedure.max_retry_times` | Integer | `12` | Procedure max retry time. |
| `procedure.retry_delay` | String | `500ms` | Initial retry delay of procedures; increases exponentially. |
| `procedure.max_metadata_value_size` | String | `1500KiB` | Automatically split large values.<br/>GreptimeDB procedures use etcd as the default metadata storage backend.<br/>The maximum size of any etcd request is 1.5 MiB.<br/>1500KiB = 1536KiB (1.5MiB) - 36KiB (reserved size of key).<br/>Comment out `max_metadata_value_size` to avoid splitting large values (no limit). |
| `failure_detector` | -- | -- | -- |
| `failure_detector.threshold` | Float | `8.0` | The threshold value used by the failure detector to determine failure conditions. |
| `failure_detector.min_std_deviation` | String | `100ms` | The minimum standard deviation of the heartbeat intervals, used to calculate acceptable variations. |
| `failure_detector.acceptable_heartbeat_pause` | String | `10000ms` | The acceptable pause duration between heartbeats, used to determine if a heartbeat interval is acceptable. |
| `failure_detector.first_heartbeat_estimate` | String | `1000ms` | The initial estimate of the heartbeat interval used by the failure detector. |
| `datanode` | -- | -- | Datanode options. |
| `datanode.client` | -- | -- | Datanode client options. |
| `datanode.client.timeout` | String | `10s` | Operation timeout. |
| `datanode.client.connect_timeout` | String | `10s` | Connect server timeout. |
| `datanode.client.tcp_nodelay` | Bool | `true` | `TCP_NODELAY` option for accepted connections. |
| `wal` | -- | -- | -- |
| `wal.provider` | String | `raft_engine` | -- |
| `wal.broker_endpoints` | Array | -- | The broker endpoints of the Kafka cluster. |
| `wal.auto_create_topics` | Bool | `true` | Automatically create topics for WAL.<br/>Set to `true` to automatically create topics for WAL.<br/>Otherwise, use topics named `topic_name_prefix_[0..num_topics)` |
| `wal.num_topics` | Integer | `64` | Number of topics. |
| `wal.selector_type` | String | `round_robin` | Topic selector type.<br/>Available selector types:<br/>- `round_robin` (default) |
| `wal.topic_name_prefix` | String | `greptimedb_wal_topic` | A Kafka topic is constructed by concatenating `topic_name_prefix` and `topic_id`.<br/>e.g., greptimedb_wal_topic_0, greptimedb_wal_topic_1. |
| `wal.replication_factor` | Integer | `1` | Expected number of replicas of each partition. |
| `wal.create_topic_timeout` | String | `30s` | The timeout above which a topic creation operation will be cancelled. |
| `wal.backoff_init` | String | `500ms` | The initial backoff for kafka clients. |
| `wal.backoff_max` | String | `10s` | The maximum backoff for kafka clients. |
| `wal.backoff_base` | Integer | `2` | Exponential backoff rate, i.e. next backoff = base * current backoff. |
| `wal.backoff_deadline` | String | `5mins` | Stop reconnecting if the total wait time reaches the deadline. If this config is missing, reconnection won't terminate. |
| `logging` | -- | -- | The logging options. |
| `logging.dir` | String | `/tmp/greptimedb/logs` | The directory to store the log files. If set to empty, logs will not be written to files. |
| `logging.level` | String | Unset | The log level. Can be `info`/`debug`/`warn`/`error`. |
| `logging.enable_otlp_tracing` | Bool | `false` | Enable OTLP tracing. |
| `logging.otlp_endpoint` | String | `http://localhost:4317` | The OTLP tracing endpoint. |
| `logging.append_stdout` | Bool | `true` | Whether to append logs to stdout. |
| `logging.log_format` | String | `text` | The log format. Can be `text`/`json`. |
| `logging.max_log_files` | Integer | `720` | The maximum number of log files. |
| `logging.tracing_sample_ratio` | -- | -- | The percentage of traces that will be sampled and exported.<br/>Valid range `[0, 1]`: 1 means all traces are sampled, 0 means no traces are sampled; the default value is 1.<br/>Ratios > 1 are treated as 1, and ratios < 0 are treated as 0. |
| `logging.tracing_sample_ratio.default_ratio` | Float | `1.0` | -- |
| `logging.slow_query` | -- | -- | The slow query log options. |
| `logging.slow_query.enable` | Bool | `false` | Whether to enable slow query log. |
| `logging.slow_query.threshold` | String | Unset | The threshold of slow query. |
| `logging.slow_query.sample_ratio` | Float | Unset | The sampling ratio of slow query log. The value should be in the range of (0, 1]. |
| `export_metrics` | -- | -- | The metasrv can export its metrics and send them to a Prometheus-compatible service (e.g. `greptimedb` itself) via the remote-write API.<br/>This is only used for `greptimedb` to export its own metrics internally. It's different from Prometheus scrape. |
| `export_metrics.enable` | Bool | `false` | Whether to enable exporting metrics. |
| `export_metrics.write_interval` | String | `30s` | The interval of export metrics. |
| `export_metrics.self_import` | -- | -- | For `standalone` mode, `self_import` is recommended to collect metrics generated by itself.<br/>You must create the database before enabling it. |
| `export_metrics.self_import.db` | String | Unset | -- |
| `export_metrics.remote_write` | -- | -- | -- |
| `export_metrics.remote_write.url` | String | `""` | The URL to send metrics to, for example: `http://127.0.0.1:4000/v1/prometheus/write?db=greptime_metrics`. |
| `export_metrics.remote_write.headers` | InlineTable | -- | HTTP headers of Prometheus remote-write carry. |
| `tracing` | -- | -- | The tracing options. Only effective when compiled with the `tokio-console` feature. |
| `tracing.tokio_console_addr` | String | Unset | The tokio console address. |
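A sketch of a metasrv configuration using only the keys documented above; the etcd endpoint and the externally reachable address are illustrative assumptions.

```toml
data_home = "/var/lib/metasrv"       # illustrative path
bind_addr = "0.0.0.0:3002"
server_addr = "192.0.2.10:3002"      # example address that frontends/datanodes use to reach this metasrv
store_addr = "127.0.0.1:2379"        # etcd endpoint
selector = "round_robin"
enable_region_failover = false       # per the table above, enabling it requires remote WAL and shared storage

[procedure]
max_retry_times = 12
retry_delay = "500ms"
```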
### Datanode
| Key | Type | Default | Descriptions |
| --- | -----| ------- | ----------- |
| `mode` | String | `standalone` | The running mode of the datanode. It can be `standalone` or `distributed`. |
| `node_id` | Integer | Unset | The datanode identifier and should be unique in the cluster. |
| `require_lease_before_startup` | Bool | `false` | Start services after regions have obtained leases.<br/>It will block the datanode start if it can't receive leases in the heartbeat from metasrv. |
| `init_regions_in_background` | Bool | `false` | Initialize all regions in the background during the startup.<br/>By default, it provides services after all regions have been initialized. |
| `enable_telemetry` | Bool | `true` | Enable telemetry to collect anonymous usage data. |
| `init_regions_parallelism` | Integer | `16` | Parallelism of initializing regions. |
| `max_concurrent_queries` | Integer | `0` | The maximum number of concurrent queries allowed to be executed. Zero means unlimited. |
| `rpc_addr` | String | Unset | Deprecated, use `grpc.addr` instead. |
| `rpc_hostname` | String | Unset | Deprecated, use `grpc.hostname` instead. |
| `rpc_runtime_size` | Integer | Unset | Deprecated, use `grpc.runtime_size` instead. |
| `rpc_max_recv_message_size` | String | Unset | Deprecated, use `grpc.max_recv_message_size` instead. |
| `rpc_max_send_message_size` | String | Unset | Deprecated, use `grpc.max_send_message_size` instead. |
| `http` | -- | -- | The HTTP server options. |
| `http.addr` | String | `127.0.0.1:4000` | The address to bind the HTTP server. |
| `http.timeout` | String | `30s` | HTTP request timeout. Set to 0 to disable timeout. |
| `http.body_limit` | String | `64MB` | HTTP request body limit.<br/>The following units are supported: `B`, `KB`, `KiB`, `MB`, `MiB`, `GB`, `GiB`, `TB`, `TiB`, `PB`, `PiB`.<br/>Set to 0 to disable limit. |
| `grpc` | -- | -- | The gRPC server options. |
| `grpc.addr` | String | `127.0.0.1:3001` | The address to bind the gRPC server. |
| `grpc.hostname` | String | `127.0.0.1` | The hostname advertised to the metasrv,<br/>and used for connections from outside the host |
| `grpc.runtime_size` | Integer | `8` | The number of server worker threads. |
| `grpc.max_recv_message_size` | String | `512MB` | The maximum receive message size for gRPC server. |
| `grpc.max_send_message_size` | String | `512MB` | The maximum send message size for gRPC server. |
| `grpc.tls` | -- | -- | gRPC server TLS options, see `mysql.tls` section. |
| `grpc.tls.mode` | String | `disable` | TLS mode. |
| `grpc.tls.cert_path` | String | Unset | Certificate file path. |
| `grpc.tls.key_path` | String | Unset | Private key file path. |
| `grpc.tls.watch` | Bool | `false` | Watch for Certificate and key file change and auto reload.<br/>For now, gRPC tls config does not support auto reload. |
| `runtime` | -- | -- | The runtime options. |
| `runtime.global_rt_size` | Integer | `8` | The number of threads to execute the runtime for global read operations. |
| `runtime.compact_rt_size` | Integer | `4` | The number of threads to execute the runtime for global write operations. |
| `heartbeat` | -- | -- | The heartbeat options. |
| `heartbeat.interval` | String | `3s` | Interval for sending heartbeat messages to the metasrv. |
| `heartbeat.retry_interval` | String | `3s` | Interval for retrying to send heartbeat messages to the metasrv. |
| `meta_client` | -- | -- | The metasrv client options. |
| `meta_client.metasrv_addrs` | Array | -- | The addresses of the metasrv. |
| `meta_client.timeout` | String | `3s` | Operation timeout. |
| `meta_client.heartbeat_timeout` | String | `500ms` | Heartbeat timeout. |
| `meta_client.ddl_timeout` | String | `10s` | DDL timeout. |
| `meta_client.connect_timeout` | String | `1s` | Connect server timeout. |
| `meta_client.tcp_nodelay` | Bool | `true` | `TCP_NODELAY` option for accepted connections. |
| `meta_client.metadata_cache_max_capacity` | Integer | `100000` | The maximum capacity of the metadata cache. |
| `meta_client.metadata_cache_ttl` | String | `10m` | TTL of the metadata cache. |
| `meta_client.metadata_cache_tti` | String | `5m` | -- |
| `wal` | -- | -- | The WAL options. |
| `wal.provider` | String | `raft_engine` | The provider of the WAL.<br/>- `raft_engine`: the WAL is stored in the local file system by raft-engine.<br/>- `kafka`: remote WAL, with data stored in Kafka. |
| `wal.dir` | String | Unset | The directory to store the WAL files.<br/>**It's only used when the provider is `raft_engine`**. |
| `wal.file_size` | String | `256MB` | The size of the WAL segment file.<br/>**It's only used when the provider is `raft_engine`**. |
| `wal.purge_threshold` | String | `4GB` | The threshold of the WAL size to trigger a flush.<br/>**It's only used when the provider is `raft_engine`**. |
| `wal.purge_interval` | String | `10m` | The interval to trigger a flush.<br/>**It's only used when the provider is `raft_engine`**. |
| `wal.read_batch_size` | Integer | `128` | The read batch size.<br/>**It's only used when the provider is `raft_engine`**. |
| `wal.sync_write` | Bool | `false` | Whether to use sync write.<br/>**It's only used when the provider is `raft_engine`**. |
| `wal.enable_log_recycle` | Bool | `true` | Whether to reuse logically truncated log files.<br/>**It's only used when the provider is `raft_engine`**. |
| `wal.prefill_log_files` | Bool | `false` | Whether to pre-create log files on start up.<br/>**It's only used when the provider is `raft_engine`**. |
| `wal.sync_period` | String | `10s` | Duration for fsyncing log files.<br/>**It's only used when the provider is `raft_engine`**. |
| `wal.recovery_parallelism` | Integer | `2` | Parallelism during WAL recovery. |
| `wal.broker_endpoints` | Array | -- | The Kafka broker endpoints.<br/>**It's only used when the provider is `kafka`**. |
| `wal.max_batch_bytes` | String | `1MB` | The max size of a single producer batch.<br/>Warning: Kafka has a default limit of 1MB per message in a topic.<br/>**It's only used when the provider is `kafka`**. |
| `wal.consumer_wait_timeout` | String | `100ms` | The consumer wait timeout.<br/>**It's only used when the provider is `kafka`**. |
| `wal.backoff_init` | String | `500ms` | The initial backoff delay.<br/>**It's only used when the provider is `kafka`**. |
| `wal.backoff_max` | String | `10s` | The maximum backoff delay.<br/>**It's only used when the provider is `kafka`**. |
| `wal.backoff_base` | Integer | `2` | The exponential backoff rate, i.e. next backoff = base * current backoff.<br/>**It's only used when the provider is `kafka`**. |
| `wal.backoff_deadline` | String | `5mins` | The deadline of retries.<br/>**It's only used when the provider is `kafka`**. |
| `wal.create_index` | Bool | `true` | Whether to enable WAL index creation.<br/>**It's only used when the provider is `kafka`**. |
| `wal.dump_index_interval` | String | `60s` | The interval for dumping WAL indexes.<br/>**It's only used when the provider is `kafka`**. |
| `wal.overwrite_entry_start_id` | Bool | `false` | Ignore missing entries when reading the WAL.<br/>**It's only used when the provider is `kafka`**.<br/><br/>This option ensures that when Kafka messages are deleted, the system<br/>can still successfully replay memtable data without throwing an<br/>out-of-range error.<br/>However, enabling this option might lead to unexpected data loss,<br/>as the system will skip over missing entries instead of treating<br/>them as critical errors. |
| `storage` | -- | -- | The data storage options. |
| `storage.data_home` | String | `/tmp/greptimedb/` | The working home directory. |
| `storage.type` | String | `File` | The storage type used to store the data.<br/>- `File`: the data is stored in the local file system.<br/>- `S3`: the data is stored in the S3 object storage.<br/>- `Gcs`: the data is stored in the Google Cloud Storage.<br/>- `Azblob`: the data is stored in the Azure Blob Storage.<br/>- `Oss`: the data is stored in the Aliyun OSS. |
| `storage.cache_path` | String | Unset | Cache configuration for object storage such as 'S3' etc.<br/>The local file cache directory. |
| `storage.cache_capacity` | String | Unset | The local file cache capacity in bytes. |
| `storage.bucket` | String | Unset | The S3 bucket name.<br/>**It's only used when the storage type is `S3`, `Oss` and `Gcs`**. |
| `storage.root` | String | Unset | The S3 data will be stored in the specified prefix, for example, `s3://${bucket}/${root}`.<br/>**It's only used when the storage type is `S3`, `Oss` and `Azblob`**. |
| `storage.access_key_id` | String | Unset | The access key id of the aws account.<br/>It's **highly recommended** to use AWS IAM roles instead of hardcoding the access key id and secret key.<br/>**It's only used when the storage type is `S3` and `Oss`**. |
| `storage.secret_access_key` | String | Unset | The secret access key of the aws account.<br/>It's **highly recommended** to use AWS IAM roles instead of hardcoding the access key id and secret key.<br/>**It's only used when the storage type is `S3`**. |
| `storage.access_key_secret` | String | Unset | The secret access key of the aliyun account.<br/>**It's only used when the storage type is `Oss`**. |
| `storage.account_name` | String | Unset | The account name of the azure account.<br/>**It's only used when the storage type is `Azblob`**. |
| `storage.account_key` | String | Unset | The account key of the azure account.<br/>**It's only used when the storage type is `Azblob`**. |
| `storage.scope` | String | Unset | The scope of the google cloud storage.<br/>**It's only used when the storage type is `Gcs`**. |
| `storage.credential_path` | String | Unset | The credential path of the google cloud storage.<br/>**It's only used when the storage type is `Gcs`**. |
| `storage.credential` | String | Unset | The credential of the google cloud storage.<br/>**It's only used when the storage type is `Gcs`**. |
| `storage.container` | String | Unset | The container of the azure account.<br/>**It's only used when the storage type is `Azblob`**. |
| `storage.sas_token` | String | Unset | The sas token of the azure account.<br/>**It's only used when the storage type is `Azblob`**. |
| `storage.endpoint` | String | Unset | The endpoint of the S3 service.<br/>**It's only used when the storage type is `S3`, `Oss`, `Gcs` and `Azblob`**. |
| `storage.region` | String | Unset | The region of the S3 service.<br/>**It's only used when the storage type is `S3`, `Oss`, `Gcs` and `Azblob`**. |
| `[[region_engine]]` | -- | -- | The region engine options. You can configure multiple region engines. |
| `region_engine.mito` | -- | -- | The Mito engine options. |
| `region_engine.mito.num_workers` | Integer | `8` | Number of region workers. |
| `region_engine.mito.worker_channel_size` | Integer | `128` | Request channel size of each worker. |
| `region_engine.mito.worker_request_batch_size` | Integer | `64` | Max batch size for a worker to handle requests. |
| `region_engine.mito.manifest_checkpoint_distance` | Integer | `10` | Number of meta action updated to trigger a new checkpoint for the manifest. |
| `region_engine.mito.compress_manifest` | Bool | `false` | Whether to compress manifest and checkpoint file by gzip (default false). |
| `region_engine.mito.max_background_flushes` | Integer | Auto | Max number of running background flush jobs (default: 1/2 of cpu cores). |
| `region_engine.mito.max_background_compactions` | Integer | Auto | Max number of running background compaction jobs (default: 1/4 of cpu cores). |
| `region_engine.mito.max_background_purges` | Integer | Auto | Max number of running background purge jobs (default: number of cpu cores). |
| `region_engine.mito.auto_flush_interval` | String | `1h` | Interval to auto flush a region if it has not flushed yet. |
| `region_engine.mito.global_write_buffer_size` | String | Auto | Global write buffer size for all regions. If not set, it defaults to 1/8 of OS memory with a maximum of 1GB. |
| `region_engine.mito.global_write_buffer_reject_size` | String | Auto | Global write buffer size threshold to reject write requests. If not set, it defaults to 2 times `global_write_buffer_size`. |
| `region_engine.mito.sst_meta_cache_size` | String | Auto | Cache size for SST metadata. Set it to 0 to disable the cache.<br/>If not set, it defaults to 1/32 of OS memory with a maximum of 128MB. |
| `region_engine.mito.vector_cache_size` | String | Auto | Cache size for vectors and arrow arrays. Set it to 0 to disable the cache.<br/>If not set, it defaults to 1/16 of OS memory with a maximum of 512MB. |
| `region_engine.mito.page_cache_size` | String | Auto | Cache size for pages of SST row groups. Set it to 0 to disable the cache.<br/>If not set, it defaults to 1/8 of OS memory. |
| `region_engine.mito.selector_result_cache_size` | String | Auto | Cache size for time series selector (e.g. `last_value()`). Set it to 0 to disable the cache.<br/>If not set, it defaults to 1/16 of OS memory with a maximum of 512MB. |
| `region_engine.mito.enable_experimental_write_cache` | Bool | `false` | Whether to enable the experimental write cache. |
| `region_engine.mito.experimental_write_cache_path` | String | `""` | File system path for write cache, defaults to `{data_home}/write_cache`. |
| `region_engine.mito.experimental_write_cache_size` | String | `512MB` | Capacity for write cache. |
| `region_engine.mito.experimental_write_cache_ttl` | String | Unset | TTL for write cache. |
| `region_engine.mito.sst_write_buffer_size` | String | `8MB` | Buffer size for SST writing. |
| `region_engine.mito.scan_parallelism` | Integer | `0` | Parallelism to scan a region (default: 1/4 of cpu cores).<br/>- `0`: use the default value (1/4 of cpu cores).<br/>- `1`: scan in the current thread.<br/>- `n`: scan with parallelism n. |
| `region_engine.mito.parallel_scan_channel_size` | Integer | `32` | Capacity of the channel to send data from parallel scan tasks to the main task. |
| `region_engine.mito.allow_stale_entries` | Bool | `false` | Whether to allow stale WAL entries read during replay. |
| `region_engine.mito.min_compaction_interval` | String | `0m` | Minimum time interval between two compactions.<br/>To align with the old behavior, the default value is 0 (no restrictions). |
| `region_engine.mito.index` | -- | -- | The options for index in Mito engine. |
| `region_engine.mito.index.aux_path` | String | `""` | Auxiliary directory path for the index in filesystem, used to store intermediate files for<br/>creating the index and staging files for searching the index, defaults to `{data_home}/index_intermediate`.<br/>The default name for this directory is `index_intermediate` for backward compatibility.<br/><br/>This path contains two subdirectories:<br/>- `__intm`: for storing intermediate files used during creating index.<br/>- `staging`: for storing staging files used during searching index. |
| `region_engine.mito.index.staging_size` | String | `2GB` | The max capacity of the staging directory. |
| `region_engine.mito.inverted_index` | -- | -- | The options for inverted index in Mito engine. |
| `region_engine.mito.inverted_index.create_on_flush` | String | `auto` | Whether to create the index on flush.<br/>- `auto`: automatically (default)<br/>- `disable`: never |
| `region_engine.mito.inverted_index.create_on_compaction` | String | `auto` | Whether to create the index on compaction.<br/>- `auto`: automatically (default)<br/>- `disable`: never |
| `region_engine.mito.inverted_index.apply_on_query` | String | `auto` | Whether to apply the index on query<br/>- `auto`: automatically (default)<br/>- `disable`: never |
| `region_engine.mito.inverted_index.mem_threshold_on_create` | String | `auto` | Memory threshold for performing an external sort during index creation.<br/>- `auto`: automatically determine the threshold based on the system memory size (default)<br/>- `unlimited`: no memory limit<br/>- `[size]` e.g. `64MB`: fixed memory threshold |
| `region_engine.mito.inverted_index.intermediate_path` | String | `""` | Deprecated, use `region_engine.mito.index.aux_path` instead. |
| `region_engine.mito.fulltext_index` | -- | -- | The options for full-text index in Mito engine. |
| `region_engine.mito.fulltext_index.create_on_flush` | String | `auto` | Whether to create the index on flush.<br/>- `auto`: automatically (default)<br/>- `disable`: never |
| `region_engine.mito.fulltext_index.create_on_compaction` | String | `auto` | Whether to create the index on compaction.<br/>- `auto`: automatically (default)<br/>- `disable`: never |
| `region_engine.mito.fulltext_index.apply_on_query` | String | `auto` | Whether to apply the index on query<br/>- `auto`: automatically (default)<br/>- `disable`: never |
| `region_engine.mito.fulltext_index.mem_threshold_on_create` | String | `auto` | Memory threshold for index creation.<br/>- `auto`: automatically determine the threshold based on the system memory size (default)<br/>- `unlimited`: no memory limit<br/>- `[size]` e.g. `64MB`: fixed memory threshold |
| `region_engine.mito.memtable` | -- | -- | -- |
| `region_engine.mito.memtable.type` | String | `time_series` | Memtable type.<br/>- `time_series`: time-series memtable<br/>- `partition_tree`: partition tree memtable (experimental) |
| `region_engine.mito.memtable.index_max_keys_per_shard` | Integer | `8192` | The max number of keys in one shard.<br/>Only available for `partition_tree` memtable. |
| `region_engine.mito.memtable.data_freeze_threshold` | Integer | `32768` | The max rows of data inside the actively writing buffer in one shard.<br/>Only available for `partition_tree` memtable. |
| `region_engine.mito.memtable.fork_dictionary_bytes` | String | `1GiB` | Max dictionary bytes.<br/>Only available for `partition_tree` memtable. |
| `region_engine.file` | -- | -- | Enable the file engine. |
| `logging` | -- | -- | The logging options. |
| `logging.dir` | String | `/tmp/greptimedb/logs` | The directory to store the log files. If set to empty, logs will not be written to files. |
| `logging.level` | String | Unset | The log level. Can be `info`/`debug`/`warn`/`error`. |
| `logging.enable_otlp_tracing` | Bool | `false` | Enable OTLP tracing. |
| `logging.otlp_endpoint` | String | `http://localhost:4317` | The OTLP tracing endpoint. |
| `logging.append_stdout` | Bool | `true` | Whether to append logs to stdout. |
| `logging.log_format` | String | `text` | The log format. Can be `text`/`json`. |
| `logging.max_log_files` | Integer | `720` | The maximum number of log files. |
| `logging.tracing_sample_ratio` | -- | -- | The percentage of traces that will be sampled and exported.<br/>Valid range `[0, 1]`: 1 means all traces are sampled, 0 means no traces are sampled; the default value is 1.<br/>Ratios > 1 are treated as 1, and ratios < 0 are treated as 0. |
| `logging.tracing_sample_ratio.default_ratio` | Float | `1.0` | -- |
| `logging.slow_query` | -- | -- | The slow query log options. |
| `logging.slow_query.enable` | Bool | `false` | Whether to enable slow query log. |
| `logging.slow_query.threshold` | String | Unset | The threshold of slow query. |
| `logging.slow_query.sample_ratio` | Float | Unset | The sampling ratio of slow query log. The value should be in the range of (0, 1]. |
| `export_metrics` | -- | -- | The datanode can export its metrics and send them to a Prometheus-compatible service (e.g. `greptimedb` itself) via the remote-write API.<br/>This is only used for `greptimedb` to export its own metrics internally. It's different from Prometheus scrape. |
| `export_metrics.enable` | Bool | `false` | Whether to enable exporting metrics. |
| `export_metrics.write_interval` | String | `30s` | The interval of export metrics. |
| `export_metrics.self_import` | -- | -- | For `standalone` mode, `self_import` is recommended to collect metrics generated by itself.<br/>You must create the database before enabling it. |
| `export_metrics.self_import.db` | String | Unset | -- |
| `export_metrics.remote_write` | -- | -- | -- |
| `export_metrics.remote_write.url` | String | `""` | The URL to send metrics to, for example: `http://127.0.0.1:4000/v1/prometheus/write?db=greptime_metrics`. |
| `export_metrics.remote_write.headers` | InlineTable | -- | HTTP headers of Prometheus remote-write carry. |
| `tracing` | -- | -- | The tracing options. Only effective when compiled with the `tokio-console` feature. |
| `tracing.tokio_console_addr` | String | Unset | The tokio console address. |
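A sketch of a datanode configuration for cluster mode that exercises a few of the options above, including the Kafka-backed remote WAL; all endpoints are illustrative assumptions.

```toml
node_id = 42
require_lease_before_startup = true

[grpc]
addr = "0.0.0.0:3001"
hostname = "192.0.2.21"                 # example address reachable by other nodes

[meta_client]
metasrv_addrs = ["192.0.2.10:3002"]     # example metasrv address

[wal]
provider = "kafka"
broker_endpoints = ["192.0.2.30:9092"]  # example Kafka broker
max_batch_bytes = "1MB"                 # Kafka's default per-message limit, per the table above

[storage]
data_home = "/var/lib/greptimedb"       # illustrative path
type = "File"
```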
### Flownode
| Key | Type | Default | Descriptions |
| --- | -----| ------- | ----------- |
| `mode` | String | `distributed` | The running mode of the flownode. It can be `standalone` or `distributed`. |
| `node_id` | Integer | Unset | The flownode identifier and should be unique in the cluster. |
| `grpc` | -- | -- | The gRPC server options. |
| `grpc.addr` | String | `127.0.0.1:6800` | The address to bind the gRPC server. |
| `grpc.hostname` | String | `127.0.0.1` | The hostname advertised to the metasrv,<br/>and used for connections from outside the host |
| `grpc.runtime_size` | Integer | `2` | The number of server worker threads. |
| `grpc.max_recv_message_size` | String | `512MB` | The maximum receive message size for gRPC server. |
| `grpc.max_send_message_size` | String | `512MB` | The maximum send message size for gRPC server. |
| `meta_client` | -- | -- | The metasrv client options. |
| `meta_client.metasrv_addrs` | Array | -- | The addresses of the metasrv. |
| `meta_client.timeout` | String | `3s` | Operation timeout. |
| `meta_client.heartbeat_timeout` | String | `500ms` | Heartbeat timeout. |
| `meta_client.ddl_timeout` | String | `10s` | DDL timeout. |
| `meta_client.connect_timeout` | String | `1s` | Connect server timeout. |
| `meta_client.tcp_nodelay` | Bool | `true` | `TCP_NODELAY` option for accepted connections. |
| `meta_client.metadata_cache_max_capacity` | Integer | `100000` | The configuration about the cache of the metadata. |
| `meta_client.metadata_cache_ttl` | String | `10m` | TTL of the metadata cache. |
| `meta_client.metadata_cache_tti` | String | `5m` | -- |
| `heartbeat` | -- | -- | The heartbeat options. |
| `heartbeat.interval` | String | `3s` | Interval for sending heartbeat messages to the metasrv. |
| `heartbeat.retry_interval` | String | `3s` | Interval for retrying to send heartbeat messages to the metasrv. |
| `logging` | -- | -- | The logging options. |
| `logging.dir` | String | `/tmp/greptimedb/logs` | The directory to store the log files. If set to empty, logs will not be written to files. |
| `logging.level` | String | Unset | The log level. Can be `info`/`debug`/`warn`/`error`. |
| `logging.enable_otlp_tracing` | Bool | `false` | Enable OTLP tracing. |
| `logging.otlp_endpoint` | String | `http://localhost:4317` | The OTLP tracing endpoint. |
| `logging.append_stdout` | Bool | `true` | Whether to append logs to stdout. |
| `logging.log_format` | String | `text` | The log format. Can be `text`/`json`. |
| `logging.max_log_files` | Integer | `720` | The maximum number of log files. |
| `logging.tracing_sample_ratio` | -- | -- | The percentage of traces that will be sampled and exported.<br/>Valid range `[0, 1]`: 1 means all traces are sampled, 0 means none are sampled; the default value is 1.<br/>Ratios > 1 are treated as 1 and ratios < 0 are treated as 0. |
| `logging.tracing_sample_ratio.default_ratio` | Float | `1.0` | -- |
| `logging.slow_query` | -- | -- | The slow query log options. |
| `logging.slow_query.enable` | Bool | `false` | Whether to enable slow query log. |
| `logging.slow_query.threshold` | String | Unset | The threshold of slow query. |
| `logging.slow_query.sample_ratio` | Float | Unset | The sampling ratio of slow query log. The value should be in the range of (0, 1]. |
| `tracing` | -- | -- | The tracing options. Only takes effect when compiled with the `tokio-console` feature. |
| `tracing.tokio_console_addr` | String | Unset | The tokio console address. |
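Putting the core options above together, a minimal flownode configuration might be sketched as follows (the node id and addresses are illustrative):

```toml
node_id = 14

[grpc]
# Address the flownode's gRPC server binds to, and the hostname it advertises.
addr = "127.0.0.1:6800"
hostname = "127.0.0.1"

[meta_client]
# The flownode registers with the metasrv instances listed here.
metasrv_addrs = ["127.0.0.1:3002"]

[heartbeat]
# Heartbeats are sent to the metasrv at this interval.
interval = "3s"
```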

View File

@@ -1,652 +1,60 @@
## The running mode of the datanode. It can be `standalone` or `distributed`.
mode = "standalone"
## The datanode identifier and should be unique in the cluster.
## @toml2docs:none-default
# Node running mode, see `standalone.example.toml`.
mode = "distributed"
# Whether to use in-memory catalog, see `standalone.example.toml`.
enable_memory_catalog = false
# The datanode identifier, should be unique.
node_id = 42
## Start services after regions have obtained leases.
## It will block the datanode start if it can't receive leases in the heartbeat from metasrv.
require_lease_before_startup = false
## Initialize all regions in the background during the startup.
## By default, it provides services after all regions have been initialized.
init_regions_in_background = false
## Enable telemetry to collect anonymous usage data.
enable_telemetry = true
## Parallelism of initializing regions.
init_regions_parallelism = 16
## The maximum number of concurrent queries allowed to be executed. Zero means unlimited.
max_concurrent_queries = 0
## Deprecated, use `grpc.addr` instead.
## @toml2docs:none-default
# gRPC server address, "127.0.0.1:3001" by default.
rpc_addr = "127.0.0.1:3001"
## Deprecated, use `grpc.hostname` instead.
## @toml2docs:none-default
# Hostname of this node.
rpc_hostname = "127.0.0.1"
## Deprecated, use `grpc.runtime_size` instead.
## @toml2docs:none-default
# The number of gRPC server worker threads, 8 by default.
rpc_runtime_size = 8
## Deprecated, use `grpc.max_recv_message_size` instead.
## @toml2docs:none-default
rpc_max_recv_message_size = "512MB"
## Deprecated, use `grpc.max_send_message_size` instead.
## @toml2docs:none-default
rpc_max_send_message_size = "512MB"
## The HTTP server options.
[http]
## The address to bind the HTTP server.
addr = "127.0.0.1:4000"
## HTTP request timeout. Set to 0 to disable timeout.
timeout = "30s"
## HTTP request body limit.
## The following units are supported: `B`, `KB`, `KiB`, `MB`, `MiB`, `GB`, `GiB`, `TB`, `TiB`, `PB`, `PiB`.
## Set to 0 to disable limit.
body_limit = "64MB"
## The gRPC server options.
[grpc]
## The address to bind the gRPC server.
addr = "127.0.0.1:3001"
## The hostname advertised to the metasrv,
## and used for connections from outside the host
hostname = "127.0.0.1"
## The number of server worker threads.
runtime_size = 8
## The maximum receive message size for gRPC server.
max_recv_message_size = "512MB"
## The maximum send message size for gRPC server.
max_send_message_size = "512MB"
## gRPC server TLS options, see `mysql.tls` section.
[grpc.tls]
## TLS mode.
mode = "disable"
## Certificate file path.
## @toml2docs:none-default
cert_path = ""
## Private key file path.
## @toml2docs:none-default
key_path = ""
## Watch for Certificate and key file change and auto reload.
## For now, gRPC tls config does not support auto reload.
watch = false
## The runtime options.
#+ [runtime]
## The number of threads to execute the runtime for global read operations.
#+ global_rt_size = 8
## The number of threads to execute the runtime for global write operations.
#+ compact_rt_size = 4
## The heartbeat options.
[heartbeat]
## Interval for sending heartbeat messages to the metasrv.
interval = "3s"
## Interval for retrying to send heartbeat messages to the metasrv.
retry_interval = "3s"
## The metasrv client options.
[meta_client]
## The addresses of the metasrv.
# Metasrv client options.
[meta_client_options]
# Metasrv address list.
metasrv_addrs = ["127.0.0.1:3002"]
## Operation timeout.
timeout = "3s"
## Heartbeat timeout.
heartbeat_timeout = "500ms"
## DDL timeout.
ddl_timeout = "10s"
## Connect server timeout.
connect_timeout = "1s"
## `TCP_NODELAY` option for accepted connections.
# Operation timeout in milliseconds, 3000 by default.
timeout_millis = 3000
# Connect server timeout in milliseconds, 5000 by default.
connect_timeout_millis = 5000
# `TCP_NODELAY` option for accepted connections, true by default.
tcp_nodelay = true
## The configuration about the cache of the metadata.
metadata_cache_max_capacity = 100000
## TTL of the metadata cache.
metadata_cache_ttl = "10m"
# TTI of the metadata cache.
metadata_cache_tti = "5m"
## The WAL options.
# WAL options, see `standalone.example.toml`.
[wal]
## The provider of the WAL.
## - `raft_engine`: the wal is stored in the local file system by raft-engine.
## - `kafka`: it's remote wal that data is stored in Kafka.
provider = "raft_engine"
## The directory to store the WAL files.
## **It's only used when the provider is `raft_engine`**.
## @toml2docs:none-default
dir = "/tmp/greptimedb/wal"
## The size of the WAL segment file.
## **It's only used when the provider is `raft_engine`**.
file_size = "256MB"
## The threshold of the WAL size to trigger a flush.
## **It's only used when the provider is `raft_engine`**.
purge_threshold = "4GB"
## The interval to trigger a flush.
## **It's only used when the provider is `raft_engine`**.
file_size = "1GB"
purge_threshold = "50GB"
purge_interval = "10m"
## The read batch size.
## **It's only used when the provider is `raft_engine`**.
read_batch_size = 128
## Whether to use sync write.
## **It's only used when the provider is `raft_engine`**.
sync_write = false
## Whether to reuse logically truncated log files.
## **It's only used when the provider is `raft_engine`**.
enable_log_recycle = true
## Whether to pre-create log files on start up.
## **It's only used when the provider is `raft_engine`**.
prefill_log_files = false
## Duration for fsyncing log files.
## **It's only used when the provider is `raft_engine`**.
sync_period = "10s"
## Parallelism during WAL recovery.
recovery_parallelism = 2
## The Kafka broker endpoints.
## **It's only used when the provider is `kafka`**.
broker_endpoints = ["127.0.0.1:9092"]
## The max size of a single producer batch.
## Warning: Kafka has a default limit of 1MB per message in a topic.
## **It's only used when the provider is `kafka`**.
max_batch_bytes = "1MB"
## The consumer wait timeout.
## **It's only used when the provider is `kafka`**.
consumer_wait_timeout = "100ms"
## The initial backoff delay.
## **It's only used when the provider is `kafka`**.
backoff_init = "500ms"
## The maximum backoff delay.
## **It's only used when the provider is `kafka`**.
backoff_max = "10s"
## The exponential backoff rate, i.e. next backoff = base * current backoff.
## **It's only used when the provider is `kafka`**.
backoff_base = 2
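## For illustration only (not additional options): with `backoff_init = "500ms"` and
## `backoff_base = 2` as above, the retry delays grow as 500ms, 1s, 2s, 4s, ..., are
## capped at `backoff_max`, and stop once the total wait time reaches `backoff_deadline` (below).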
## The deadline of retries.
## **It's only used when the provider is `kafka`**.
backoff_deadline = "5mins"
## Whether to enable WAL index creation.
## **It's only used when the provider is `kafka`**.
create_index = true
## The interval for dumping WAL indexes.
## **It's only used when the provider is `kafka`**.
dump_index_interval = "60s"
## Ignore missing entries during read WAL.
## **It's only used when the provider is `kafka`**.
##
## This option ensures that when Kafka messages are deleted, the system
## can still successfully replay memtable data without throwing an
## out-of-range error.
## However, enabling this option might lead to unexpected data loss,
## as the system will skip over missing entries instead of treating
## them as critical errors.
overwrite_entry_start_id = false
# The Kafka SASL configuration.
# **It's only used when the provider is `kafka`**.
# Available SASL mechanisms:
# - `PLAIN`
# - `SCRAM-SHA-256`
# - `SCRAM-SHA-512`
# [wal.sasl]
# type = "SCRAM-SHA-512"
# username = "user_kafka"
# password = "secret"
# The Kafka TLS configuration.
# **It's only used when the provider is `kafka`**.
# [wal.tls]
# server_ca_cert_path = "/path/to/server_cert"
# client_cert_path = "/path/to/client_cert"
# client_key_path = "/path/to/key"
# Example of using S3 as the storage.
# [storage]
# type = "S3"
# bucket = "greptimedb"
# root = "data"
# access_key_id = "test"
# secret_access_key = "123456"
# endpoint = "https://s3.amazonaws.com"
# region = "us-west-2"
# Example of using Oss as the storage.
# [storage]
# type = "Oss"
# bucket = "greptimedb"
# root = "data"
# access_key_id = "test"
# access_key_secret = "123456"
# endpoint = "https://oss-cn-hangzhou.aliyuncs.com"
# Example of using Azblob as the storage.
# [storage]
# type = "Azblob"
# container = "greptimedb"
# root = "data"
# account_name = "test"
# account_key = "123456"
# endpoint = "https://greptimedb.blob.core.windows.net"
# sas_token = ""
# Example of using Gcs as the storage.
# [storage]
# type = "Gcs"
# bucket = "greptimedb"
# root = "data"
# scope = "test"
# credential_path = "123456"
# credential = "base64-credential"
# endpoint = "https://storage.googleapis.com"
## The data storage options.
# Storage options, see `standalone.example.toml`.
[storage]
## The working home directory.
data_home = "/tmp/greptimedb/"
## The storage type used to store the data.
## - `File`: the data is stored in the local file system.
## - `S3`: the data is stored in the S3 object storage.
## - `Gcs`: the data is stored in the Google Cloud Storage.
## - `Azblob`: the data is stored in the Azure Blob Storage.
## - `Oss`: the data is stored in the Aliyun OSS.
type = "File"
## Cache configuration for object storage such as 'S3' etc.
## The local file cache directory.
## @toml2docs:none-default
cache_path = "/path/local_cache"
## The local file cache capacity in bytes.
## @toml2docs:none-default
cache_capacity = "256MB"
## The S3 bucket name.
## **It's only used when the storage type is `S3`, `Oss` and `Gcs`**.
## @toml2docs:none-default
bucket = "greptimedb"
## The S3 data will be stored in the specified prefix, for example, `s3://${bucket}/${root}`.
## **It's only used when the storage type is `S3`, `Oss` and `Azblob`**.
## @toml2docs:none-default
root = "greptimedb"
## The access key id of the aws account.
## It's **highly recommended** to use AWS IAM roles instead of hardcoding the access key id and secret key.
## **It's only used when the storage type is `S3` and `Oss`**.
## @toml2docs:none-default
access_key_id = "test"
## The secret access key of the aws account.
## It's **highly recommended** to use AWS IAM roles instead of hardcoding the access key id and secret key.
## **It's only used when the storage type is `S3`**.
## @toml2docs:none-default
secret_access_key = "test"
## The secret access key of the aliyun account.
## **It's only used when the storage type is `Oss`**.
## @toml2docs:none-default
access_key_secret = "test"
## The account name of the azure account.
## **It's only used when the storage type is `Azblob`**.
## @toml2docs:none-default
account_name = "test"
## The account key of the azure account.
## **It's only used when the storage type is `Azblob`**.
## @toml2docs:none-default
account_key = "test"
## The scope of the google cloud storage.
## **It's only used when the storage type is `Gcs`**.
## @toml2docs:none-default
scope = "test"
## The credential path of the google cloud storage.
## **It's only used when the storage type is `Gcs`**.
## @toml2docs:none-default
credential_path = "test"
## The credential of the google cloud storage.
## **It's only used when the storage type is `Gcs`**.
## @toml2docs:none-default
credential = "base64-credential"
## The container of the azure account.
## **It's only used when the storage type is `Azblob`**.
## @toml2docs:none-default
container = "greptimedb"
## The sas token of the azure account.
## **It's only used when the storage type is `Azblob`**.
## @toml2docs:none-default
sas_token = ""
## The endpoint of the S3 service.
## **It's only used when the storage type is `S3`, `Oss`, `Gcs` and `Azblob`**.
## @toml2docs:none-default
endpoint = "https://s3.amazonaws.com"
## The region of the S3 service.
## **It's only used when the storage type is `S3`, `Oss`, `Gcs` and `Azblob`**.
## @toml2docs:none-default
region = "us-west-2"
# Custom storage options
# [[storage.providers]]
# name = "S3"
# type = "S3"
# bucket = "greptimedb"
# root = "data"
# access_key_id = "test"
# secret_access_key = "123456"
# endpoint = "https://s3.amazonaws.com"
# region = "us-west-2"
# [[storage.providers]]
# name = "Gcs"
# type = "Gcs"
# bucket = "greptimedb"
# root = "data"
# scope = "test"
# credential_path = "123456"
# credential = "base64-credential"
# endpoint = "https://storage.googleapis.com"
## The region engine options. You can configure multiple region engines.
[[region_engine]]
## The Mito engine options.
[region_engine.mito]
## Number of region workers.
#+ num_workers = 8
## Request channel size of each worker.
worker_channel_size = 128
## Max batch size for a worker to handle requests.
worker_request_batch_size = 64
## Number of meta action updated to trigger a new checkpoint for the manifest.
manifest_checkpoint_distance = 10
## Whether to compress manifest and checkpoint file by gzip (default false).
compress_manifest = false
## Max number of running background flush jobs (default: 1/2 of cpu cores).
## @toml2docs:none-default="Auto"
#+ max_background_flushes = 4
## Max number of running background compaction jobs (default: 1/4 of cpu cores).
## @toml2docs:none-default="Auto"
#+ max_background_compactions = 2
## Max number of running background purge jobs (default: number of cpu cores).
## @toml2docs:none-default="Auto"
#+ max_background_purges = 8
## Interval to auto flush a region if it has not flushed yet.
auto_flush_interval = "1h"
## Global write buffer size for all regions. If not set, it defaults to 1/8 of OS memory with a maximum limit of 1GB.
## @toml2docs:none-default="Auto"
#+ global_write_buffer_size = "1GB"
## Global write buffer size threshold to reject write requests. If not set, it defaults to 2 times `global_write_buffer_size`.
## @toml2docs:none-default="Auto"
#+ global_write_buffer_reject_size = "2GB"
## Cache size for SST metadata. Set it to 0 to disable the cache.
## If not set, it defaults to 1/32 of OS memory with a maximum limit of 128MB.
## @toml2docs:none-default="Auto"
#+ sst_meta_cache_size = "128MB"
## Cache size for vectors and arrow arrays. Set it to 0 to disable the cache.
## If not set, it defaults to 1/16 of OS memory with a maximum limit of 512MB.
## @toml2docs:none-default="Auto"
#+ vector_cache_size = "512MB"
## Cache size for pages of SST row groups. Set it to 0 to disable the cache.
## If not set, it defaults to 1/8 of OS memory.
## @toml2docs:none-default="Auto"
#+ page_cache_size = "512MB"
## Cache size for time series selector (e.g. `last_value()`). Set it to 0 to disable the cache.
## If not set, it defaults to 1/16 of OS memory with a maximum limit of 512MB.
## @toml2docs:none-default="Auto"
#+ selector_result_cache_size = "512MB"
## Whether to enable the experimental write cache.
enable_experimental_write_cache = false
## File system path for write cache, defaults to `{data_home}/write_cache`.
experimental_write_cache_path = ""
## Capacity for write cache.
experimental_write_cache_size = "512MB"
## TTL for write cache.
## @toml2docs:none-default
experimental_write_cache_ttl = "8h"
## Buffer size for SST writing.
sst_write_buffer_size = "8MB"
## Parallelism to scan a region (default: 1/4 of cpu cores).
## - `0`: using the default value (1/4 of cpu cores).
## - `1`: scan in current thread.
## - `n`: scan in parallelism n.
scan_parallelism = 0
## Capacity of the channel to send data from parallel scan tasks to the main task.
parallel_scan_channel_size = 32
## Whether to allow stale WAL entries read during replay.
allow_stale_entries = false
## Minimum time interval between two compactions.
## To align with the old behavior, the default value is 0 (no restrictions).
min_compaction_interval = "0m"
## The options for index in Mito engine.
[region_engine.mito.index]
## Auxiliary directory path for the index in filesystem, used to store intermediate files for
## creating the index and staging files for searching the index, defaults to `{data_home}/index_intermediate`.
## The default name for this directory is `index_intermediate` for backward compatibility.
##
## This path contains two subdirectories:
## - `__intm`: for storing intermediate files used during creating index.
## - `staging`: for storing staging files used during searching index.
aux_path = ""
## The max capacity of the staging directory.
staging_size = "2GB"
## The options for inverted index in Mito engine.
[region_engine.mito.inverted_index]
## Whether to create the index on flush.
## - `auto`: automatically (default)
## - `disable`: never
create_on_flush = "auto"
## Whether to create the index on compaction.
## - `auto`: automatically (default)
## - `disable`: never
create_on_compaction = "auto"
## Whether to apply the index on query
## - `auto`: automatically (default)
## - `disable`: never
apply_on_query = "auto"
## Memory threshold for performing an external sort during index creation.
## - `auto`: automatically determine the threshold based on the system memory size (default)
## - `unlimited`: no memory limit
## - `[size]` e.g. `64MB`: fixed memory threshold
mem_threshold_on_create = "auto"
## Deprecated, use `region_engine.mito.index.aux_path` instead.
intermediate_path = ""
## The options for full-text index in Mito engine.
[region_engine.mito.fulltext_index]
## Whether to create the index on flush.
## - `auto`: automatically (default)
## - `disable`: never
create_on_flush = "auto"
## Whether to create the index on compaction.
## - `auto`: automatically (default)
## - `disable`: never
create_on_compaction = "auto"
## Whether to apply the index on query
## - `auto`: automatically (default)
## - `disable`: never
apply_on_query = "auto"
## Memory threshold for index creation.
## - `auto`: automatically determine the threshold based on the system memory size (default)
## - `unlimited`: no memory limit
## - `[size]` e.g. `64MB`: fixed memory threshold
mem_threshold_on_create = "auto"
[region_engine.mito.memtable]
## Memtable type.
## - `time_series`: time-series memtable
## - `partition_tree`: partition tree memtable (experimental)
type = "time_series"
## The max number of keys in one shard.
## Only available for `partition_tree` memtable.
index_max_keys_per_shard = 8192
## The max rows of data inside the actively writing buffer in one shard.
## Only available for `partition_tree` memtable.
data_freeze_threshold = 32768
## Max dictionary bytes.
## Only available for `partition_tree` memtable.
fork_dictionary_bytes = "1GiB"
[[region_engine]]
## Enable the file engine.
[region_engine.file]
## The logging options.
[logging]
## The directory to store the log files. If set to empty, logs will not be written to files.
dir = "/tmp/greptimedb/logs"
## The log level. Can be `info`/`debug`/`warn`/`error`.
## @toml2docs:none-default
level = "info"
## Enable OTLP tracing.
enable_otlp_tracing = false
## The OTLP tracing endpoint.
otlp_endpoint = "http://localhost:4317"
## Whether to append logs to stdout.
append_stdout = true
## The log format. Can be `text`/`json`.
log_format = "text"
## The maximum number of log files.
max_log_files = 720
## The percentage of traces that will be sampled and exported.
## Valid range `[0, 1]`: 1 means all traces are sampled, 0 means none are sampled; the default value is 1.
## Ratios > 1 are treated as 1 and ratios < 0 are treated as 0.
[logging.tracing_sample_ratio]
default_ratio = 1.0
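## For example (illustrative): setting `default_ratio = 0.1` keeps roughly 1 in 10 traces.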
## The slow query log options.
[logging.slow_query]
## Whether to enable slow query log.
enable = false
## The threshold of slow query.
## @toml2docs:none-default
threshold = "10s"
## The sampling ratio of slow query log. The value should be in the range of (0, 1].
## @toml2docs:none-default
sample_ratio = 1.0
## The datanode can export its metrics and send them to a Prometheus-compatible service (e.g. `greptimedb` itself) via the remote-write API.
## This is only used for `greptimedb` to export its own metrics internally. It's different from Prometheus scraping.
[export_metrics]
## Whether to enable exporting metrics.
enable = false
## The interval at which metrics are exported.
write_interval = "30s"
## For `standalone` mode, `self_import` is recommended to collect the metrics generated by the instance itself.
## You must create the database before enabling it.
[export_metrics.self_import]
## @toml2docs:none-default
db = "greptime_metrics"
[export_metrics.remote_write]
## The url the metrics send to. The url example can be: `http://127.0.0.1:4000/v1/prometheus/write?db=greptime_metrics`.
url = ""
## HTTP headers of Prometheus remote-write carry.
headers = { }
## The tracing options. Only takes effect when compiled with the `tokio-console` feature.
#+ [tracing]
## The tokio console address.
## @toml2docs:none-default
#+ tokio_console_addr = "127.0.0.1"
data_dir = "/tmp/greptimedb/data/"
# Compaction options, see `standalone.example.toml`.
[storage.compaction]
max_inflight_tasks = 4
max_files_in_level0 = 8
max_purge_tasks = 32
# Storage manifest options
[storage.manifest]
# Region checkpoint actions margin.
# Create a checkpoint every <checkpoint_margin> actions.
checkpoint_margin = 10
# Region manifest logs and checkpoints gc execution duration
gc_duration = '30s'
# Whether to try creating a manifest checkpoint on region opening
checkpoint_on_startup = false
# Procedure storage options, see `standalone.example.toml`.
# [procedure.store]
# type = "File"
# data_dir = "/tmp/greptimedb/procedure/"
# max_retry_times = 3
# retry_delay = "500ms"

View File

@@ -1,108 +0,0 @@
## The running mode of the flownode. It can be `standalone` or `distributed`.
mode = "distributed"
## The flownode identifier and should be unique in the cluster.
## @toml2docs:none-default
node_id = 14
## The gRPC server options.
[grpc]
## The address to bind the gRPC server.
addr = "127.0.0.1:6800"
## The hostname advertised to the metasrv,
## and used for connections from outside the host
hostname = "127.0.0.1"
## The number of server worker threads.
runtime_size = 2
## The maximum receive message size for gRPC server.
max_recv_message_size = "512MB"
## The maximum send message size for gRPC server.
max_send_message_size = "512MB"
## The metasrv client options.
[meta_client]
## The addresses of the metasrv.
metasrv_addrs = ["127.0.0.1:3002"]
## Operation timeout.
timeout = "3s"
## Heartbeat timeout.
heartbeat_timeout = "500ms"
## DDL timeout.
ddl_timeout = "10s"
## Connect server timeout.
connect_timeout = "1s"
## `TCP_NODELAY` option for accepted connections.
tcp_nodelay = true
## The configuration about the cache of the metadata.
metadata_cache_max_capacity = 100000
## TTL of the metadata cache.
metadata_cache_ttl = "10m"
# TTI of the metadata cache.
metadata_cache_tti = "5m"
## The heartbeat options.
[heartbeat]
## Interval for sending heartbeat messages to the metasrv.
interval = "3s"
## Interval for retrying to send heartbeat messages to the metasrv.
retry_interval = "3s"
## The logging options.
[logging]
## The directory to store the log files. If set to empty, logs will not be written to files.
dir = "/tmp/greptimedb/logs"
## The log level. Can be `info`/`debug`/`warn`/`error`.
## @toml2docs:none-default
level = "info"
## Enable OTLP tracing.
enable_otlp_tracing = false
## The OTLP tracing endpoint.
otlp_endpoint = "http://localhost:4317"
## Whether to append logs to stdout.
append_stdout = true
## The log format. Can be `text`/`json`.
log_format = "text"
## The maximum number of log files.
max_log_files = 720
## The percentage of traces that will be sampled and exported.
## Valid range `[0, 1]`: 1 means all traces are sampled, 0 means none are sampled; the default value is 1.
## Ratios > 1 are treated as 1 and ratios < 0 are treated as 0.
[logging.tracing_sample_ratio]
default_ratio = 1.0
## The slow query log options.
[logging.slow_query]
## Whether to enable slow query log.
enable = false
## The threshold of slow query.
## @toml2docs:none-default
threshold = "10s"
## The sampling ratio of slow query log. The value should be in the range of (0, 1].
## @toml2docs:none-default
sample_ratio = 1.0
## The tracing options. Only takes effect when compiled with the `tokio-console` feature.
#+ [tracing]
## The tokio console address.
## @toml2docs:none-default
#+ tokio_console_addr = "127.0.0.1"

View File

@@ -1,237 +1,58 @@
## The default timezone of the server.
## @toml2docs:none-default
default_timezone = "UTC"
# Node running mode, see `standalone.example.toml`.
mode = "distributed"
## The runtime options.
#+ [runtime]
## The number of threads to execute the runtime for global read operations.
#+ global_rt_size = 8
## The number of threads to execute the runtime for global write operations.
#+ compact_rt_size = 4
## The heartbeat options.
[heartbeat]
## Interval for sending heartbeat messages to the metasrv.
interval = "18s"
## Interval for retrying to send heartbeat messages to the metasrv.
retry_interval = "3s"
## The HTTP server options.
[http]
## The address to bind the HTTP server.
# HTTP server options, see `standalone.example.toml`.
[http_options]
addr = "127.0.0.1:4000"
## HTTP request timeout. Set to 0 to disable timeout.
timeout = "30s"
## HTTP request body limit.
## The following units are supported: `B`, `KB`, `KiB`, `MB`, `MiB`, `GB`, `GiB`, `TB`, `TiB`, `PB`, `PiB`.
## Set to 0 to disable limit.
body_limit = "64MB"
## The gRPC server options.
[grpc]
## The address to bind the gRPC server.
# gRPC server options, see `standalone.example.toml`.
[grpc_options]
addr = "127.0.0.1:4001"
## The hostname advertised to the metasrv,
## and used for connections from outside the host
hostname = "127.0.0.1"
## The number of server worker threads.
runtime_size = 8
## gRPC server TLS options, see `mysql.tls` section.
[grpc.tls]
## TLS mode.
mode = "disable"
## Certificate file path.
## @toml2docs:none-default
cert_path = ""
## Private key file path.
## @toml2docs:none-default
key_path = ""
## Watch for Certificate and key file change and auto reload.
## For now, gRPC tls config does not support auto reload.
watch = false
## MySQL server options.
[mysql]
## Whether to enable.
enable = true
## The addr to bind the MySQL server.
# MySQL server options, see `standalone.example.toml`.
[mysql_options]
addr = "127.0.0.1:4002"
## The number of server worker threads.
runtime_size = 2
# MySQL server TLS options.
[mysql.tls]
## TLS mode, refer to https://www.postgresql.org/docs/current/libpq-ssl.html
## - `disable` (default value)
## - `prefer`
## - `require`
## - `verify-ca`
## - `verify-full`
# MySQL server TLS options, see `standalone.example.toml`.
[mysql_options.tls]
mode = "disable"
## Certificate file path.
## @toml2docs:none-default
cert_path = ""
## Private key file path.
## @toml2docs:none-default
key_path = ""
## Watch for Certificate and key file change and auto reload
watch = false
## PostgreSQL server options.
[postgres]
## Whether to enable.
enable = true
## The addr to bind the PostgreSQL server.
# PostgreSQL server options, see `standalone.example.toml`.
[postgres_options]
addr = "127.0.0.1:4003"
## The number of server worker threads.
runtime_size = 2
## PostgreSQL server TLS options, see `mysql.tls` section.
[postgres.tls]
## TLS mode.
# PostgreSQL server TLS options, see `standalone.example.toml`.
[postgres_options.tls]
mode = "disable"
## Certificate file path.
## @toml2docs:none-default
cert_path = ""
## Private key file path.
## @toml2docs:none-default
key_path = ""
## Watch for Certificate and key file change and auto reload
watch = false
# OpenTSDB protocol options, see `standalone.example.toml`.
[opentsdb_options]
addr = "127.0.0.1:4242"
runtime_size = 2
## OpenTSDB protocol options.
[opentsdb]
## Whether to enable OpenTSDB put in HTTP API.
# InfluxDB protocol options, see `standalone.example.toml`.
[influxdb_options]
enable = true
## InfluxDB protocol options.
[influxdb]
## Whether to enable InfluxDB protocol in HTTP API.
# Prometheus protocol options, see `standalone.example.toml`.
[prometheus_options]
enable = true
## Prometheus remote storage options
[prom_store]
## Whether to enable Prometheus remote write and read in HTTP API.
enable = true
## Whether to store the data from Prometheus remote write in metric engine.
with_metric_engine = true
# Prometheus protocol options, see `standalone.example.toml`.
[prom_options]
addr = "127.0.0.1:4004"
## The metasrv client options.
[meta_client]
## The addresses of the metasrv.
# Metasrv client options, see `datanode.example.toml`.
[meta_client_options]
metasrv_addrs = ["127.0.0.1:3002"]
## Operation timeout.
timeout = "3s"
## Heartbeat timeout.
heartbeat_timeout = "500ms"
## DDL timeout.
ddl_timeout = "10s"
## Connect server timeout.
connect_timeout = "1s"
## `TCP_NODELAY` option for accepted connections.
timeout_millis = 3000
connect_timeout_millis = 5000
tcp_nodelay = true
## The configuration about the cache of the metadata.
metadata_cache_max_capacity = 100000
## TTL of the metadata cache.
metadata_cache_ttl = "10m"
# TTI of the metadata cache.
metadata_cache_tti = "5m"
## Datanode options.
[datanode]
## Datanode client options.
[datanode.client]
connect_timeout = "10s"
tcp_nodelay = true
## The logging options.
[logging]
## The directory to store the log files. If set to empty, logs will not be written to files.
dir = "/tmp/greptimedb/logs"
## The log level. Can be `info`/`debug`/`warn`/`error`.
## @toml2docs:none-default
level = "info"
## Enable OTLP tracing.
enable_otlp_tracing = false
## The OTLP tracing endpoint.
otlp_endpoint = "http://localhost:4317"
## Whether to append logs to stdout.
append_stdout = true
## The log format. Can be `text`/`json`.
log_format = "text"
## The maximum number of log files.
max_log_files = 720
## The percentage of traces that will be sampled and exported.
## Valid range `[0, 1]`: 1 means all traces are sampled, 0 means none are sampled; the default value is 1.
## Ratios > 1 are treated as 1 and ratios < 0 are treated as 0.
[logging.tracing_sample_ratio]
default_ratio = 1.0
## The slow query log options.
[logging.slow_query]
## Whether to enable slow query log.
enable = false
## The threshold of slow query.
## @toml2docs:none-default
threshold = "10s"
## The sampling ratio of slow query log. The value should be in the range of (0, 1].
## @toml2docs:none-default
sample_ratio = 1.0
## The datanode can export its metrics and send them to a Prometheus-compatible service (e.g. `greptimedb` itself) via the remote-write API.
## This is only used for `greptimedb` to export its own metrics internally. It's different from Prometheus scraping.
[export_metrics]
## Whether to enable exporting metrics.
enable = false
## The interval at which metrics are exported.
write_interval = "30s"
## For `standalone` mode, `self_import` is recommended to collect the metrics generated by the instance itself.
## You must create the database before enabling it.
[export_metrics.self_import]
## @toml2docs:none-default
db = "greptime_metrics"
[export_metrics.remote_write]
## The url the metrics send to. The url example can be: `http://127.0.0.1:4000/v1/prometheus/write?db=greptime_metrics`.
url = ""
## HTTP headers of Prometheus remote-write carry.
headers = { }
## The tracing options. Only takes effect when compiled with the `tokio-console` feature.
#+ [tracing]
## The tokio console address.
## @toml2docs:none-default
#+ tokio_console_addr = "127.0.0.1"

View File

@@ -1,224 +1,15 @@
## The working home directory.
data_home = "/tmp/metasrv/"
## The bind address of metasrv.
# The bind address of metasrv, "127.0.0.1:3002" by default.
bind_addr = "127.0.0.1:3002"
## The communication server address for frontend and datanode to connect to metasrv, "127.0.0.1:3002" by default for localhost.
# The communication server address for frontend and datanode to connect to metasrv, "127.0.0.1:3002" by default for localhost.
server_addr = "127.0.0.1:3002"
## Store server address default to etcd store.
# Etcd server address, "127.0.0.1:2379" by default.
store_addr = "127.0.0.1:2379"
## Datanode selector type.
## - `round_robin` (default value)
## - `lease_based`
## - `load_based`
## For details, please see "https://docs.greptime.com/developer-guide/metasrv/selector".
selector = "round_robin"
## Store data in memory.
# Datanode lease in seconds, 15 seconds by default.
datanode_lease_secs = 15
# Datanode selector type.
# - "LeaseBased" (default value).
# - "LoadBased"
# For details, please see "https://docs.greptime.com/developer-guide/meta/selector".
selector = "LeaseBased"
# Store data in memory, false by default.
use_memory_store = false
## Whether to enable greptimedb telemetry.
enable_telemetry = true
## If it's not empty, the metasrv will store all data with this key prefix.
store_key_prefix = ""
## Whether to enable region failover.
## This feature is only available for GreptimeDB running in cluster mode and
## - using remote WAL
## - using shared storage (e.g., S3).
enable_region_failover = false
## The datastore for meta server.
backend = "EtcdStore"
## The runtime options.
#+ [runtime]
## The number of threads to execute the runtime for global read operations.
#+ global_rt_size = 8
## The number of threads to execute the runtime for global write operations.
#+ compact_rt_size = 4
## Procedure storage options.
[procedure]
## Procedure max retry time.
max_retry_times = 12
## Initial retry delay of procedures, increases exponentially
retry_delay = "500ms"
## Auto split large value
## GreptimeDB procedure uses etcd as the default metadata storage backend.
## In etcd, the maximum size of any request is 1.5 MiB.
## 1500KiB = 1536KiB (1.5MiB) - 36KiB (reserved size of key)
## Comment out `max_metadata_value_size` to disable splitting of large values (no limit).
max_metadata_value_size = "1500KiB"
# Failure detectors options.
[failure_detector]
## The threshold value used by the failure detector to determine failure conditions.
threshold = 8.0
## The minimum standard deviation of the heartbeat intervals, used to calculate acceptable variations.
min_std_deviation = "100ms"
## The acceptable pause duration between heartbeats, used to determine if a heartbeat interval is acceptable.
acceptable_heartbeat_pause = "10000ms"
## The initial estimate of the heartbeat interval used by the failure detector.
first_heartbeat_estimate = "1000ms"
## Datanode options.
[datanode]
## Datanode client options.
[datanode.client]
## Operation timeout.
timeout = "10s"
## Connect server timeout.
connect_timeout = "10s"
## `TCP_NODELAY` option for accepted connections.
tcp_nodelay = true
[wal]
# Available wal providers:
# - `raft_engine` (default): there is no raft-engine WAL config here since metasrv is only involved in remote WAL currently.
# - `kafka`: metasrv **has to be** configured with the Kafka WAL config when the datanode uses the Kafka WAL provider.
provider = "raft_engine"
# Kafka wal config.
## The broker endpoints of the Kafka cluster.
broker_endpoints = ["127.0.0.1:9092"]
## Automatically create topics for WAL.
## Set to `true` to automatically create topics for WAL.
## Otherwise, use topics named `topic_name_prefix_[0..num_topics)`
auto_create_topics = true
## Number of topics.
num_topics = 64
## Topic selector type.
## Available selector types:
## - `round_robin` (default)
selector_type = "round_robin"
## A Kafka topic is constructed by concatenating `topic_name_prefix` and `topic_id`.
## e.g., greptimedb_wal_topic_0, greptimedb_wal_topic_1.
topic_name_prefix = "greptimedb_wal_topic"
## Expected number of replicas of each partition.
replication_factor = 1
## The timeout above which a topic creation operation will be cancelled.
create_topic_timeout = "30s"
## The initial backoff for kafka clients.
backoff_init = "500ms"
## The maximum backoff for kafka clients.
backoff_max = "10s"
## Exponential backoff rate, i.e. next backoff = base * current backoff.
backoff_base = 2
## Stop reconnecting if the total wait time reaches the deadline. If this config is missing, the reconnecting won't terminate.
backoff_deadline = "5mins"
# The Kafka SASL configuration.
# **It's only used when the provider is `kafka`**.
# Available SASL mechanisms:
# - `PLAIN`
# - `SCRAM-SHA-256`
# - `SCRAM-SHA-512`
# [wal.sasl]
# type = "SCRAM-SHA-512"
# username = "user_kafka"
# password = "secret"
# The Kafka TLS configuration.
# **It's only used when the provider is `kafka`**.
# [wal.tls]
# server_ca_cert_path = "/path/to/server_cert"
# client_cert_path = "/path/to/client_cert"
# client_key_path = "/path/to/key"
## The logging options.
[logging]
## The directory to store the log files. If set to empty, logs will not be written to files.
dir = "/tmp/greptimedb/logs"
## The log level. Can be `info`/`debug`/`warn`/`error`.
## @toml2docs:none-default
level = "info"
## Enable OTLP tracing.
enable_otlp_tracing = false
## The OTLP tracing endpoint.
otlp_endpoint = "http://localhost:4317"
## Whether to append logs to stdout.
append_stdout = true
## The log format. Can be `text`/`json`.
log_format = "text"
## The maximum number of log files.
max_log_files = 720
## The percentage of traces that will be sampled and exported.
## Valid range `[0, 1]`: 1 means all traces are sampled, 0 means none are sampled; the default value is 1.
## Ratios > 1 are treated as 1 and ratios < 0 are treated as 0.
[logging.tracing_sample_ratio]
default_ratio = 1.0
## The slow query log options.
[logging.slow_query]
## Whether to enable slow query log.
enable = false
## The threshold of slow query.
## @toml2docs:none-default
threshold = "10s"
## The sampling ratio of slow query log. The value should be in the range of (0, 1].
## @toml2docs:none-default
sample_ratio = 1.0
## The datanode can export its metrics and send them to a Prometheus-compatible service (e.g. `greptimedb` itself) via the remote-write API.
## This is only used for `greptimedb` to export its own metrics internally. It's different from Prometheus scraping.
[export_metrics]
## Whether to enable exporting metrics.
enable = false
## The interval at which metrics are exported.
write_interval = "30s"
## For `standalone` mode, `self_import` is recommended to collect the metrics generated by the instance itself.
## You must create the database before enabling it.
[export_metrics.self_import]
## @toml2docs:none-default
db = "greptime_metrics"
[export_metrics.remote_write]
## The url the metrics send to. The url example can be: `http://127.0.0.1:4000/v1/prometheus/write?db=greptime_metrics`.
url = ""
## HTTP headers of Prometheus remote-write carry.
headers = { }
## The tracing options. Only takes effect when compiled with the `tokio-console` feature.
#+ [tracing]
## The tokio console address.
## @toml2docs:none-default
#+ tokio_console_addr = "127.0.0.1"

View File

@@ -1,696 +1,130 @@
## The running mode of the datanode. It can be `standalone` or `distributed`.
# Node running mode, "standalone" or "distributed".
mode = "standalone"
# Whether to use in-memory catalog, `false` by default.
enable_memory_catalog = false
## Enable telemetry to collect anonymous usage data.
enable_telemetry = true
## The default timezone of the server.
## @toml2docs:none-default
default_timezone = "UTC"
## Initialize all regions in the background during the startup.
## By default, it provides services after all regions have been initialized.
init_regions_in_background = false
## Parallelism of initializing regions.
init_regions_parallelism = 16
## The maximum number of concurrent queries allowed to be executed. Zero means unlimited.
max_concurrent_queries = 0
## The runtime options.
#+ [runtime]
## The number of threads to execute the runtime for global read operations.
#+ global_rt_size = 8
## The number of threads to execute the runtime for global write operations.
#+ compact_rt_size = 4
## The HTTP server options.
[http]
## The address to bind the HTTP server.
# HTTP server options.
[http_options]
# Server address, "127.0.0.1:4000" by default.
addr = "127.0.0.1:4000"
## HTTP request timeout. Set to 0 to disable timeout.
# HTTP request timeout, 30s by default.
timeout = "30s"
## HTTP request body limit.
## The following units are supported: `B`, `KB`, `KiB`, `MB`, `MiB`, `GB`, `GiB`, `TB`, `TiB`, `PB`, `PiB`.
## Set to 0 to disable limit.
body_limit = "64MB"
## The gRPC server options.
[grpc]
## The address to bind the gRPC server.
# gRPC server options.
[grpc_options]
# Server address, "127.0.0.1:4001" by default.
addr = "127.0.0.1:4001"
## The number of server worker threads.
# The number of server worker threads, 8 by default.
runtime_size = 8
## gRPC server TLS options, see `mysql.tls` section.
[grpc.tls]
## TLS mode.
mode = "disable"
## Certificate file path.
## @toml2docs:none-default
cert_path = ""
## Private key file path.
## @toml2docs:none-default
key_path = ""
## Watch for Certificate and key file change and auto reload.
## For now, gRPC tls config does not support auto reload.
watch = false
## MySQL server options.
[mysql]
## Whether to enable.
enable = true
## The addr to bind the MySQL server.
# MySQL server options.
[mysql_options]
# Server address, "127.0.0.1:4002" by default.
addr = "127.0.0.1:4002"
## The number of server worker threads.
# The number of server worker threads, 2 by default.
runtime_size = 2
# MySQL server TLS options.
[mysql.tls]
## TLS mode, refer to https://www.postgresql.org/docs/current/libpq-ssl.html
## - `disable` (default value)
## - `prefer`
## - `require`
## - `verify-ca`
## - `verify-full`
[mysql_options.tls]
# TLS mode, refer to https://www.postgresql.org/docs/current/libpq-ssl.html
# - "disable" (default value)
# - "prefer"
# - "require"
# - "verify-ca"
# - "verify-full"
mode = "disable"
## Certificate file path.
## @toml2docs:none-default
# Certificate file path.
cert_path = ""
## Private key file path.
## @toml2docs:none-default
# Private key file path.
key_path = ""
## Watch for Certificate and key file change and auto reload
watch = false
## PostgreSQL server options.
[postgres]
## Whether to enable.
enable = true
## The addr to bind the PostgreSQL server.
# PostgreSQL server options.
[postgres_options]
# Server address, "127.0.0.1:4003" by default.
addr = "127.0.0.1:4003"
## The number of server worker threads.
# The number of server worker threads, 2 by default.
runtime_size = 2
## PostgreSQL server TLS options, see `mysql.tls` section.
[postgres.tls]
## TLS mode.
# PostgreSQL server TLS options, see `[mysql_options.tls]` section.
[postgres_options.tls]
# TLS mode.
mode = "disable"
## Certificate file path.
## @toml2docs:none-default
# certificate file path.
cert_path = ""
## Private key file path.
## @toml2docs:none-default
# private key file path.
key_path = ""
## Watch for Certificate and key file change and auto reload
watch = false
# OpenTSDB protocol options.
[opentsdb_options]
# OpenTSDB telnet API server address, "127.0.0.1:4242" by default.
addr = "127.0.0.1:4242"
# The number of server worker threads, 2 by default.
runtime_size = 2
## OpenTSDB protocol options.
[opentsdb]
## Whether to enable OpenTSDB put in HTTP API.
# InfluxDB protocol options.
[influxdb_options]
# Whether to enable InfluxDB protocol in HTTP API, true by default.
enable = true
## InfluxDB protocol options.
[influxdb]
## Whether to enable InfluxDB protocol in HTTP API.
# Prometheus protocol options.
[prometheus_options]
# Whether to enable Prometheus remote write and read in HTTP API, true by default.
enable = true
## Prometheus remote storage options
[prom_store]
## Whether to enable Prometheus remote write and read in HTTP API.
enable = true
## Whether to store the data from Prometheus remote write in metric engine.
with_metric_engine = true
# Prom protocol options.
[prom_options]
# Prometheus API server address, "127.0.0.1:4004" by default.
addr = "127.0.0.1:4004"
## The WAL options.
# WAL options.
[wal]
## The provider of the WAL.
## - `raft_engine`: the wal is stored in the local file system by raft-engine.
## - `kafka`: it's remote wal that data is stored in Kafka.
provider = "raft_engine"
## The directory to store the WAL files.
## **It's only used when the provider is `raft_engine`**.
## @toml2docs:none-default
# WAL data directory.
dir = "/tmp/greptimedb/wal"
## The size of the WAL segment file.
## **It's only used when the provider is `raft_engine`**.
file_size = "256MB"
## The threshold of the WAL size to trigger a flush.
## **It's only used when the provider is `raft_engine`**.
purge_threshold = "4GB"
## The interval to trigger a flush.
## **It's only used when the provider is `raft_engine`**.
# WAL file size in bytes.
file_size = "1GB"
# WAL purge threshold in bytes.
purge_threshold = "50GB"
# WAL purge interval in seconds.
purge_interval = "10m"
## The read batch size.
## **It's only used when the provider is `raft_engine`**.
# WAL read batch size.
read_batch_size = 128
## Whether to use sync write.
## **It's only used when the provider is `raft_engine`**.
# Whether to sync log file after every write.
sync_write = false
## Whether to reuse logically truncated log files.
## **It's only used when the provider is `raft_engine`**.
enable_log_recycle = true
## Whether to pre-create log files on start up.
## **It's only used when the provider is `raft_engine`**.
prefill_log_files = false
## Duration for fsyncing log files.
## **It's only used when the provider is `raft_engine`**.
sync_period = "10s"
## Parallelism during WAL recovery.
recovery_parallelism = 2
## The Kafka broker endpoints.
## **It's only used when the provider is `kafka`**.
broker_endpoints = ["127.0.0.1:9092"]
## Automatically create topics for WAL.
## Set to `true` to automatically create topics for WAL.
## Otherwise, use topics named `topic_name_prefix_[0..num_topics)`
auto_create_topics = true
## Number of topics.
## **It's only used when the provider is `kafka`**.
num_topics = 64
## Topic selector type.
## Available selector types:
## - `round_robin` (default)
## **It's only used when the provider is `kafka`**.
selector_type = "round_robin"
## A Kafka topic is constructed by concatenating `topic_name_prefix` and `topic_id`.
## e.g., greptimedb_wal_topic_0, greptimedb_wal_topic_1.
## **It's only used when the provider is `kafka`**.
topic_name_prefix = "greptimedb_wal_topic"
## Expected number of replicas of each partition.
## **It's only used when the provider is `kafka`**.
replication_factor = 1
## The timeout above which a topic creation operation will be cancelled.
## **It's only used when the provider is `kafka`**.
create_topic_timeout = "30s"
## The max size of a single producer batch.
## Warning: Kafka has a default limit of 1MB per message in a topic.
## **It's only used when the provider is `kafka`**.
max_batch_bytes = "1MB"
## The consumer wait timeout.
## **It's only used when the provider is `kafka`**.
consumer_wait_timeout = "100ms"
## The initial backoff delay.
## **It's only used when the provider is `kafka`**.
backoff_init = "500ms"
## The maximum backoff delay.
## **It's only used when the provider is `kafka`**.
backoff_max = "10s"
## The exponential backoff rate, i.e. next backoff = base * current backoff.
## **It's only used when the provider is `kafka`**.
backoff_base = 2
## The deadline of retries.
## **It's only used when the provider is `kafka`**.
backoff_deadline = "5mins"
## Ignore missing entries during read WAL.
## **It's only used when the provider is `kafka`**.
##
## This option ensures that when Kafka messages are deleted, the system
## can still successfully replay memtable data without throwing an
## out-of-range error.
## However, enabling this option might lead to unexpected data loss,
## as the system will skip over missing entries instead of treating
## them as critical errors.
overwrite_entry_start_id = false
# The Kafka SASL configuration.
# **It's only used when the provider is `kafka`**.
# Available SASL mechanisms:
# - `PLAIN`
# - `SCRAM-SHA-256`
# - `SCRAM-SHA-512`
# [wal.sasl]
# type = "SCRAM-SHA-512"
# username = "user_kafka"
# password = "secret"
# The Kafka TLS configuration.
# **It's only used when the provider is `kafka`**.
# [wal.tls]
# server_ca_cert_path = "/path/to/server_cert"
# client_cert_path = "/path/to/client_cert"
# client_key_path = "/path/to/key"
## Metadata storage options.
[metadata_store]
## Kv file size in bytes.
file_size = "256MB"
## Kv purge threshold.
purge_threshold = "4GB"
## Procedure storage options.
[procedure]
## Procedure max retry time.
max_retry_times = 3
## Initial retry delay of procedures, increases exponentially
retry_delay = "500ms"
# Example of using S3 as the storage.
# [storage]
# type = "S3"
# bucket = "greptimedb"
# root = "data"
# access_key_id = "test"
# secret_access_key = "123456"
# endpoint = "https://s3.amazonaws.com"
# region = "us-west-2"
# Example of using Oss as the storage.
# [storage]
# type = "Oss"
# bucket = "greptimedb"
# root = "data"
# access_key_id = "test"
# access_key_secret = "123456"
# endpoint = "https://oss-cn-hangzhou.aliyuncs.com"
# Example of using Azblob as the storage.
# [storage]
# type = "Azblob"
# container = "greptimedb"
# root = "data"
# account_name = "test"
# account_key = "123456"
# endpoint = "https://greptimedb.blob.core.windows.net"
# sas_token = ""
# Example of using Gcs as the storage.
# [storage]
# type = "Gcs"
# bucket = "greptimedb"
# root = "data"
# scope = "test"
# credential_path = "123456"
# credential = "base64-credential"
# endpoint = "https://storage.googleapis.com"
## The data storage options.
# Storage options.
[storage]
## The working home directory.
data_home = "/tmp/greptimedb/"
## The storage type used to store the data.
## - `File`: the data is stored in the local file system.
## - `S3`: the data is stored in the S3 object storage.
## - `Gcs`: the data is stored in the Google Cloud Storage.
## - `Azblob`: the data is stored in the Azure Blob Storage.
## - `Oss`: the data is stored in the Aliyun OSS.
# Storage type.
type = "File"
## Cache configuration for object storage such as 'S3' etc.
## The local file cache directory.
## @toml2docs:none-default
cache_path = "/path/local_cache"
## The local file cache capacity in bytes.
## @toml2docs:none-default
cache_capacity = "256MB"
## The S3 bucket name.
## **It's only used when the storage type is `S3`, `Oss` and `Gcs`**.
## @toml2docs:none-default
bucket = "greptimedb"
## The S3 data will be stored in the specified prefix, for example, `s3://${bucket}/${root}`.
## **It's only used when the storage type is `S3`, `Oss` and `Azblob`**.
## @toml2docs:none-default
root = "greptimedb"
## The access key id of the aws account.
## It's **highly recommended** to use AWS IAM roles instead of hardcoding the access key id and secret key.
## **It's only used when the storage type is `S3` and `Oss`**.
## @toml2docs:none-default
access_key_id = "test"
## The secret access key of the aws account.
## It's **highly recommended** to use AWS IAM roles instead of hardcoding the access key id and secret key.
## **It's only used when the storage type is `S3`**.
## @toml2docs:none-default
secret_access_key = "test"
## The secret access key of the aliyun account.
## **It's only used when the storage type is `Oss`**.
## @toml2docs:none-default
access_key_secret = "test"
## The account name of the azure account.
## **It's only used when the storage type is `Azblob`**.
## @toml2docs:none-default
account_name = "test"
## The account key of the azure account.
## **It's only used when the storage type is `Azblob`**.
## @toml2docs:none-default
account_key = "test"
## The scope of the google cloud storage.
## **It's only used when the storage type is `Gcs`**.
## @toml2docs:none-default
scope = "test"
## The credential path of the google cloud storage.
## **It's only used when the storage type is `Gcs`**.
## @toml2docs:none-default
credential_path = "test"
## The credential of the google cloud storage.
## **It's only used when the storage type is `Gcs`**.
## @toml2docs:none-default
credential = "base64-credential"
## The container of the azure account.
## **It's only used when the storage type is `Azblob`**.
## @toml2docs:none-default
container = "greptimedb"
## The sas token of the azure account.
## **It's only used when the storage type is `Azblob`**.
## @toml2docs:none-default
sas_token = ""
## The endpoint of the S3 service.
## **It's only used when the storage type is `S3`, `Oss`, `Gcs` and `Azblob`**.
## @toml2docs:none-default
endpoint = "https://s3.amazonaws.com"
## The region of the S3 service.
## **It's only used when the storage type is `S3`, `Oss`, `Gcs` and `Azblob`**.
## @toml2docs:none-default
region = "us-west-2"
# Custom storage options
# [[storage.providers]]
# name = "S3"
# type = "S3"
# bucket = "greptimedb"
# root = "data"
# access_key_id = "test"
# secret_access_key = "123456"
# endpoint = "https://s3.amazonaws.com"
# region = "us-west-2"
# [[storage.providers]]
# name = "Gcs"
# type = "Gcs"
# bucket = "greptimedb"
# root = "data"
# scope = "test"
# credential_path = "123456"
# credential = "base64-credential"
# endpoint = "https://storage.googleapis.com"
## The region engine options. You can configure multiple region engines.
[[region_engine]]
## The Mito engine options.
[region_engine.mito]
## Number of region workers.
#+ num_workers = 8
## Request channel size of each worker.
worker_channel_size = 128
## Max batch size for a worker to handle requests.
worker_request_batch_size = 64
## Number of meta action updated to trigger a new checkpoint for the manifest.
manifest_checkpoint_distance = 10
## Whether to compress manifest and checkpoint file by gzip (default false).
compress_manifest = false
## Max number of running background flush jobs (default: 1/2 of cpu cores).
## @toml2docs:none-default="Auto"
#+ max_background_flushes = 4
## Max number of running background compaction jobs (default: 1/4 of cpu cores).
## @toml2docs:none-default="Auto"
#+ max_background_compactions = 2
## Max number of running background purge jobs (default: number of cpu cores).
## @toml2docs:none-default="Auto"
#+ max_background_purges = 8
## Interval to auto flush a region if it has not flushed yet.
auto_flush_interval = "1h"
## Global write buffer size for all regions. If not set, it's default to 1/8 of OS memory with a max limitation of 1GB.
## @toml2docs:none-default="Auto"
#+ global_write_buffer_size = "1GB"
## Global write buffer size threshold to reject write requests. If not set, it's default to 2 times of `global_write_buffer_size`.
## @toml2docs:none-default="Auto"
#+ global_write_buffer_reject_size = "2GB"
## Cache size for SST metadata. Setting it to 0 to disable the cache.
## If not set, it's default to 1/32 of OS memory with a max limitation of 128MB.
## @toml2docs:none-default="Auto"
#+ sst_meta_cache_size = "128MB"
## Cache size for vectors and arrow arrays. Setting it to 0 to disable the cache.
## If not set, it's default to 1/16 of OS memory with a max limitation of 512MB.
## @toml2docs:none-default="Auto"
#+ vector_cache_size = "512MB"
## Cache size for pages of SST row groups. Set it to 0 to disable the cache.
## If not set, it defaults to 1/8 of OS memory.
## @toml2docs:none-default="Auto"
#+ page_cache_size = "512MB"
## Cache size for time series selector (e.g. `last_value()`). Set it to 0 to disable the cache.
## If not set, it defaults to 1/16 of OS memory, capped at 512MB.
## @toml2docs:none-default="Auto"
#+ selector_result_cache_size = "512MB"
## Whether to enable the experimental write cache.
enable_experimental_write_cache = false
## File system path for write cache, defaults to `{data_home}/write_cache`.
experimental_write_cache_path = ""
## Capacity for write cache.
experimental_write_cache_size = "512MB"
## TTL for write cache.
## @toml2docs:none-default
experimental_write_cache_ttl = "8h"
## Buffer size for SST writing.
sst_write_buffer_size = "8MB"
## Parallelism to scan a region (default: 1/4 of cpu cores).
## - `0`: use the default value (1/4 of cpu cores).
## - `1`: scan in the current thread.
## - `n`: scan with parallelism `n`.
scan_parallelism = 0
## Capacity of the channel to send data from parallel scan tasks to the main task.
parallel_scan_channel_size = 32
## Whether to allow reading stale WAL entries during replay.
allow_stale_entries = false
## Minimum time interval between two compactions.
## To align with the old behavior, the default value is 0 (no restrictions).
min_compaction_interval = "0m"
## The options for index in Mito engine.
[region_engine.mito.index]
## Auxiliary directory path for the index in the filesystem, used to store intermediate files for
## creating the index and staging files for searching the index. Defaults to `{data_home}/index_intermediate`.
## The directory name stays `index_intermediate` for backward compatibility.
##
## This path contains two subdirectories:
## - `__intm`: for storing intermediate files used during creating index.
## - `staging`: for storing staging files used during searching index.
aux_path = ""
## The max capacity of the staging directory.
staging_size = "2GB"
## The options for inverted index in Mito engine.
[region_engine.mito.inverted_index]
## Whether to create the index on flush.
## - `auto`: automatically (default)
## - `disable`: never
create_on_flush = "auto"
## Whether to create the index on compaction.
## - `auto`: automatically (default)
## - `disable`: never
create_on_compaction = "auto"
## Whether to apply the index on query.
## - `auto`: automatically (default)
## - `disable`: never
apply_on_query = "auto"
## Memory threshold for performing an external sort during index creation.
## - `auto`: automatically determine the threshold based on the system memory size (default)
## - `unlimited`: no memory limit
## - `[size]` e.g. `64MB`: fixed memory threshold
mem_threshold_on_create = "auto"
## Deprecated, use `region_engine.mito.index.aux_path` instead.
intermediate_path = ""
## Cache size for inverted index metadata.
metadata_cache_size = "64MiB"
## Cache size for inverted index content.
content_cache_size = "128MiB"
## The options for full-text index in Mito engine.
[region_engine.mito.fulltext_index]
## Whether to create the index on flush.
## - `auto`: automatically (default)
## - `disable`: never
create_on_flush = "auto"
## Whether to create the index on compaction.
## - `auto`: automatically (default)
## - `disable`: never
create_on_compaction = "auto"
## Whether to apply the index on query.
## - `auto`: automatically (default)
## - `disable`: never
apply_on_query = "auto"
## Memory threshold for index creation.
## - `auto`: automatically determine the threshold based on the system memory size (default)
## - `unlimited`: no memory limit
## - `[size]` e.g. `64MB`: fixed memory threshold
mem_threshold_on_create = "auto"
[region_engine.mito.memtable]
## Memtable type.
## - `time_series`: time-series memtable
## - `partition_tree`: partition tree memtable (experimental)
type = "time_series"
## The max number of keys in one shard.
## Only available for `partition_tree` memtable.
index_max_keys_per_shard = 8192
## The max rows of data inside the actively writing buffer in one shard.
## Only available for `partition_tree` memtable.
data_freeze_threshold = 32768
## Max dictionary bytes.
## Only available for `partition_tree` memtable.
fork_dictionary_bytes = "1GiB"
[[region_engine]]
## Enable the file engine.
[region_engine.file]
## The logging options.
[logging]
## The directory to store the log files. If set to empty, logs will not be written to files.
dir = "/tmp/greptimedb/logs"
## The log level. Can be `info`/`debug`/`warn`/`error`.
## @toml2docs:none-default
level = "info"
## Enable OTLP tracing.
enable_otlp_tracing = false
## The OTLP tracing endpoint.
otlp_endpoint = "http://localhost:4317"
## Whether to append logs to stdout.
append_stdout = true
## The log format. Can be `text`/`json`.
log_format = "text"
## The maximum number of log files.
max_log_files = 720
## The ratio of traces that will be sampled and exported.
## Valid range `[0, 1]`: 1 means all traces are sampled, 0 means no traces are sampled; the default value is 1.
## Ratios > 1 are treated as 1 and ratios < 0 are treated as 0.
[logging.tracing_sample_ratio]
default_ratio = 1.0
## The slow query log options.
[logging.slow_query]
## Whether to enable slow query log.
enable = false
## The threshold of slow query.
## @toml2docs:none-default
threshold = "10s"
## The sampling ratio of slow query log. The value should be in the range of (0, 1].
## @toml2docs:none-default
sample_ratio = 1.0
## The datanode can export its metrics and send them to a Prometheus-compatible service (e.g. `greptimedb` itself) via the remote-write API.
## This is only used for `greptimedb` to export its own metrics internally; it's different from Prometheus scraping.
[export_metrics]
## whether enable export metrics.
enable = false
## The interval of export metrics.
write_interval = "30s"
## For `standalone` mode, `self_import` is recommended for collecting the metrics the instance generates itself.
## You must create the database before enabling it.
[export_metrics.self_import]
## @toml2docs:none-default
db = "greptime_metrics"
[export_metrics.remote_write]
## The URL to send metrics to. For example: `http://127.0.0.1:4000/v1/prometheus/write?db=greptime_metrics`.
url = ""
## Additional HTTP headers to carry in Prometheus remote-write requests.
headers = { }
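## For example, to attach an auth header (the value below is a placeholder, not a real credential):
#+ headers = { Authorization = "Basic <base64-encoded-credentials>" }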
## The tracing options. Only takes effect when compiled with the `tokio-console` feature.
#+ [tracing]
## The tokio console address.
## @toml2docs:none-default
#+ tokio_console_addr = "127.0.0.1"
# Data directory, "/tmp/greptimedb/data" by default.
data_dir = "/tmp/greptimedb/data/"
# Compaction options.
[storage.compaction]
# Max number of compaction tasks that can run concurrently.
max_inflight_tasks = 4
# Max files in level 0 to trigger compaction.
max_files_in_level0 = 8
# Max number of SST purge tasks after compaction.
max_purge_tasks = 32
# Storage manifest options
[storage.manifest]
# Region checkpoint actions margin.
# Create a checkpoint every <checkpoint_margin> actions.
checkpoint_margin = 10
# Execution interval of the GC task for region manifest logs and checkpoints.
gc_duration = '30s'
# Whether to try creating a manifest checkpoint on region opening
checkpoint_on_startup = false
# Procedure storage options.
# Uncomment to enable.
# [procedure.store]
# # Storage type.
# type = "File"
# # Procedure data path.
# data_dir = "/tmp/greptimedb/procedure/"
# # Procedure max retry time.
# max_retry_times = 3
# # Initial retry delay of procedures, increases exponentially
# retry_delay = "500ms"

2 cyborg/.gitignore vendored
View File

@@ -1,2 +0,0 @@
node_modules
.env

View File

@@ -1,79 +0,0 @@
/*
* Copyright 2023 Greptime Team
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
import * as core from '@actions/core'
import {handleError, obtainClient} from "@/common";
import {context} from "@actions/github";
import {PullRequestEvent} from "@octokit/webhooks-types";
import {Options, sync as conventionalCommitsParser} from 'conventional-commits-parser';
import conventionalCommitTypes from 'conventional-commit-types';
import _ from "lodash";
const defaultTypes = Object.keys(conventionalCommitTypes.types)
const breakingChangeLabel = "breaking-change"
// These options are copied from [1].
// [1] https://github.com/conventional-changelog/conventional-changelog/blob/3f60b464/packages/conventional-changelog-conventionalcommits/src/parser.js
export const parserOpts: Options = {
headerPattern: /^(\w*)(?:\((.*)\))?!?: (.*)$/,
breakingHeaderPattern: /^(\w*)(?:\((.*)\))?!: (.*)$/,
headerCorrespondence: [
'type',
'scope',
'subject'
],
noteKeywords: ['BREAKING CHANGE', 'BREAKING-CHANGE'],
revertPattern: /^(?:Revert|revert:)\s"?([\s\S]+?)"?\s*This reverts commit (\w*)\./i,
revertCorrespondence: ['header', 'hash'],
issuePrefixes: ['#']
}
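// For illustration (hypothetical title, not taken from a real PR): parsing
// "feat(mito)!: change manifest format" with these options yields roughly
// { type: "feat", scope: "mito", subject: "change manifest format" }, and the
// `!` matched by `breakingHeaderPattern` adds a note titled "BREAKING CHANGE".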
async function main() {
if (!context.payload.pull_request) {
throw new Error(`Only pull request event supported. ${context.eventName} is unsupported.`)
}
const client = obtainClient("GITHUB_TOKEN")
const payload = context.payload as PullRequestEvent
const { owner, repo, number } = {
owner: payload.pull_request.base.user.login,
repo: payload.pull_request.base.repo.name,
number: payload.pull_request.number,
}
const { data: pull_request } = await client.rest.pulls.get({
owner, repo, pull_number: number,
})
const commit = conventionalCommitsParser(pull_request.title, parserOpts)
core.info(`Receive commit: ${JSON.stringify(commit)}`)
if (!commit.type) {
throw Error(`Malformed commit: ${JSON.stringify(commit)}`)
}
if (!defaultTypes.includes(commit.type)) {
throw Error(`Unexpected type ${JSON.stringify(commit.type)} of commit: ${JSON.stringify(commit)}`)
}
const breakingChanges = _.filter(commit.notes, _.matches({ title: 'BREAKING CHANGE'}))
if (breakingChanges.length > 0) {
await client.rest.issues.addLabels({
owner, repo, issue_number: number, labels: [breakingChangeLabel]
})
}
}
main().catch(handleError)

View File

@@ -1,106 +0,0 @@
/*
* Copyright 2023 Greptime Team
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
import * as core from '@actions/core'
import {handleError, obtainClient} from "@/common";
import {context} from "@actions/github";
import {PullRequestEditedEvent, PullRequestEvent, PullRequestOpenedEvent} from "@octokit/webhooks-types";
// @ts-expect-error moduleResolution:nodenext issue 54523
import {RequestError} from "@octokit/request-error";
const needFollowUpDocs = "[x] This PR requires documentation updates."
const labelDocsNotRequired = "docs-not-required"
const labelDocsRequired = "docs-required"
async function main() {
if (!context.payload.pull_request) {
throw new Error(`Only pull request event supported. ${context.eventName} is unsupported.`)
}
const client = obtainClient("GITHUB_TOKEN")
const docsClient = obtainClient("DOCS_REPO_TOKEN")
const payload = context.payload as PullRequestEvent
const { owner, repo, number, actor, title, html_url } = {
owner: payload.pull_request.base.user.login,
repo: payload.pull_request.base.repo.name,
number: payload.pull_request.number,
title: payload.pull_request.title,
html_url: payload.pull_request.html_url,
actor: payload.pull_request.user.login,
}
const followUpDocs = checkPullRequestEvent(payload)
if (followUpDocs) {
core.info("Follow up docs.")
await client.rest.issues.removeLabel({
owner, repo, issue_number: number, name: labelDocsNotRequired,
}).catch((e: RequestError) => {
if (e.status != 404) {
throw e;
}
core.debug(`Label ${labelDocsNotRequired} not exist.`)
})
await client.rest.issues.addLabels({
owner, repo, issue_number: number, labels: [labelDocsRequired],
})
await docsClient.rest.issues.create({
owner: 'GreptimeTeam',
repo: 'docs',
title: `Update docs for ${title}`,
body: `A document change request is generated from ${html_url}`,
assignee: actor,
}).then((res) => {
core.info(`Created issue ${res.data}`)
})
} else {
core.info("No need to follow up docs.")
await client.rest.issues.removeLabel({
owner, repo, issue_number: number, name: labelDocsRequired
}).catch((e: RequestError) => {
if (e.status != 404) {
throw e;
}
core.debug(`Label ${labelDocsRequired} not exist.`)
})
await client.rest.issues.addLabels({
owner, repo, issue_number: number, labels: [labelDocsNotRequired],
})
}
}
function checkPullRequestEvent(payload: PullRequestEvent) {
switch (payload.action) {
case "opened":
return checkPullRequestOpenedEvent(payload as PullRequestOpenedEvent)
case "edited":
return checkPullRequestEditedEvent(payload as PullRequestEditedEvent)
default:
throw new Error(`${payload.action} is unsupported.`)
}
}
function checkPullRequestOpenedEvent(event: PullRequestOpenedEvent): boolean {
return event.pull_request.body?.includes(needFollowUpDocs) ?? false
}
function checkPullRequestEditedEvent(event: PullRequestEditedEvent): boolean {
const previous = event.changes.body?.from.includes(needFollowUpDocs) ?? false
const current = event.pull_request.body?.includes(needFollowUpDocs) ?? false
// from docs-not-required to docs-required
return (!previous) && current
}
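// Example (illustrative): ticking the "[x] This PR requires documentation updates." checkbox in an
// edit flips the result from false to true, which swaps `docs-not-required` for `docs-required`
// and files a follow-up issue in the GreptimeTeam/docs repository.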
main().catch(handleError)

View File

@@ -1,83 +0,0 @@
/*
* Copyright 2023 Greptime Team
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
import * as core from '@actions/core'
import {handleError, obtainClient} from "@/common"
import {context} from "@actions/github"
import _ from "lodash"
async function main() {
const success = process.env["CI_REPORT_STATUS"] === "true"
core.info(`CI_REPORT_STATUS=${process.env["CI_REPORT_STATUS"]}, resolved to ${success}`)
const client = obtainClient("GITHUB_TOKEN")
const title = `Workflow run '${context.workflow}' failed`
const url = `${process.env["GITHUB_SERVER_URL"]}/${process.env["GITHUB_REPOSITORY"]}/actions/runs/${process.env["GITHUB_RUN_ID"]}`
const failure_comment = `@GreptimeTeam/db-approver\nNew failure: ${url} `
const success_comment = `@GreptimeTeam/db-approver\nBack to success: ${url}`
const {owner, repo} = context.repo
const labels = ['O-ci-failure']
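// The workflow keeps at most one open tracking issue (labeled O-ci-failure) per workflow title:
// each new failure adds a comment, and a later successful run comments and closes the issue.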
const issues = await client.paginate(client.rest.issues.listForRepo, {
owner,
repo,
labels: labels.join(','),
state: "open",
sort: "created",
direction: "desc",
});
const issue = _.find(issues, (i) => i.title === title);
if (issue) { // existing issue
core.info(`Found previous issue ${issue.html_url}`)
if (!success) {
await client.rest.issues.createComment({
owner,
repo,
issue_number: issue.number,
body: failure_comment,
})
} else {
await client.rest.issues.createComment({
owner,
repo,
issue_number: issue.number,
body: success_comment,
})
await client.rest.issues.update({
owner,
repo,
issue_number: issue.number,
state: "closed",
state_reason: "completed",
})
}
core.setOutput("html_url", issue.html_url)
} else if (!success) { // create new issue for failure
const issue = await client.rest.issues.create({
owner,
repo,
title,
labels,
body: failure_comment,
})
core.info(`Created issue ${issue.data.html_url}`)
core.setOutput("html_url", issue.data.html_url)
}
}
main().catch(handleError)

View File

@@ -1,73 +0,0 @@
/*
* Copyright 2023 Greptime Team
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
import * as core from '@actions/core'
import {GitHub} from "@actions/github/lib/utils"
import _ from "lodash";
import dayjs from "dayjs";
import {handleError, obtainClient} from "@/common";
async function main() {
const client = obtainClient("GITHUB_TOKEN")
await unassign(client)
}
async function unassign(client: InstanceType<typeof GitHub>) {
const owner = "GreptimeTeam"
const repo = "greptimedb"
const dt = dayjs().subtract(14, 'days');
core.info(`Open issues updated before ${dt.toISOString()} will be considered stale.`)
const members = await client.paginate(client.rest.repos.listCollaborators, {
owner,
repo,
permission: "push",
per_page: 100
}).then((members) => members.map((member) => member.login))
core.info(`Members (${members.length}): ${members}`)
const issues = await client.paginate(client.rest.issues.listForRepo, {
owner,
repo,
state: "open",
sort: "created",
direction: "asc",
per_page: 100
})
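// For each open issue, collect its assignees, ignore collaborators with push access, and unassign
// the remaining (non-collaborator) assignees once the issue has been inactive for 14 days.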
for (const issue of issues) {
let assignees = [];
if (issue.assignee) {
assignees.push(issue.assignee.login)
}
for (const assignee of issue.assignees) {
assignees.push(assignee.login)
}
assignees = _.uniq(assignees)
assignees = _.difference(assignees, members)
if (assignees.length > 0 && dayjs(issue.updated_at).isBefore(dt)) {
core.info(`Assignees ${assignees} of issue ${issue.number} will be unassigned.`)
await client.rest.issues.removeAssignees({
owner,
repo,
issue_number: issue.number,
assignees: assignees,
})
}
}
}
main().catch(handleError)

View File

@@ -1,26 +0,0 @@
{
"name": "cyborg",
"version": "1.0.0",
"description": "Automator for GreptimeDB Repository Management",
"private": true,
"packageManager": "pnpm@8.15.5",
"dependencies": {
"@actions/core": "^1.10.1",
"@actions/github": "^6.0.0",
"@octokit/request-error": "^6.1.1",
"@octokit/webhooks-types": "^7.5.1",
"conventional-commit-types": "^3.0.0",
"conventional-commits-parser": "^5.0.0",
"dayjs": "^1.11.11",
"dotenv": "^16.4.5",
"lodash": "^4.17.21"
},
"devDependencies": {
"@types/conventional-commits-parser": "^5.0.0",
"@types/lodash": "^4.17.0",
"@types/node": "^20.12.7",
"tsconfig-paths": "^4.2.0",
"tsx": "^4.8.2",
"typescript": "^5.4.5"
}
}

612 cyborg/pnpm-lock.yaml generated
View File

@@ -1,612 +0,0 @@
lockfileVersion: '6.0'
settings:
autoInstallPeers: true
excludeLinksFromLockfile: false
dependencies:
'@actions/core':
specifier: ^1.10.1
version: 1.10.1
'@actions/github':
specifier: ^6.0.0
version: 6.0.0
'@octokit/request-error':
specifier: ^6.1.1
version: 6.1.1
'@octokit/webhooks-types':
specifier: ^7.5.1
version: 7.5.1
conventional-commit-types:
specifier: ^3.0.0
version: 3.0.0
conventional-commits-parser:
specifier: ^5.0.0
version: 5.0.0
dayjs:
specifier: ^1.11.11
version: 1.11.11
dotenv:
specifier: ^16.4.5
version: 16.4.5
lodash:
specifier: ^4.17.21
version: 4.17.21
devDependencies:
'@types/conventional-commits-parser':
specifier: ^5.0.0
version: 5.0.0
'@types/lodash':
specifier: ^4.17.0
version: 4.17.0
'@types/node':
specifier: ^20.12.7
version: 20.12.7
tsconfig-paths:
specifier: ^4.2.0
version: 4.2.0
tsx:
specifier: ^4.8.2
version: 4.8.2
typescript:
specifier: ^5.4.5
version: 5.4.5
packages:
/@actions/core@1.10.1:
resolution: {integrity: sha512-3lBR9EDAY+iYIpTnTIXmWcNbX3T2kCkAEQGIQx4NVQ0575nk2k3GRZDTPQG+vVtS2izSLmINlxXf0uLtnrTP+g==}
dependencies:
'@actions/http-client': 2.2.1
uuid: 8.3.2
dev: false
/@actions/github@6.0.0:
resolution: {integrity: sha512-alScpSVnYmjNEXboZjarjukQEzgCRmjMv6Xj47fsdnqGS73bjJNDpiiXmp8jr0UZLdUB6d9jW63IcmddUP+l0g==}
dependencies:
'@actions/http-client': 2.2.1
'@octokit/core': 5.2.0
'@octokit/plugin-paginate-rest': 9.2.1(@octokit/core@5.2.0)
'@octokit/plugin-rest-endpoint-methods': 10.4.1(@octokit/core@5.2.0)
dev: false
/@actions/http-client@2.2.1:
resolution: {integrity: sha512-KhC/cZsq7f8I4LfZSJKgCvEwfkE8o1538VoBeoGzokVLLnbFDEAdFD3UhoMklxo2un9NJVBdANOresx7vTHlHw==}
dependencies:
tunnel: 0.0.6
undici: 5.28.4
dev: false
/@esbuild/aix-ppc64@0.20.2:
resolution: {integrity: sha512-D+EBOJHXdNZcLJRBkhENNG8Wji2kgc9AZ9KiPr1JuZjsNtyHzrsfLRrY0tk2H2aoFu6RANO1y1iPPUCDYWkb5g==}
engines: {node: '>=12'}
cpu: [ppc64]
os: [aix]
requiresBuild: true
dev: true
optional: true
/@esbuild/android-arm64@0.20.2:
resolution: {integrity: sha512-mRzjLacRtl/tWU0SvD8lUEwb61yP9cqQo6noDZP/O8VkwafSYwZ4yWy24kan8jE/IMERpYncRt2dw438LP3Xmg==}
engines: {node: '>=12'}
cpu: [arm64]
os: [android]
requiresBuild: true
dev: true
optional: true
/@esbuild/android-arm@0.20.2:
resolution: {integrity: sha512-t98Ra6pw2VaDhqNWO2Oph2LXbz/EJcnLmKLGBJwEwXX/JAN83Fym1rU8l0JUWK6HkIbWONCSSatf4sf2NBRx/w==}
engines: {node: '>=12'}
cpu: [arm]
os: [android]
requiresBuild: true
dev: true
optional: true
/@esbuild/android-x64@0.20.2:
resolution: {integrity: sha512-btzExgV+/lMGDDa194CcUQm53ncxzeBrWJcncOBxuC6ndBkKxnHdFJn86mCIgTELsooUmwUm9FkhSp5HYu00Rg==}
engines: {node: '>=12'}
cpu: [x64]
os: [android]
requiresBuild: true
dev: true
optional: true
/@esbuild/darwin-arm64@0.20.2:
resolution: {integrity: sha512-4J6IRT+10J3aJH3l1yzEg9y3wkTDgDk7TSDFX+wKFiWjqWp/iCfLIYzGyasx9l0SAFPT1HwSCR+0w/h1ES/MjA==}
engines: {node: '>=12'}
cpu: [arm64]
os: [darwin]
requiresBuild: true
dev: true
optional: true
/@esbuild/darwin-x64@0.20.2:
resolution: {integrity: sha512-tBcXp9KNphnNH0dfhv8KYkZhjc+H3XBkF5DKtswJblV7KlT9EI2+jeA8DgBjp908WEuYll6pF+UStUCfEpdysA==}
engines: {node: '>=12'}
cpu: [x64]
os: [darwin]
requiresBuild: true
dev: true
optional: true
/@esbuild/freebsd-arm64@0.20.2:
resolution: {integrity: sha512-d3qI41G4SuLiCGCFGUrKsSeTXyWG6yem1KcGZVS+3FYlYhtNoNgYrWcvkOoaqMhwXSMrZRl69ArHsGJ9mYdbbw==}
engines: {node: '>=12'}
cpu: [arm64]
os: [freebsd]
requiresBuild: true
dev: true
optional: true
/@esbuild/freebsd-x64@0.20.2:
resolution: {integrity: sha512-d+DipyvHRuqEeM5zDivKV1KuXn9WeRX6vqSqIDgwIfPQtwMP4jaDsQsDncjTDDsExT4lR/91OLjRo8bmC1e+Cw==}
engines: {node: '>=12'}
cpu: [x64]
os: [freebsd]
requiresBuild: true
dev: true
optional: true
/@esbuild/linux-arm64@0.20.2:
resolution: {integrity: sha512-9pb6rBjGvTFNira2FLIWqDk/uaf42sSyLE8j1rnUpuzsODBq7FvpwHYZxQ/It/8b+QOS1RYfqgGFNLRI+qlq2A==}
engines: {node: '>=12'}
cpu: [arm64]
os: [linux]
requiresBuild: true
dev: true
optional: true
/@esbuild/linux-arm@0.20.2:
resolution: {integrity: sha512-VhLPeR8HTMPccbuWWcEUD1Az68TqaTYyj6nfE4QByZIQEQVWBB8vup8PpR7y1QHL3CpcF6xd5WVBU/+SBEvGTg==}
engines: {node: '>=12'}
cpu: [arm]
os: [linux]
requiresBuild: true
dev: true
optional: true
/@esbuild/linux-ia32@0.20.2:
resolution: {integrity: sha512-o10utieEkNPFDZFQm9CoP7Tvb33UutoJqg3qKf1PWVeeJhJw0Q347PxMvBgVVFgouYLGIhFYG0UGdBumROyiig==}
engines: {node: '>=12'}
cpu: [ia32]
os: [linux]
requiresBuild: true
dev: true
optional: true
/@esbuild/linux-loong64@0.20.2:
resolution: {integrity: sha512-PR7sp6R/UC4CFVomVINKJ80pMFlfDfMQMYynX7t1tNTeivQ6XdX5r2XovMmha/VjR1YN/HgHWsVcTRIMkymrgQ==}
engines: {node: '>=12'}
cpu: [loong64]
os: [linux]
requiresBuild: true
dev: true
optional: true
/@esbuild/linux-mips64el@0.20.2:
resolution: {integrity: sha512-4BlTqeutE/KnOiTG5Y6Sb/Hw6hsBOZapOVF6njAESHInhlQAghVVZL1ZpIctBOoTFbQyGW+LsVYZ8lSSB3wkjA==}
engines: {node: '>=12'}
cpu: [mips64el]
os: [linux]
requiresBuild: true
dev: true
optional: true
/@esbuild/linux-ppc64@0.20.2:
resolution: {integrity: sha512-rD3KsaDprDcfajSKdn25ooz5J5/fWBylaaXkuotBDGnMnDP1Uv5DLAN/45qfnf3JDYyJv/ytGHQaziHUdyzaAg==}
engines: {node: '>=12'}
cpu: [ppc64]
os: [linux]
requiresBuild: true
dev: true
optional: true
/@esbuild/linux-riscv64@0.20.2:
resolution: {integrity: sha512-snwmBKacKmwTMmhLlz/3aH1Q9T8v45bKYGE3j26TsaOVtjIag4wLfWSiZykXzXuE1kbCE+zJRmwp+ZbIHinnVg==}
engines: {node: '>=12'}
cpu: [riscv64]
os: [linux]
requiresBuild: true
dev: true
optional: true
/@esbuild/linux-s390x@0.20.2:
resolution: {integrity: sha512-wcWISOobRWNm3cezm5HOZcYz1sKoHLd8VL1dl309DiixxVFoFe/o8HnwuIwn6sXre88Nwj+VwZUvJf4AFxkyrQ==}
engines: {node: '>=12'}
cpu: [s390x]
os: [linux]
requiresBuild: true
dev: true
optional: true
/@esbuild/linux-x64@0.20.2:
resolution: {integrity: sha512-1MdwI6OOTsfQfek8sLwgyjOXAu+wKhLEoaOLTjbijk6E2WONYpH9ZU2mNtR+lZ2B4uwr+usqGuVfFT9tMtGvGw==}
engines: {node: '>=12'}
cpu: [x64]
os: [linux]
requiresBuild: true
dev: true
optional: true
/@esbuild/netbsd-x64@0.20.2:
resolution: {integrity: sha512-K8/DhBxcVQkzYc43yJXDSyjlFeHQJBiowJ0uVL6Tor3jGQfSGHNNJcWxNbOI8v5k82prYqzPuwkzHt3J1T1iZQ==}
engines: {node: '>=12'}
cpu: [x64]
os: [netbsd]
requiresBuild: true
dev: true
optional: true
/@esbuild/openbsd-x64@0.20.2:
resolution: {integrity: sha512-eMpKlV0SThJmmJgiVyN9jTPJ2VBPquf6Kt/nAoo6DgHAoN57K15ZghiHaMvqjCye/uU4X5u3YSMgVBI1h3vKrQ==}
engines: {node: '>=12'}
cpu: [x64]
os: [openbsd]
requiresBuild: true
dev: true
optional: true
/@esbuild/sunos-x64@0.20.2:
resolution: {integrity: sha512-2UyFtRC6cXLyejf/YEld4Hajo7UHILetzE1vsRcGL3earZEW77JxrFjH4Ez2qaTiEfMgAXxfAZCm1fvM/G/o8w==}
engines: {node: '>=12'}
cpu: [x64]
os: [sunos]
requiresBuild: true
dev: true
optional: true
/@esbuild/win32-arm64@0.20.2:
resolution: {integrity: sha512-GRibxoawM9ZCnDxnP3usoUDO9vUkpAxIIZ6GQI+IlVmr5kP3zUq+l17xELTHMWTWzjxa2guPNyrpq1GWmPvcGQ==}
engines: {node: '>=12'}
cpu: [arm64]
os: [win32]
requiresBuild: true
dev: true
optional: true
/@esbuild/win32-ia32@0.20.2:
resolution: {integrity: sha512-HfLOfn9YWmkSKRQqovpnITazdtquEW8/SoHW7pWpuEeguaZI4QnCRW6b+oZTztdBnZOS2hqJ6im/D5cPzBTTlQ==}
engines: {node: '>=12'}
cpu: [ia32]
os: [win32]
requiresBuild: true
dev: true
optional: true
/@esbuild/win32-x64@0.20.2:
resolution: {integrity: sha512-N49X4lJX27+l9jbLKSqZ6bKNjzQvHaT8IIFUy+YIqmXQdjYCToGWwOItDrfby14c78aDd5NHQl29xingXfCdLQ==}
engines: {node: '>=12'}
cpu: [x64]
os: [win32]
requiresBuild: true
dev: true
optional: true
/@fastify/busboy@2.1.1:
resolution: {integrity: sha512-vBZP4NlzfOlerQTnba4aqZoMhE/a9HY7HRqoOPaETQcSQuWEIyZMHGfVu6w9wGtGK5fED5qRs2DteVCjOH60sA==}
engines: {node: '>=14'}
dev: false
/@octokit/auth-token@4.0.0:
resolution: {integrity: sha512-tY/msAuJo6ARbK6SPIxZrPBms3xPbfwBrulZe0Wtr/DIY9lje2HeV1uoebShn6mx7SjCHif6EjMvoREj+gZ+SA==}
engines: {node: '>= 18'}
dev: false
/@octokit/core@5.2.0:
resolution: {integrity: sha512-1LFfa/qnMQvEOAdzlQymH0ulepxbxnCYAKJZfMci/5XJyIHWgEYnDmgnKakbTh7CH2tFQ5O60oYDvns4i9RAIg==}
engines: {node: '>= 18'}
dependencies:
'@octokit/auth-token': 4.0.0
'@octokit/graphql': 7.1.0
'@octokit/request': 8.4.0
'@octokit/request-error': 5.1.0
'@octokit/types': 13.5.0
before-after-hook: 2.2.3
universal-user-agent: 6.0.1
dev: false
/@octokit/endpoint@9.0.5:
resolution: {integrity: sha512-ekqR4/+PCLkEBF6qgj8WqJfvDq65RH85OAgrtnVp1mSxaXF03u2xW/hUdweGS5654IlC0wkNYC18Z50tSYTAFw==}
engines: {node: '>= 18'}
dependencies:
'@octokit/types': 13.5.0
universal-user-agent: 6.0.1
dev: false
/@octokit/graphql@7.1.0:
resolution: {integrity: sha512-r+oZUH7aMFui1ypZnAvZmn0KSqAUgE1/tUXIWaqUCa1758ts/Jio84GZuzsvUkme98kv0WFY8//n0J1Z+vsIsQ==}
engines: {node: '>= 18'}
dependencies:
'@octokit/request': 8.4.0
'@octokit/types': 13.5.0
universal-user-agent: 6.0.1
dev: false
/@octokit/openapi-types@20.0.0:
resolution: {integrity: sha512-EtqRBEjp1dL/15V7WiX5LJMIxxkdiGJnabzYx5Apx4FkQIFgAfKumXeYAqqJCj1s+BMX4cPFIFC4OLCR6stlnA==}
dev: false
/@octokit/openapi-types@22.2.0:
resolution: {integrity: sha512-QBhVjcUa9W7Wwhm6DBFu6ZZ+1/t/oYxqc2tp81Pi41YNuJinbFRx8B133qVOrAaBbF7D/m0Et6f9/pZt9Rc+tg==}
dev: false
/@octokit/plugin-paginate-rest@9.2.1(@octokit/core@5.2.0):
resolution: {integrity: sha512-wfGhE/TAkXZRLjksFXuDZdmGnJQHvtU/joFQdweXUgzo1XwvBCD4o4+75NtFfjfLK5IwLf9vHTfSiU3sLRYpRw==}
engines: {node: '>= 18'}
peerDependencies:
'@octokit/core': '5'
dependencies:
'@octokit/core': 5.2.0
'@octokit/types': 12.6.0
dev: false
/@octokit/plugin-rest-endpoint-methods@10.4.1(@octokit/core@5.2.0):
resolution: {integrity: sha512-xV1b+ceKV9KytQe3zCVqjg+8GTGfDYwaT1ATU5isiUyVtlVAO3HNdzpS4sr4GBx4hxQ46s7ITtZrAsxG22+rVg==}
engines: {node: '>= 18'}
peerDependencies:
'@octokit/core': '5'
dependencies:
'@octokit/core': 5.2.0
'@octokit/types': 12.6.0
dev: false
/@octokit/request-error@5.1.0:
resolution: {integrity: sha512-GETXfE05J0+7H2STzekpKObFe765O5dlAKUTLNGeH+x47z7JjXHfsHKo5z21D/o/IOZTUEI6nyWyR+bZVP/n5Q==}
engines: {node: '>= 18'}
dependencies:
'@octokit/types': 13.5.0
deprecation: 2.3.1
once: 1.4.0
dev: false
/@octokit/request-error@6.1.1:
resolution: {integrity: sha512-1mw1gqT3fR/WFvnoVpY/zUM2o/XkMs/2AszUUG9I69xn0JFLv6PGkPhNk5lbfvROs79wiS0bqiJNxfCZcRJJdg==}
engines: {node: '>= 18'}
dependencies:
'@octokit/types': 13.5.0
dev: false
/@octokit/request@8.4.0:
resolution: {integrity: sha512-9Bb014e+m2TgBeEJGEbdplMVWwPmL1FPtggHQRkV+WVsMggPtEkLKPlcVYm/o8xKLkpJ7B+6N8WfQMtDLX2Dpw==}
engines: {node: '>= 18'}
dependencies:
'@octokit/endpoint': 9.0.5
'@octokit/request-error': 5.1.0
'@octokit/types': 13.5.0
universal-user-agent: 6.0.1
dev: false
/@octokit/types@12.6.0:
resolution: {integrity: sha512-1rhSOfRa6H9w4YwK0yrf5faDaDTb+yLyBUKOCV4xtCDB5VmIPqd/v9yr9o6SAzOAlRxMiRiCic6JVM1/kunVkw==}
dependencies:
'@octokit/openapi-types': 20.0.0
dev: false
/@octokit/types@13.5.0:
resolution: {integrity: sha512-HdqWTf5Z3qwDVlzCrP8UJquMwunpDiMPt5er+QjGzL4hqr/vBVY/MauQgS1xWxCDT1oMx1EULyqxncdCY/NVSQ==}
dependencies:
'@octokit/openapi-types': 22.2.0
dev: false
/@octokit/webhooks-types@7.5.1:
resolution: {integrity: sha512-1dozxWEP8lKGbtEu7HkRbK1F/nIPuJXNfT0gd96y6d3LcHZTtRtlf8xz3nicSJfesADxJyDh+mWBOsdLkqgzYw==}
dev: false
/@types/conventional-commits-parser@5.0.0:
resolution: {integrity: sha512-loB369iXNmAZglwWATL+WRe+CRMmmBPtpolYzIebFaX4YA3x+BEfLqhUAV9WanycKI3TG1IMr5bMJDajDKLlUQ==}
dependencies:
'@types/node': 20.12.7
dev: true
/@types/lodash@4.17.0:
resolution: {integrity: sha512-t7dhREVv6dbNj0q17X12j7yDG4bD/DHYX7o5/DbDxobP0HnGPgpRz2Ej77aL7TZT3DSw13fqUTj8J4mMnqa7WA==}
dev: true
/@types/node@20.12.7:
resolution: {integrity: sha512-wq0cICSkRLVaf3UGLMGItu/PtdY7oaXaI/RVU+xliKVOtRna3PRY57ZDfztpDL0n11vfymMUnXv8QwYCO7L1wg==}
dependencies:
undici-types: 5.26.5
dev: true
/JSONStream@1.3.5:
resolution: {integrity: sha512-E+iruNOY8VV9s4JEbe1aNEm6MiszPRr/UfcHMz0TQh1BXSxHK+ASV1R6W4HpjBhSeS+54PIsAMCBmwD06LLsqQ==}
hasBin: true
dependencies:
jsonparse: 1.3.1
through: 2.3.8
dev: false
/before-after-hook@2.2.3:
resolution: {integrity: sha512-NzUnlZexiaH/46WDhANlyR2bXRopNg4F/zuSA3OpZnllCUgRaOF2znDioDWrmbNVsuZk6l9pMquQB38cfBZwkQ==}
dev: false
/conventional-commit-types@3.0.0:
resolution: {integrity: sha512-SmmCYnOniSsAa9GqWOeLqc179lfr5TRu5b4QFDkbsrJ5TZjPJx85wtOr3zn+1dbeNiXDKGPbZ72IKbPhLXh/Lg==}
dev: false
/conventional-commits-parser@5.0.0:
resolution: {integrity: sha512-ZPMl0ZJbw74iS9LuX9YIAiW8pfM5p3yh2o/NbXHbkFuZzY5jvdi5jFycEOkmBW5H5I7nA+D6f3UcsCLP2vvSEA==}
engines: {node: '>=16'}
hasBin: true
dependencies:
JSONStream: 1.3.5
is-text-path: 2.0.0
meow: 12.1.1
split2: 4.2.0
dev: false
/dayjs@1.11.11:
resolution: {integrity: sha512-okzr3f11N6WuqYtZSvm+F776mB41wRZMhKP+hc34YdW+KmtYYK9iqvHSwo2k9FEH3fhGXvOPV6yz2IcSrfRUDg==}
dev: false
/deprecation@2.3.1:
resolution: {integrity: sha512-xmHIy4F3scKVwMsQ4WnVaS8bHOx0DmVwRywosKhaILI0ywMDWPtBSku2HNxRvF7jtwDRsoEwYQSfbxj8b7RlJQ==}
dev: false
/dotenv@16.4.5:
resolution: {integrity: sha512-ZmdL2rui+eB2YwhsWzjInR8LldtZHGDoQ1ugH85ppHKwpUHL7j7rN0Ti9NCnGiQbhaZ11FpR+7ao1dNsmduNUg==}
engines: {node: '>=12'}
dev: false
/esbuild@0.20.2:
resolution: {integrity: sha512-WdOOppmUNU+IbZ0PaDiTst80zjnrOkyJNHoKupIcVyU8Lvla3Ugx94VzkQ32Ijqd7UhHJy75gNWDMUekcrSJ6g==}
engines: {node: '>=12'}
hasBin: true
requiresBuild: true
optionalDependencies:
'@esbuild/aix-ppc64': 0.20.2
'@esbuild/android-arm': 0.20.2
'@esbuild/android-arm64': 0.20.2
'@esbuild/android-x64': 0.20.2
'@esbuild/darwin-arm64': 0.20.2
'@esbuild/darwin-x64': 0.20.2
'@esbuild/freebsd-arm64': 0.20.2
'@esbuild/freebsd-x64': 0.20.2
'@esbuild/linux-arm': 0.20.2
'@esbuild/linux-arm64': 0.20.2
'@esbuild/linux-ia32': 0.20.2
'@esbuild/linux-loong64': 0.20.2
'@esbuild/linux-mips64el': 0.20.2
'@esbuild/linux-ppc64': 0.20.2
'@esbuild/linux-riscv64': 0.20.2
'@esbuild/linux-s390x': 0.20.2
'@esbuild/linux-x64': 0.20.2
'@esbuild/netbsd-x64': 0.20.2
'@esbuild/openbsd-x64': 0.20.2
'@esbuild/sunos-x64': 0.20.2
'@esbuild/win32-arm64': 0.20.2
'@esbuild/win32-ia32': 0.20.2
'@esbuild/win32-x64': 0.20.2
dev: true
/fsevents@2.3.3:
resolution: {integrity: sha512-5xoDfX+fL7faATnagmWPpbFtwh/R77WmMMqqHGS65C3vvB0YHrgF+B1YmZ3441tMj5n63k0212XNoJwzlhffQw==}
engines: {node: ^8.16.0 || ^10.6.0 || >=11.0.0}
os: [darwin]
requiresBuild: true
dev: true
optional: true
/get-tsconfig@4.7.3:
resolution: {integrity: sha512-ZvkrzoUA0PQZM6fy6+/Hce561s+faD1rsNwhnO5FelNjyy7EMGJ3Rz1AQ8GYDWjhRs/7dBLOEJvhK8MiEJOAFg==}
dependencies:
resolve-pkg-maps: 1.0.0
dev: true
/is-text-path@2.0.0:
resolution: {integrity: sha512-+oDTluR6WEjdXEJMnC2z6A4FRwFoYuvShVVEGsS7ewc0UTi2QtAKMDJuL4BDEVt+5T7MjFo12RP8ghOM75oKJw==}
engines: {node: '>=8'}
dependencies:
text-extensions: 2.4.0
dev: false
/json5@2.2.3:
resolution: {integrity: sha512-XmOWe7eyHYH14cLdVPoyg+GOH3rYX++KpzrylJwSW98t3Nk+U8XOl8FWKOgwtzdb8lXGf6zYwDUzeHMWfxasyg==}
engines: {node: '>=6'}
hasBin: true
dev: true
/jsonparse@1.3.1:
resolution: {integrity: sha512-POQXvpdL69+CluYsillJ7SUhKvytYjW9vG/GKpnf+xP8UWgYEM/RaMzHHofbALDiKbbP1W8UEYmgGl39WkPZsg==}
engines: {'0': node >= 0.2.0}
dev: false
/lodash@4.17.21:
resolution: {integrity: sha512-v2kDEe57lecTulaDIuNTPy3Ry4gLGJ6Z1O3vE1krgXZNrsQ+LFTGHVxVjcXPs17LhbZVGedAJv8XZ1tvj5FvSg==}
dev: false
/meow@12.1.1:
resolution: {integrity: sha512-BhXM0Au22RwUneMPwSCnyhTOizdWoIEPU9sp0Aqa1PnDMR5Wv2FGXYDjuzJEIX+Eo2Rb8xuYe5jrnm5QowQFkw==}
engines: {node: '>=16.10'}
dev: false
/minimist@1.2.8:
resolution: {integrity: sha512-2yyAR8qBkN3YuheJanUpWC5U3bb5osDywNB8RzDVlDwDHbocAJveqqj1u8+SVD7jkWT4yvsHCpWqqWqAxb0zCA==}
dev: true
/once@1.4.0:
resolution: {integrity: sha512-lNaJgI+2Q5URQBkccEKHTQOPaXdUxnZZElQTZY0MFUAuaEqe1E+Nyvgdz/aIyNi6Z9MzO5dv1H8n58/GELp3+w==}
dependencies:
wrappy: 1.0.2
dev: false
/resolve-pkg-maps@1.0.0:
resolution: {integrity: sha512-seS2Tj26TBVOC2NIc2rOe2y2ZO7efxITtLZcGSOnHHNOQ7CkiUBfw0Iw2ck6xkIhPwLhKNLS8BO+hEpngQlqzw==}
dev: true
/split2@4.2.0:
resolution: {integrity: sha512-UcjcJOWknrNkF6PLX83qcHM6KHgVKNkV62Y8a5uYDVv9ydGQVwAHMKqHdJje1VTWpljG0WYpCDhrCdAOYH4TWg==}
engines: {node: '>= 10.x'}
dev: false
/strip-bom@3.0.0:
resolution: {integrity: sha512-vavAMRXOgBVNF6nyEEmL3DBK19iRpDcoIwW+swQ+CbGiu7lju6t+JklA1MHweoWtadgt4ISVUsXLyDq34ddcwA==}
engines: {node: '>=4'}
dev: true
/text-extensions@2.4.0:
resolution: {integrity: sha512-te/NtwBwfiNRLf9Ijqx3T0nlqZiQ2XrrtBvu+cLL8ZRrGkO0NHTug8MYFKyoSrv/sHTaSKfilUkizV6XhxMJ3g==}
engines: {node: '>=8'}
dev: false
/through@2.3.8:
resolution: {integrity: sha512-w89qg7PI8wAdvX60bMDP+bFoD5Dvhm9oLheFp5O4a2QF0cSBGsBX4qZmadPMvVqlLJBBci+WqGGOAPvcDeNSVg==}
dev: false
/tsconfig-paths@4.2.0:
resolution: {integrity: sha512-NoZ4roiN7LnbKn9QqE1amc9DJfzvZXxF4xDavcOWt1BPkdx+m+0gJuPM+S0vCe7zTJMYUP0R8pO2XMr+Y8oLIg==}
engines: {node: '>=6'}
dependencies:
json5: 2.2.3
minimist: 1.2.8
strip-bom: 3.0.0
dev: true
/tsx@4.8.2:
resolution: {integrity: sha512-hmmzS4U4mdy1Cnzpl/NQiPUC2k34EcNSTZYVJThYKhdqTwuBeF+4cG9KUK/PFQ7KHaAaYwqlb7QfmsE2nuj+WA==}
engines: {node: '>=18.0.0'}
hasBin: true
dependencies:
esbuild: 0.20.2
get-tsconfig: 4.7.3
optionalDependencies:
fsevents: 2.3.3
dev: true
/tunnel@0.0.6:
resolution: {integrity: sha512-1h/Lnq9yajKY2PEbBadPXj3VxsDDu844OnaAo52UVmIzIvwwtBPIuNvkjuzBlTWpfJyUbG3ez0KSBibQkj4ojg==}
engines: {node: '>=0.6.11 <=0.7.0 || >=0.7.3'}
dev: false
/typescript@5.4.5:
resolution: {integrity: sha512-vcI4UpRgg81oIRUFwR0WSIHKt11nJ7SAVlYNIu+QpqeyXP+gpQJy/Z4+F0aGxSE4MqwjyXvW/TzgkLAx2AGHwQ==}
engines: {node: '>=14.17'}
hasBin: true
dev: true
/undici-types@5.26.5:
resolution: {integrity: sha512-JlCMO+ehdEIKqlFxk6IfVoAUVmgz7cU7zD/h9XZ0qzeosSHmUJVOzSQvvYSYWXkFXC+IfLKSIffhv0sVZup6pA==}
dev: true
/undici@5.28.4:
resolution: {integrity: sha512-72RFADWFqKmUb2hmmvNODKL3p9hcB6Gt2DOQMis1SEBaV6a4MH8soBvzg+95CYhCKPFedut2JY9bMfrDl9D23g==}
engines: {node: '>=14.0'}
dependencies:
'@fastify/busboy': 2.1.1
dev: false
/universal-user-agent@6.0.1:
resolution: {integrity: sha512-yCzhz6FN2wU1NiiQRogkTQszlQSlpWaw8SvVegAc+bDxbzHgh1vX8uIe8OYyMH6DwH+sdTJsgMl36+mSMdRJIQ==}
dev: false
/uuid@8.3.2:
resolution: {integrity: sha512-+NYs2QeMWy+GWFOEm9xnn6HCDp0l7QBD7ml8zLUmJ+93Q5NF0NocErnwkTkXVFNiX3/fpC6afS8Dhb/gz7R7eg==}
hasBin: true
dev: false
/wrappy@1.0.2:
resolution: {integrity: sha512-l4Sp/DRseor9wL6EvV2+TuQn63dMkPjZ/sp9XkghTEbV9KlPS1xUsZ3u7/IQO4wxtcFB4bgpQPRcR3QCvezPcQ==}
dev: false

View File

@@ -1,30 +0,0 @@
/*
* Copyright 2023 Greptime Team
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
import * as core from "@actions/core";
import {config} from "dotenv";
import {getOctokit} from "@actions/github";
import {GitHub} from "@actions/github/lib/utils";
export function handleError(err: any): void {
console.error(err)
core.setFailed(`Unhandled error: ${err}`)
}
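// Note: `token` is the *name* of the environment variable holding the token; for example,
// obtainClient("GITHUB_TOKEN") reads process.env.GITHUB_TOKEN after dotenv's config() loads `.env`.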
export function obtainClient(token: string): InstanceType<typeof GitHub> {
config()
const value = process.env[token]
if (!value) throw new Error(`Environment variable ${token} is not set.`)
return getOctokit(value)
}

View File

@@ -1,14 +0,0 @@
{
"ts-node": {
"require": ["tsconfig-paths/register"]
},
"compilerOptions": {
"module": "NodeNext",
"moduleResolution": "NodeNext",
"target": "ES6",
"paths": {
"@/*": ["./src/*"]
},
"resolveJsonModule": true,
}
}

36 docker/Dockerfile Normal file
View File

@@ -0,0 +1,36 @@
FROM ubuntu:22.04 as builder
ENV LANG en_US.utf8
WORKDIR /greptimedb
# Install dependencies.
RUN apt-get update && apt-get install -y \
libssl-dev \
protobuf-compiler \
curl \
build-essential \
pkg-config \
python3 \
python3-dev \
python3-pip \
&& pip install pyarrow
# Install Rust.
SHELL ["/bin/bash", "-c"]
RUN curl --proto '=https' --tlsv1.2 -sSf https://sh.rustup.rs | sh -s -- --no-modify-path --default-toolchain none -y
ENV PATH /root/.cargo/bin/:$PATH
# Build the project in release mode.
COPY . .
RUN cargo build --release
# Export the binary to the clean image.
# TODO(zyy17): Maybe should use the more secure container image.
FROM ubuntu:22.04 as base
RUN apt-get update && DEBIAN_FRONTEND=noninteractive apt-get -y install ca-certificates
WORKDIR /greptime
COPY --from=builder /greptimedb/target/release/greptime /greptime/bin/
ENV PATH /greptime/bin/:$PATH
ENTRYPOINT ["greptime"]

57 docker/aarch64/Dockerfile Normal file
View File

@@ -0,0 +1,57 @@
FROM ubuntu:22.04 as builder
ENV LANG en_US.utf8
WORKDIR /greptimedb
# Install dependencies.
RUN apt-get update && apt-get install -y \
libssl-dev \
protobuf-compiler \
curl \
build-essential \
pkg-config \
wget
# Install Rust.
SHELL ["/bin/bash", "-c"]
RUN curl --proto '=https' --tlsv1.2 -sSf https://sh.rustup.rs | sh -s -- --no-modify-path --default-toolchain none -y
ENV PATH /root/.cargo/bin/:$PATH
# Install cross platform toolchain
RUN apt-get -y update && \
apt-get -y install g++-aarch64-linux-gnu gcc-aarch64-linux-gnu && \
apt-get install -y binutils-aarch64-linux-gnu
COPY ./docker/aarch64/compile-python.sh ./docker/aarch64/
RUN chmod +x ./docker/aarch64/compile-python.sh && \
./docker/aarch64/compile-python.sh
COPY ./rust-toolchain.toml .
# Install rustup target for cross compiling.
RUN rustup target add aarch64-unknown-linux-gnu
COPY . .
# Fetch dependencies in a separate `RUN` step so they are cached in their own layer.
RUN cargo fetch
# These three env vars are set by the compile script, so set them manually here for the Dockerfile build.
ENV LD_LIBRARY_PATH=$LD_LIBRARY_PATH:/usr/local/lib/
ENV LIBRARY_PATH=$LIBRARY_PATH:/usr/local/lib/
ENV PY_INSTALL_PATH=/greptimedb/python_arm64_build
# Set the environment variables for cross compiling.
# The cross-compiled Python is `python3` in PATH, but PyO3 needs `python`, so alias it.
# Build the project in release mode.
RUN export PYO3_CROSS_LIB_DIR=$PY_INSTALL_PATH/lib && \
alias python=python3 && \
cargo build --target aarch64-unknown-linux-gnu --release -F pyo3_backend
# Export the binary to the clean image.
FROM ubuntu:22.04 as base
RUN apt-get update && DEBIAN_FRONTEND=noninteractive apt-get -y install ca-certificates
WORKDIR /greptime
COPY --from=builder /greptimedb/target/aarch64-unknown-linux-gnu/release/greptime /greptime/bin/
ENV PATH /greptime/bin/:$PATH
ENTRYPOINT ["greptime"]

View File

@@ -0,0 +1,87 @@
#!/usr/bin/env bash
set -e
# This script downloads the Python source code, builds a native (amd64) Python first,
# and then uses that native build to cross-compile Python for aarch64.
ARCH=$1
PYTHON_VERSION=3.10.10
PYTHON_SOURCE_DIR=Python-${PYTHON_VERSION}
PYTHON_INSTALL_PATH_AMD64=${PWD}/python-${PYTHON_VERSION}/amd64
PYTHON_INSTALL_PATH_AARCH64=${PWD}/python-${PYTHON_VERSION}/aarch64
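# Usage: ./compile-python.sh [RUST_TARGET]
# The native (amd64) Python is always built; the aarch64 cross build only runs when the first
# argument is exactly "aarch64-unknown-linux-gnu" (see the check at the bottom of this script).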
function download_python_source_code() {
wget https://www.python.org/ftp/python/$PYTHON_VERSION/Python-$PYTHON_VERSION.tgz
tar -xvf Python-$PYTHON_VERSION.tgz
}
function compile_for_amd64_platform() {
mkdir -p "$PYTHON_INSTALL_PATH_AMD64"
echo "Compiling for amd64 platform..."
./configure \
--prefix="$PYTHON_INSTALL_PATH_AMD64" \
--enable-shared \
ac_cv_pthread_is_default=no ac_cv_pthread=yes ac_cv_cxx_thread=yes \
ac_cv_have_long_long_format=yes \
--disable-ipv6 ac_cv_file__dev_ptmx=no ac_cv_file__dev_ptc=no
make
make install
}
# A brief explanation of the Python configure options used below:
# --enable-shared: enable building a shared Python library (default is no) but we do need it for calling from rust
# CC, CXX, AR, LD, RANLIB: set the compiler, archiver, linker, and ranlib programs to use
# build: the machine you are building on, host: the machine you will run the compiled program on
# --with-system-ffi: build the _ctypes module using an installed ffi library (see Doc/library/ctypes.rst); not used here, TODO: could remove
# ac_cv_pthread_is_default=no ac_cv_pthread=yes ac_cv_cxx_thread=yes:
# allow cross-compiled python to have -pthread set for CXX, see https://github.com/python/cpython/pull/22525
# ac_cv_have_long_long_format=yes: target platform supports long long type
# disable-ipv6: disable ipv6 support, we don't need it here
# ac_cv_file__dev_ptmx=no ac_cv_file__dev_ptc=no: disable pty support, we don't need it here
function compile_for_aarch64_platform() {
export LD_LIBRARY_PATH=$PYTHON_INSTALL_PATH_AMD64/lib:$LD_LIBRARY_PATH
export LIBRARY_PATH=$PYTHON_INSTALL_PATH_AMD64/lib:$LIBRARY_PATH
export PATH=$PYTHON_INSTALL_PATH_AMD64/bin:$PATH
mkdir -p "$PYTHON_INSTALL_PATH_AARCH64"
echo "Compiling for aarch64 platform..."
echo "LD_LIBRARY_PATH: $LD_LIBRARY_PATH"
echo "LIBRARY_PATH: $LIBRARY_PATH"
echo "PATH: $PATH"
./configure --build=x86_64-linux-gnu --host=aarch64-linux-gnu \
--prefix="$PYTHON_INSTALL_PATH_AARCH64" --enable-optimizations \
CC=aarch64-linux-gnu-gcc \
CXX=aarch64-linux-gnu-g++ \
AR=aarch64-linux-gnu-ar \
LD=aarch64-linux-gnu-ld \
RANLIB=aarch64-linux-gnu-ranlib \
--enable-shared \
ac_cv_pthread_is_default=no ac_cv_pthread=yes ac_cv_cxx_thread=yes \
ac_cv_have_long_long_format=yes \
--disable-ipv6 ac_cv_file__dev_ptmx=no ac_cv_file__dev_ptc=no
make
make altinstall
}
# Main script starts here.
download_python_source_code
# Enter the python source code directory.
cd $PYTHON_SOURCE_DIR || exit 1
# Build local python first, then build cross-compiled python.
compile_for_amd64_platform
# Clean the build directory.
make clean && make distclean
# Cross compile python for aarch64.
if [ "$ARCH" = "aarch64-unknown-linux-gnu" ]; then
compile_for_aarch64_platform
fi

View File

@@ -1,54 +0,0 @@
FROM centos:7 as builder
ARG CARGO_PROFILE
ARG FEATURES
ARG OUTPUT_DIR
ENV LANG en_US.utf8
WORKDIR /greptimedb
# Install dependencies
RUN ulimit -n 1024000 && yum groupinstall -y 'Development Tools'
RUN yum install -y epel-release \
openssl \
openssl-devel \
centos-release-scl \
rh-python38 \
rh-python38-python-devel \
which
# Install protoc
RUN curl -LO https://github.com/protocolbuffers/protobuf/releases/download/v3.15.8/protoc-3.15.8-linux-x86_64.zip
RUN unzip protoc-3.15.8-linux-x86_64.zip -d /usr/local/
# Install Rust
SHELL ["/bin/bash", "-c"]
RUN curl --proto '=https' --tlsv1.2 -sSf https://sh.rustup.rs | sh -s -- --no-modify-path --default-toolchain none -y
ENV PATH /opt/rh/rh-python38/root/usr/bin:/usr/local/bin:/root/.cargo/bin/:$PATH
# Build the project in release mode.
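# Note: the --mount cache/bind options below require Docker BuildKit (e.g. DOCKER_BUILDKIT=1).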
RUN --mount=target=.,rw \
--mount=type=cache,target=/root/.cargo/registry \
make build \
CARGO_PROFILE=${CARGO_PROFILE} \
FEATURES=${FEATURES} \
TARGET_DIR=/out/target
# Export the binary to the clean image.
FROM centos:7 as base
ARG OUTPUT_DIR
RUN yum install -y epel-release \
openssl \
openssl-devel \
centos-release-scl \
rh-python38 \
rh-python38-python-devel \
which
WORKDIR /greptime
COPY --from=builder /out/target/${OUTPUT_DIR}/greptime /greptime/bin/
ENV PATH /greptime/bin/:$PATH
ENTRYPOINT ["greptime"]

View File

@@ -1,62 +0,0 @@
FROM ubuntu:20.04 as builder
ARG CARGO_PROFILE
ARG FEATURES
ARG OUTPUT_DIR
ENV LANG en_US.utf8
WORKDIR /greptimedb
# Add PPA for Python 3.10.
RUN apt-get update && \
DEBIAN_FRONTEND=noninteractive apt-get install -y software-properties-common && \
add-apt-repository ppa:deadsnakes/ppa -y
# Install dependencies.
RUN --mount=type=cache,target=/var/cache/apt \
apt-get update && apt-get install -y \
libssl-dev \
protobuf-compiler \
curl \
git \
build-essential \
pkg-config \
python3.10 \
python3.10-dev \
python3-pip
# Install Rust.
SHELL ["/bin/bash", "-c"]
RUN curl --proto '=https' --tlsv1.2 -sSf https://sh.rustup.rs | sh -s -- --no-modify-path --default-toolchain none -y
ENV PATH /root/.cargo/bin/:$PATH
# Build the project in release mode.
RUN --mount=target=. \
--mount=type=cache,target=/root/.cargo/registry \
make build \
CARGO_PROFILE=${CARGO_PROFILE} \
FEATURES=${FEATURES} \
TARGET_DIR=/out/target
# Export the binary to the clean image.
# TODO(zyy17): Maybe should use the more secure container image.
FROM ubuntu:22.04 as base
ARG OUTPUT_DIR
RUN apt-get update && DEBIAN_FRONTEND=noninteractive apt-get \
-y install ca-certificates \
python3.10 \
python3.10-dev \
python3-pip \
curl
COPY ./docker/python/requirements.txt /etc/greptime/requirements.txt
RUN python3 -m pip install -r /etc/greptime/requirements.txt
WORKDIR /greptime
COPY --from=builder /out/target/${OUTPUT_DIR}/greptime /greptime/bin/
ENV PATH /greptime/bin/:$PATH
ENTRYPOINT ["greptime"]

19 docker/ci/Dockerfile Normal file
View File

@@ -0,0 +1,19 @@
FROM ubuntu:22.04
RUN apt-get update && DEBIAN_FRONTEND=noninteractive apt-get install -y \
ca-certificates \
python3.10 \
python3.10-dev \
python3-pip
COPY requirements.txt /etc/greptime/requirements.txt
RUN python3 -m pip install -r /etc/greptime/requirements.txt
ARG TARGETARCH
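# TARGETARCH (e.g. amd64, arm64) is populated automatically by BuildKit/buildx;
# the pre-built `greptime` binary is expected in a matching sub-directory of the build
# context (illustrative layout: ./amd64/greptime, ./arm64/greptime).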
ADD $TARGETARCH/greptime /greptime/bin/
ENV PATH /greptime/bin/:$PATH
ENTRYPOINT ["greptime"]

View File

@@ -1,20 +0,0 @@
FROM centos:7
# Note: CentOS 7 reached EOL on 2024-07-01, so `mirror.centos.org` is no longer available and `vault.centos.org` must be used instead.
RUN sed -i s/mirror.centos.org/vault.centos.org/g /etc/yum.repos.d/*.repo
RUN sed -i s/^#.*baseurl=http/baseurl=http/g /etc/yum.repos.d/*.repo
RUN yum install -y epel-release \
openssl \
openssl-devel \
centos-release-scl \
rh-python38 \
rh-python38-python-devel
ARG TARGETARCH
ADD $TARGETARCH/greptime /greptime/bin/
ENV PATH /greptime/bin/:$PATH
ENTRYPOINT ["greptime"]

Some files were not shown because too many files have changed in this diff.