Compare commits


1 commit

Author: luofucong
Commit: 1e37847f48
Message: x
Date: 2025-01-02 15:21:29 +08:00
1040 changed files with 37408 additions and 54507 deletions

View File

@@ -3,12 +3,3 @@ linker = "aarch64-linux-gnu-gcc"
[alias] [alias]
sqlness = "run --bin sqlness-runner --" sqlness = "run --bin sqlness-runner --"
[unstable.git]
shallow_index = true
shallow_deps = true
[unstable.gitoxide]
fetch = true
checkout = true
list_files = true
internal_use_git2 = false
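
The `[unstable.git]` and `[unstable.gitoxide]` tables above are Cargo's nightly-only configuration for shallow Git fetches and the gitoxide backend. A rough sketch, assuming a nightly toolchain, of exercising the same behaviour via `-Z` flags instead of the config file (the flag value spellings mirror the config keys and may differ between nightly releases):

    # Shallow index/dependency clones plus gitoxide-based fetching, ad hoc:
    cargo +nightly fetch -Z git=shallow-index,shallow-deps -Z gitoxide=fetch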

View File

@@ -41,13 +41,6 @@ runs:
username: ${{ inputs.dockerhub-image-registry-username }} username: ${{ inputs.dockerhub-image-registry-username }}
password: ${{ inputs.dockerhub-image-registry-token }} password: ${{ inputs.dockerhub-image-registry-token }}
- name: Set up qemu for multi-platform builds
uses: docker/setup-qemu-action@v3
with:
platforms: linux/amd64,linux/arm64
# The latest version will lead to segmentation fault.
image: tonistiigi/binfmt:qemu-v7.0.0-28
- name: Build and push dev-builder-ubuntu image - name: Build and push dev-builder-ubuntu image
shell: bash shell: bash
if: ${{ inputs.build-dev-builder-ubuntu == 'true' }} if: ${{ inputs.build-dev-builder-ubuntu == 'true' }}
@@ -76,8 +69,8 @@ runs:
run: | run: |
make dev-builder \ make dev-builder \
BASE_IMAGE=android \ BASE_IMAGE=android \
BUILDX_MULTI_PLATFORM_BUILD=amd64 \
IMAGE_REGISTRY=${{ inputs.dockerhub-image-registry }} \ IMAGE_REGISTRY=${{ inputs.dockerhub-image-registry }} \
IMAGE_NAMESPACE=${{ inputs.dockerhub-image-namespace }} \ IMAGE_NAMESPACE=${{ inputs.dockerhub-image-namespace }} \
DEV_BUILDER_IMAGE_TAG=${{ inputs.version }} DEV_BUILDER_IMAGE_TAG=${{ inputs.version }} && \
docker push ${{ inputs.dockerhub-image-registry }}/${{ inputs.dockerhub-image-namespace }}/dev-builder-android:${{ inputs.version }}
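
The qemu setup step shown in this hunk pins the binfmt image to qemu-v7.0.0-28 because, per its own comment, the latest version led to a segmentation fault. A sketch of performing that registration by hand on a build host (the architecture chosen here is illustrative):

    # Register the pinned QEMU emulators for cross-platform docker builds:
    docker run --privileged --rm tonistiigi/binfmt:qemu-v7.0.0-28 --install arm64
    # Confirm the handlers were registered:
    ls /proc/sys/fs/binfmt_misc/ | grep -i qemu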

View File

@@ -34,8 +34,8 @@ inputs:
required: true required: true
push-latest-tag: push-latest-tag:
description: Whether to push the latest tag description: Whether to push the latest tag
required: true required: false
default: 'false' default: 'true'
runs: runs:
using: composite using: composite
steps: steps:
@@ -47,11 +47,7 @@ runs:
password: ${{ inputs.image-registry-password }} password: ${{ inputs.image-registry-password }}
- name: Set up qemu for multi-platform builds - name: Set up qemu for multi-platform builds
uses: docker/setup-qemu-action@v3 uses: docker/setup-qemu-action@v2
with:
platforms: linux/amd64,linux/arm64
# The latest version will lead to segmentation fault.
image: tonistiigi/binfmt:qemu-v7.0.0-28
- name: Set up buildx - name: Set up buildx
uses: docker/setup-buildx-action@v2 uses: docker/setup-buildx-action@v2
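
For reference, a one-off multi-platform build roughly equivalent to what the setup-qemu and setup-buildx steps prepare for; the builder name, image name, and tag are placeholders, not values from this repository:

    docker buildx create --use --name multiarch-builder
    docker buildx build --platform linux/amd64,linux/arm64 \
      -t registry.example.com/example/dev-builder-ubuntu:latest --push .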

View File

@@ -22,8 +22,8 @@ inputs:
required: true required: true
push-latest-tag: push-latest-tag:
description: Whether to push the latest tag description: Whether to push the latest tag
required: true required: false
default: 'false' default: 'true'
dev-mode: dev-mode:
description: Enable dev mode, only build standard greptime description: Enable dev mode, only build standard greptime
required: false required: false

View File

@@ -48,11 +48,12 @@ runs:
path: /tmp/greptime-*.log path: /tmp/greptime-*.log
retention-days: 3 retention-days: 3
- name: Build greptime # Builds standard greptime binary - name: Build greptime
if: ${{ inputs.dev-mode == 'false' }}
uses: ./.github/actions/build-greptime-binary uses: ./.github/actions/build-greptime-binary
with: with:
base-image: ubuntu base-image: ubuntu
features: servers/dashboard,pg_kvbackend features: servers/dashboard
cargo-profile: ${{ inputs.cargo-profile }} cargo-profile: ${{ inputs.cargo-profile }}
artifacts-dir: greptime-linux-${{ inputs.arch }}-${{ inputs.version }} artifacts-dir: greptime-linux-${{ inputs.arch }}-${{ inputs.version }}
version: ${{ inputs.version }} version: ${{ inputs.version }}
@@ -70,7 +71,7 @@ runs:
if: ${{ inputs.arch == 'amd64' && inputs.dev-mode == 'false' }} # Builds greptime for centos if the host machine is amd64. if: ${{ inputs.arch == 'amd64' && inputs.dev-mode == 'false' }} # Builds greptime for centos if the host machine is amd64.
with: with:
base-image: centos base-image: centos
features: servers/dashboard,pg_kvbackend features: servers/dashboard
cargo-profile: ${{ inputs.cargo-profile }} cargo-profile: ${{ inputs.cargo-profile }}
artifacts-dir: greptime-linux-${{ inputs.arch }}-centos-${{ inputs.version }} artifacts-dir: greptime-linux-${{ inputs.arch }}-centos-${{ inputs.version }}
version: ${{ inputs.version }} version: ${{ inputs.version }}
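
The only difference between the two columns in these hunks is the pg_kvbackend feature. Assuming the build-greptime-binary action ultimately forwards the features string to cargo, the two variants correspond locally to roughly:

    # With the pg_kvbackend feature:
    cargo build --release --bin greptime --features "servers/dashboard,pg_kvbackend"
    # Dashboard only:
    cargo build --release --bin greptime --features "servers/dashboard"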

View File

@@ -9,8 +9,8 @@ runs:
steps: steps:
# Download artifacts from previous jobs, the artifacts will be downloaded to: # Download artifacts from previous jobs, the artifacts will be downloaded to:
# ${WORKING_DIR} # ${WORKING_DIR}
# |- greptime-darwin-amd64-v0.5.0/greptime-darwin-amd64-v0.5.0.tar.gz # |- greptime-darwin-amd64-pyo3-v0.5.0/greptime-darwin-amd64-pyo3-v0.5.0.tar.gz
# |- greptime-darwin-amd64-v0.5.0.sha256sum/greptime-darwin-amd64-v0.5.0.sha256sum # |- greptime-darwin-amd64-pyo3-v0.5.0.sha256sum/greptime-darwin-amd64-pyo3-v0.5.0.sha256sum
# |- greptime-darwin-amd64-v0.5.0/greptime-darwin-amd64-v0.5.0.tar.gz # |- greptime-darwin-amd64-v0.5.0/greptime-darwin-amd64-v0.5.0.tar.gz
# |- greptime-darwin-amd64-v0.5.0.sha256sum/greptime-darwin-amd64-v0.5.0.sha256sum # |- greptime-darwin-amd64-v0.5.0.sha256sum/greptime-darwin-amd64-v0.5.0.sha256sum
# ... # ...

View File

@@ -51,8 +51,8 @@ inputs:
required: true required: true
upload-to-s3: upload-to-s3:
description: Upload to S3 description: Upload to S3
required: true required: false
default: 'false' default: 'true'
artifacts-dir: artifacts-dir:
description: Directory to store artifacts description: Directory to store artifacts
required: false required: false
@@ -77,21 +77,13 @@ runs:
with: with:
path: ${{ inputs.artifacts-dir }} path: ${{ inputs.artifacts-dir }}
- name: Install s5cmd
shell: bash
run: |
wget https://github.com/peak/s5cmd/releases/download/v2.3.0/s5cmd_2.3.0_Linux-64bit.tar.gz
tar -xzf s5cmd_2.3.0_Linux-64bit.tar.gz
sudo mv s5cmd /usr/local/bin/
sudo chmod +x /usr/local/bin/s5cmd
- name: Release artifacts to cn region - name: Release artifacts to cn region
uses: nick-invision/retry@v2 uses: nick-invision/retry@v2
if: ${{ inputs.upload-to-s3 == 'true' }} if: ${{ inputs.upload-to-s3 == 'true' }}
env: env:
AWS_ACCESS_KEY_ID: ${{ inputs.aws-cn-access-key-id }} AWS_ACCESS_KEY_ID: ${{ inputs.aws-cn-access-key-id }}
AWS_SECRET_ACCESS_KEY: ${{ inputs.aws-cn-secret-access-key }} AWS_SECRET_ACCESS_KEY: ${{ inputs.aws-cn-secret-access-key }}
AWS_REGION: ${{ inputs.aws-cn-region }} AWS_DEFAULT_REGION: ${{ inputs.aws-cn-region }}
UPDATE_VERSION_INFO: ${{ inputs.update-version-info }} UPDATE_VERSION_INFO: ${{ inputs.update-version-info }}
with: with:
max_attempts: ${{ inputs.upload-max-retry-times }} max_attempts: ${{ inputs.upload-max-retry-times }}
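
One side of this hunk installs s5cmd and sets AWS_REGION; the other relies on the aws CLI and AWS_DEFAULT_REGION. That matches the tools' conventions: s5cmd (built on the AWS SDK for Go) picks up AWS_REGION, while the aws CLI traditionally uses AWS_DEFAULT_REGION. A minimal upload sketch for either path, with placeholder region, bucket, and file names:

    AWS_REGION=cn-northwest-1 s5cmd cp greptime.tar.gz s3://example-bucket/releases/
    AWS_DEFAULT_REGION=cn-northwest-1 aws s3 cp greptime.tar.gz s3://example-bucket/releases/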

View File

@@ -30,9 +30,9 @@ runs:
done done
# The compressed artifacts will use the following layout: # The compressed artifacts will use the following layout:
# greptime-linux-amd64-v0.3.0sha256sum # greptime-linux-amd64-pyo3-v0.3.0sha256sum
# greptime-linux-amd64-v0.3.0.tar.gz # greptime-linux-amd64-pyo3-v0.3.0.tar.gz
# greptime-linux-amd64-v0.3.0 # greptime-linux-amd64-pyo3-v0.3.0
# └── greptime # └── greptime
- name: Compress artifacts and calculate checksum - name: Compress artifacts and calculate checksum
working-directory: ${{ inputs.working-dir }} working-directory: ${{ inputs.working-dir }}
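
The layout described in the comment above comes from a plain tar-plus-checksum step; reproducing it for a single artifact directory (names are illustrative):

    tar -czvf greptime-linux-amd64-v0.3.0.tar.gz greptime-linux-amd64-v0.3.0/
    sha256sum greptime-linux-amd64-v0.3.0.tar.gz > greptime-linux-amd64-v0.3.0.sha256sum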

View File

@@ -27,13 +27,13 @@ function upload_artifacts() {
# ├── latest-version.txt # ├── latest-version.txt
# ├── latest-nightly-version.txt # ├── latest-nightly-version.txt
# ├── v0.1.0 # ├── v0.1.0
# │ ├── greptime-darwin-amd64-v0.1.0.sha256sum # │ ├── greptime-darwin-amd64-pyo3-v0.1.0.sha256sum
# │ └── greptime-darwin-amd64-v0.1.0.tar.gz # │ └── greptime-darwin-amd64-pyo3-v0.1.0.tar.gz
# └── v0.2.0 # └── v0.2.0
# ├── greptime-darwin-amd64-v0.2.0.sha256sum # ├── greptime-darwin-amd64-pyo3-v0.2.0.sha256sum
# └── greptime-darwin-amd64-v0.2.0.tar.gz # └── greptime-darwin-amd64-pyo3-v0.2.0.tar.gz
find "$ARTIFACTS_DIR" -type f \( -name "*.tar.gz" -o -name "*.sha256sum" \) | while IFS= read -r file; do find "$ARTIFACTS_DIR" -type f \( -name "*.tar.gz" -o -name "*.sha256sum" \) | while IFS= read -r file; do
s5cmd cp \ aws s3 cp \
"$file" "s3://$AWS_S3_BUCKET/$RELEASE_DIRS/$VERSION/$(basename "$file")" "$file" "s3://$AWS_S3_BUCKET/$RELEASE_DIRS/$VERSION/$(basename "$file")"
done done
} }
@@ -45,7 +45,7 @@ function update_version_info() {
if [[ "$VERSION" =~ ^v[0-9]+\.[0-9]+\.[0-9]+$ ]]; then if [[ "$VERSION" =~ ^v[0-9]+\.[0-9]+\.[0-9]+$ ]]; then
echo "Updating latest-version.txt" echo "Updating latest-version.txt"
echo "$VERSION" > latest-version.txt echo "$VERSION" > latest-version.txt
s5cmd cp \ aws s3 cp \
latest-version.txt "s3://$AWS_S3_BUCKET/$RELEASE_DIRS/latest-version.txt" latest-version.txt "s3://$AWS_S3_BUCKET/$RELEASE_DIRS/latest-version.txt"
fi fi
@@ -53,7 +53,7 @@ function update_version_info() {
if [[ "$VERSION" == *"nightly"* ]]; then if [[ "$VERSION" == *"nightly"* ]]; then
echo "Updating latest-nightly-version.txt" echo "Updating latest-nightly-version.txt"
echo "$VERSION" > latest-nightly-version.txt echo "$VERSION" > latest-nightly-version.txt
s5cmd cp \ aws s3 cp \
latest-nightly-version.txt "s3://$AWS_S3_BUCKET/$RELEASE_DIRS/latest-nightly-version.txt" latest-nightly-version.txt "s3://$AWS_S3_BUCKET/$RELEASE_DIRS/latest-nightly-version.txt"
fi fi
fi fi
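
Condensed sketch of the update_version_info logic shown above, runnable outside the workflow; the bucket and prefix are placeholders, and the copy command can be either CLI, matching the two columns:

    VERSION=v0.3.0
    if [[ "$VERSION" =~ ^v[0-9]+\.[0-9]+\.[0-9]+$ ]]; then
      echo "$VERSION" > latest-version.txt
      aws s3 cp latest-version.txt "s3://example-bucket/releases/latest-version.txt"
    fi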

View File

@@ -17,8 +17,6 @@ jobs:
runs-on: ubuntu-20.04 runs-on: ubuntu-20.04
steps: steps:
- uses: actions/checkout@v4 - uses: actions/checkout@v4
with:
persist-credentials: false
- uses: arduino/setup-protoc@v3 - uses: arduino/setup-protoc@v3
with: with:
repo-token: ${{ secrets.GITHUB_TOKEN }} repo-token: ${{ secrets.GITHUB_TOKEN }}
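
The persist-credentials option controls whether actions/checkout leaves its auth token behind in the checked-out repository's git config (it is stored as an http extraheader). A quick way to see whether credentials were persisted in a workspace, assuming a standard checkout:

    git config --local --get-all http.https://github.com/.extraheader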

View File

@@ -1,6 +1,9 @@
name: Check Dependencies name: Check Dependencies
on: on:
push:
branches:
- main
pull_request: pull_request:
branches: branches:
- main - main
@@ -12,8 +15,6 @@ jobs:
steps: steps:
- name: Checkout code - name: Checkout code
uses: actions/checkout@v4 uses: actions/checkout@v4
with:
persist-credentials: false
- name: Set up Rust - name: Set up Rust
uses: actions-rust-lang/setup-rust-toolchain@v1 uses: actions-rust-lang/setup-rust-toolchain@v1

View File

@@ -76,9 +76,15 @@ env:
NIGHTLY_RELEASE_PREFIX: nightly NIGHTLY_RELEASE_PREFIX: nightly
# Use the different image name to avoid conflict with the release images.
IMAGE_NAME: greptimedb-dev
# The source code will check out in the following path: '${WORKING_DIR}/dev/greptime'. # The source code will check out in the following path: '${WORKING_DIR}/dev/greptime'.
CHECKOUT_GREPTIMEDB_PATH: dev/greptimedb CHECKOUT_GREPTIMEDB_PATH: dev/greptimedb
permissions:
issues: write
jobs: jobs:
allocate-runners: allocate-runners:
name: Allocate runners name: Allocate runners
@@ -101,7 +107,6 @@ jobs:
uses: actions/checkout@v4 uses: actions/checkout@v4
with: with:
fetch-depth: 0 fetch-depth: 0
persist-credentials: false
- name: Create version - name: Create version
id: create-version id: create-version
@@ -156,7 +161,6 @@ jobs:
uses: actions/checkout@v4 uses: actions/checkout@v4
with: with:
fetch-depth: 0 fetch-depth: 0
persist-credentials: false
- name: Checkout greptimedb - name: Checkout greptimedb
uses: actions/checkout@v4 uses: actions/checkout@v4
@@ -164,7 +168,6 @@ jobs:
repository: ${{ inputs.repository }} repository: ${{ inputs.repository }}
ref: ${{ inputs.commit }} ref: ${{ inputs.commit }}
path: ${{ env.CHECKOUT_GREPTIMEDB_PATH }} path: ${{ env.CHECKOUT_GREPTIMEDB_PATH }}
persist-credentials: true
- uses: ./.github/actions/build-linux-artifacts - uses: ./.github/actions/build-linux-artifacts
with: with:
@@ -189,7 +192,6 @@ jobs:
uses: actions/checkout@v4 uses: actions/checkout@v4
with: with:
fetch-depth: 0 fetch-depth: 0
persist-credentials: false
- name: Checkout greptimedb - name: Checkout greptimedb
uses: actions/checkout@v4 uses: actions/checkout@v4
@@ -197,7 +199,6 @@ jobs:
repository: ${{ inputs.repository }} repository: ${{ inputs.repository }}
ref: ${{ inputs.commit }} ref: ${{ inputs.commit }}
path: ${{ env.CHECKOUT_GREPTIMEDB_PATH }} path: ${{ env.CHECKOUT_GREPTIMEDB_PATH }}
persist-credentials: true
- uses: ./.github/actions/build-linux-artifacts - uses: ./.github/actions/build-linux-artifacts
with: with:
@@ -225,14 +226,13 @@ jobs:
- uses: actions/checkout@v4 - uses: actions/checkout@v4
with: with:
fetch-depth: 0 fetch-depth: 0
persist-credentials: false
- name: Build and push images to dockerhub - name: Build and push images to dockerhub
uses: ./.github/actions/build-images uses: ./.github/actions/build-images
with: with:
image-registry: docker.io image-registry: docker.io
image-namespace: ${{ vars.IMAGE_NAMESPACE }} image-namespace: ${{ vars.IMAGE_NAMESPACE }}
image-name: ${{ vars.DEV_BUILD_IMAGE_NAME }} image-name: ${{ env.IMAGE_NAME }}
image-registry-username: ${{ secrets.DOCKERHUB_USERNAME }} image-registry-username: ${{ secrets.DOCKERHUB_USERNAME }}
image-registry-password: ${{ secrets.DOCKERHUB_TOKEN }} image-registry-password: ${{ secrets.DOCKERHUB_TOKEN }}
version: ${{ needs.allocate-runners.outputs.version }} version: ${{ needs.allocate-runners.outputs.version }}
@@ -257,14 +257,13 @@ jobs:
- uses: actions/checkout@v4 - uses: actions/checkout@v4
with: with:
fetch-depth: 0 fetch-depth: 0
persist-credentials: false
- name: Release artifacts to CN region - name: Release artifacts to CN region
uses: ./.github/actions/release-cn-artifacts uses: ./.github/actions/release-cn-artifacts
with: with:
src-image-registry: docker.io src-image-registry: docker.io
src-image-namespace: ${{ vars.IMAGE_NAMESPACE }} src-image-namespace: ${{ vars.IMAGE_NAMESPACE }}
src-image-name: ${{ vars.DEV_BUILD_IMAGE_NAME }} src-image-name: ${{ env.IMAGE_NAME }}
dst-image-registry-username: ${{ secrets.ALICLOUD_USERNAME }} dst-image-registry-username: ${{ secrets.ALICLOUD_USERNAME }}
dst-image-registry-password: ${{ secrets.ALICLOUD_PASSWORD }} dst-image-registry-password: ${{ secrets.ALICLOUD_PASSWORD }}
dst-image-registry: ${{ vars.ACR_IMAGE_REGISTRY }} dst-image-registry: ${{ vars.ACR_IMAGE_REGISTRY }}
@@ -274,7 +273,6 @@ jobs:
aws-cn-access-key-id: ${{ secrets.AWS_CN_ACCESS_KEY_ID }} aws-cn-access-key-id: ${{ secrets.AWS_CN_ACCESS_KEY_ID }}
aws-cn-secret-access-key: ${{ secrets.AWS_CN_SECRET_ACCESS_KEY }} aws-cn-secret-access-key: ${{ secrets.AWS_CN_SECRET_ACCESS_KEY }}
aws-cn-region: ${{ vars.AWS_RELEASE_BUCKET_REGION }} aws-cn-region: ${{ vars.AWS_RELEASE_BUCKET_REGION }}
upload-to-s3: false
dev-mode: true # Only build the standard images(exclude centos images). dev-mode: true # Only build the standard images(exclude centos images).
push-latest-tag: false # Don't push the latest tag to registry. push-latest-tag: false # Don't push the latest tag to registry.
update-version-info: false # Don't update the version info in S3. update-version-info: false # Don't update the version info in S3.
@@ -293,7 +291,6 @@ jobs:
uses: actions/checkout@v4 uses: actions/checkout@v4
with: with:
fetch-depth: 0 fetch-depth: 0
persist-credentials: false
- name: Stop EC2 runner - name: Stop EC2 runner
uses: ./.github/actions/stop-runner uses: ./.github/actions/stop-runner
@@ -319,7 +316,6 @@ jobs:
uses: actions/checkout@v4 uses: actions/checkout@v4
with: with:
fetch-depth: 0 fetch-depth: 0
persist-credentials: false
- name: Stop EC2 runner - name: Stop EC2 runner
uses: ./.github/actions/stop-runner uses: ./.github/actions/stop-runner
@@ -338,16 +334,10 @@ jobs:
release-images-to-dockerhub release-images-to-dockerhub
] ]
runs-on: ubuntu-20.04 runs-on: ubuntu-20.04
permissions:
issues: write
env: env:
SLACK_WEBHOOK_URL: ${{ secrets.SLACK_WEBHOOK_URL_DEVELOP_CHANNEL }} SLACK_WEBHOOK_URL: ${{ secrets.SLACK_WEBHOOK_URL_DEVELOP_CHANNEL }}
steps: steps:
- uses: actions/checkout@v4 - uses: actions/checkout@v4
with:
fetch-depth: 0
persist-credentials: false
- uses: ./.github/actions/setup-cyborg - uses: ./.github/actions/setup-cyborg
- name: Report CI status - name: Report CI status
id: report-ci-status id: report-ci-status

View File

@@ -1,6 +1,4 @@
on: on:
schedule:
- cron: "0 15 * * 1-5"
merge_group: merge_group:
pull_request: pull_request:
types: [ opened, synchronize, reopened, ready_for_review ] types: [ opened, synchronize, reopened, ready_for_review ]
@@ -26,8 +24,6 @@ jobs:
runs-on: ubuntu-20.04 runs-on: ubuntu-20.04
steps: steps:
- uses: actions/checkout@v4 - uses: actions/checkout@v4
with:
persist-credentials: false
- uses: crate-ci/typos@master - uses: crate-ci/typos@master
- name: Check the config docs - name: Check the config docs
run: | run: |
@@ -40,8 +36,6 @@ jobs:
name: Check License Header name: Check License Header
steps: steps:
- uses: actions/checkout@v4 - uses: actions/checkout@v4
with:
persist-credentials: false
- uses: korandoru/hawkeye@v5 - uses: korandoru/hawkeye@v5
check: check:
@@ -49,12 +43,10 @@ jobs:
runs-on: ${{ matrix.os }} runs-on: ${{ matrix.os }}
strategy: strategy:
matrix: matrix:
os: [ ubuntu-20.04 ] os: [ windows-2022, ubuntu-20.04 ]
timeout-minutes: 60 timeout-minutes: 60
steps: steps:
- uses: actions/checkout@v4 - uses: actions/checkout@v4
with:
persist-credentials: false
- uses: arduino/setup-protoc@v3 - uses: arduino/setup-protoc@v3
with: with:
repo-token: ${{ secrets.GITHUB_TOKEN }} repo-token: ${{ secrets.GITHUB_TOKEN }}
@@ -65,8 +57,6 @@ jobs:
# Shares across multiple jobs # Shares across multiple jobs
# Shares with `Clippy` job # Shares with `Clippy` job
shared-key: "check-lint" shared-key: "check-lint"
cache-all-crates: "true"
save-if: ${{ github.ref == 'refs/heads/main' }}
- name: Run cargo check - name: Run cargo check
run: cargo check --locked --workspace --all-targets run: cargo check --locked --workspace --all-targets
@@ -76,9 +66,12 @@ jobs:
timeout-minutes: 60 timeout-minutes: 60
steps: steps:
- uses: actions/checkout@v4 - uses: actions/checkout@v4
with:
persist-credentials: false
- uses: actions-rust-lang/setup-rust-toolchain@v1 - uses: actions-rust-lang/setup-rust-toolchain@v1
- name: Rust Cache
uses: Swatinem/rust-cache@v2
with:
# Shares across multiple jobs
shared-key: "check-toml"
- name: Install taplo - name: Install taplo
run: cargo +stable install taplo-cli --version ^0.9 --locked --force run: cargo +stable install taplo-cli --version ^0.9 --locked --force
- name: Run taplo - name: Run taplo
@@ -93,8 +86,6 @@ jobs:
timeout-minutes: 60 timeout-minutes: 60
steps: steps:
- uses: actions/checkout@v4 - uses: actions/checkout@v4
with:
persist-credentials: false
- uses: arduino/setup-protoc@v3 - uses: arduino/setup-protoc@v3
with: with:
repo-token: ${{ secrets.GITHUB_TOKEN }} repo-token: ${{ secrets.GITHUB_TOKEN }}
@@ -103,15 +94,13 @@ jobs:
with: with:
# Shares across multiple jobs # Shares across multiple jobs
shared-key: "build-binaries" shared-key: "build-binaries"
cache-all-crates: "true"
save-if: ${{ github.ref == 'refs/heads/main' }}
- name: Install cargo-gc-bin - name: Install cargo-gc-bin
shell: bash shell: bash
run: cargo install cargo-gc-bin --force run: cargo install cargo-gc-bin --force
- name: Build greptime binaries - name: Build greptime binaries
shell: bash shell: bash
# `cargo gc` will invoke `cargo build` with specified args # `cargo gc` will invoke `cargo build` with specified args
run: cargo gc -- --bin greptime --bin sqlness-runner --features pg_kvbackend run: cargo gc -- --bin greptime --bin sqlness-runner
- name: Pack greptime binaries - name: Pack greptime binaries
shell: bash shell: bash
run: | run: |
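
Both build commands in this hunk are runnable locally once cargo-gc-bin is installed; per the comment above, `cargo gc` forwards the trailing arguments to `cargo build`:

    cargo install cargo-gc-bin --force
    # One column builds with the pg_kvbackend feature, the other without:
    cargo gc -- --bin greptime --bin sqlness-runner --features pg_kvbackend
    cargo gc -- --bin greptime --bin sqlness-runner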
@@ -149,12 +138,15 @@ jobs:
echo "Disk space after:" echo "Disk space after:"
df -h df -h
- uses: actions/checkout@v4 - uses: actions/checkout@v4
with:
persist-credentials: false
- uses: arduino/setup-protoc@v3 - uses: arduino/setup-protoc@v3
with: with:
repo-token: ${{ secrets.GITHUB_TOKEN }} repo-token: ${{ secrets.GITHUB_TOKEN }}
- uses: actions-rust-lang/setup-rust-toolchain@v1 - uses: actions-rust-lang/setup-rust-toolchain@v1
- name: Rust Cache
uses: Swatinem/rust-cache@v2
with:
# Shares across multiple jobs
shared-key: "fuzz-test-targets"
- name: Set Rust Fuzz - name: Set Rust Fuzz
shell: bash shell: bash
run: | run: |
@@ -204,12 +196,15 @@ jobs:
echo "Disk space after:" echo "Disk space after:"
df -h df -h
- uses: actions/checkout@v4 - uses: actions/checkout@v4
with:
persist-credentials: false
- uses: arduino/setup-protoc@v3 - uses: arduino/setup-protoc@v3
with: with:
repo-token: ${{ secrets.GITHUB_TOKEN }} repo-token: ${{ secrets.GITHUB_TOKEN }}
- uses: actions-rust-lang/setup-rust-toolchain@v1 - uses: actions-rust-lang/setup-rust-toolchain@v1
- name: Rust Cache
uses: Swatinem/rust-cache@v2
with:
# Shares across multiple jobs
shared-key: "fuzz-test-targets"
- name: Set Rust Fuzz - name: Set Rust Fuzz
shell: bash shell: bash
run: | run: |
@@ -252,8 +247,6 @@ jobs:
timeout-minutes: 60 timeout-minutes: 60
steps: steps:
- uses: actions/checkout@v4 - uses: actions/checkout@v4
with:
persist-credentials: false
- uses: arduino/setup-protoc@v3 - uses: arduino/setup-protoc@v3
with: with:
repo-token: ${{ secrets.GITHUB_TOKEN }} repo-token: ${{ secrets.GITHUB_TOKEN }}
@@ -262,15 +255,13 @@ jobs:
with: with:
# Shares across multiple jobs # Shares across multiple jobs
shared-key: "build-greptime-ci" shared-key: "build-greptime-ci"
cache-all-crates: "true"
save-if: ${{ github.ref == 'refs/heads/main' }}
- name: Install cargo-gc-bin - name: Install cargo-gc-bin
shell: bash shell: bash
run: cargo install cargo-gc-bin --force run: cargo install cargo-gc-bin --force
- name: Build greptime bianry - name: Build greptime bianry
shell: bash shell: bash
# `cargo gc` will invoke `cargo build` with specified args # `cargo gc` will invoke `cargo build` with specified args
run: cargo gc --profile ci -- --bin greptime --features pg_kvbackend run: cargo gc --profile ci -- --bin greptime
- name: Pack greptime binary - name: Pack greptime binary
shell: bash shell: bash
run: | run: |
@@ -311,8 +302,6 @@ jobs:
echo "Disk space after:" echo "Disk space after:"
df -h df -h
- uses: actions/checkout@v4 - uses: actions/checkout@v4
with:
persist-credentials: false
- name: Setup Kind - name: Setup Kind
uses: ./.github/actions/setup-kind uses: ./.github/actions/setup-kind
- if: matrix.mode.minio - if: matrix.mode.minio
@@ -328,6 +317,11 @@ jobs:
with: with:
repo-token: ${{ secrets.GITHUB_TOKEN }} repo-token: ${{ secrets.GITHUB_TOKEN }}
- uses: actions-rust-lang/setup-rust-toolchain@v1 - uses: actions-rust-lang/setup-rust-toolchain@v1
- name: Rust Cache
uses: Swatinem/rust-cache@v2
with:
# Shares across multiple jobs
shared-key: "fuzz-test-targets"
- name: Set Rust Fuzz - name: Set Rust Fuzz
shell: bash shell: bash
run: | run: |
@@ -455,8 +449,6 @@ jobs:
echo "Disk space after:" echo "Disk space after:"
df -h df -h
- uses: actions/checkout@v4 - uses: actions/checkout@v4
with:
persist-credentials: false
- name: Setup Kind - name: Setup Kind
uses: ./.github/actions/setup-kind uses: ./.github/actions/setup-kind
- name: Setup Chaos Mesh - name: Setup Chaos Mesh
@@ -474,6 +466,11 @@ jobs:
with: with:
repo-token: ${{ secrets.GITHUB_TOKEN }} repo-token: ${{ secrets.GITHUB_TOKEN }}
- uses: actions-rust-lang/setup-rust-toolchain@v1 - uses: actions-rust-lang/setup-rust-toolchain@v1
- name: Rust Cache
uses: Swatinem/rust-cache@v2
with:
# Shares across multiple jobs
shared-key: "fuzz-test-targets"
- name: Set Rust Fuzz - name: Set Rust Fuzz
shell: bash shell: bash
run: | run: |
@@ -576,18 +573,13 @@ jobs:
- name: "Remote WAL" - name: "Remote WAL"
opts: "-w kafka -k 127.0.0.1:9092" opts: "-w kafka -k 127.0.0.1:9092"
kafka: true kafka: true
- name: "Pg Kvbackend"
opts: "--setup-pg"
kafka: false
timeout-minutes: 60 timeout-minutes: 60
steps: steps:
- uses: actions/checkout@v4 - uses: actions/checkout@v4
with:
persist-credentials: false
- if: matrix.mode.kafka - if: matrix.mode.kafka
name: Setup kafka server name: Setup kafka server
working-directory: tests-integration/fixtures working-directory: tests-integration/fixtures/kafka
run: docker compose up -d --wait kafka run: docker compose -f docker-compose-standalone.yml up -d --wait
- name: Download pre-built binaries - name: Download pre-built binaries
uses: actions/download-artifact@v4 uses: actions/download-artifact@v4
with: with:
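
The two kafka fixture invocations in this hunk differ only in layout: one keeps a single compose file under tests-integration/fixtures with a kafka service, the other a standalone compose file in a kafka subdirectory. Both are runnable locally:

    (cd tests-integration/fixtures && docker compose up -d --wait kafka)
    (cd tests-integration/fixtures/kafka && docker compose -f docker-compose-standalone.yml up -d --wait)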
@@ -611,14 +603,17 @@ jobs:
timeout-minutes: 60 timeout-minutes: 60
steps: steps:
- uses: actions/checkout@v4 - uses: actions/checkout@v4
with:
persist-credentials: false
- uses: arduino/setup-protoc@v3 - uses: arduino/setup-protoc@v3
with: with:
repo-token: ${{ secrets.GITHUB_TOKEN }} repo-token: ${{ secrets.GITHUB_TOKEN }}
- uses: actions-rust-lang/setup-rust-toolchain@v1 - uses: actions-rust-lang/setup-rust-toolchain@v1
with: with:
components: rustfmt components: rustfmt
- name: Rust Cache
uses: Swatinem/rust-cache@v2
with:
# Shares across multiple jobs
shared-key: "check-rust-fmt"
- name: Check format - name: Check format
run: make fmt-check run: make fmt-check
@@ -628,8 +623,6 @@ jobs:
timeout-minutes: 60 timeout-minutes: 60
steps: steps:
- uses: actions/checkout@v4 - uses: actions/checkout@v4
with:
persist-credentials: false
- uses: arduino/setup-protoc@v3 - uses: arduino/setup-protoc@v3
with: with:
repo-token: ${{ secrets.GITHUB_TOKEN }} repo-token: ${{ secrets.GITHUB_TOKEN }}
@@ -642,106 +635,55 @@ jobs:
# Shares across multiple jobs # Shares across multiple jobs
# Shares with `Check` job # Shares with `Check` job
shared-key: "check-lint" shared-key: "check-lint"
cache-all-crates: "true"
save-if: ${{ github.ref == 'refs/heads/main' }}
- name: Run cargo clippy - name: Run cargo clippy
run: make clippy run: make clippy
conflict-check:
name: Check for conflict
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v4
with:
persist-credentials: false
- name: Merge Conflict Finder
uses: olivernybroe/action-conflict-finder@v4.0
test:
if: github.event_name != 'merge_group'
runs-on: ubuntu-22.04-arm
timeout-minutes: 60
needs: [conflict-check, clippy, fmt]
steps:
- uses: actions/checkout@v4
with:
persist-credentials: false
- uses: arduino/setup-protoc@v3
with:
repo-token: ${{ secrets.GITHUB_TOKEN }}
- uses: rui314/setup-mold@v1
- name: Install toolchain
uses: actions-rust-lang/setup-rust-toolchain@v1
with:
cache: false
- name: Rust Cache
uses: Swatinem/rust-cache@v2
with:
# Shares cross multiple jobs
shared-key: "coverage-test"
cache-all-crates: "true"
save-if: ${{ github.ref == 'refs/heads/main' }}
- name: Install latest nextest release
uses: taiki-e/install-action@nextest
- name: Setup external services
working-directory: tests-integration/fixtures
run: docker compose up -d --wait
- name: Run nextest cases
run: cargo nextest run --workspace -F dashboard -F pg_kvbackend
env:
CARGO_BUILD_RUSTFLAGS: "-C link-arg=-fuse-ld=mold"
RUST_BACKTRACE: 1
RUST_MIN_STACK: 8388608 # 8MB
CARGO_INCREMENTAL: 0
GT_S3_BUCKET: ${{ vars.AWS_CI_TEST_BUCKET }}
GT_S3_ACCESS_KEY_ID: ${{ secrets.AWS_CI_TEST_ACCESS_KEY_ID }}
GT_S3_ACCESS_KEY: ${{ secrets.AWS_CI_TEST_SECRET_ACCESS_KEY }}
GT_S3_REGION: ${{ vars.AWS_CI_TEST_BUCKET_REGION }}
GT_MINIO_BUCKET: greptime
GT_MINIO_ACCESS_KEY_ID: superpower_ci_user
GT_MINIO_ACCESS_KEY: superpower_password
GT_MINIO_REGION: us-west-2
GT_MINIO_ENDPOINT_URL: http://127.0.0.1:9000
GT_ETCD_ENDPOINTS: http://127.0.0.1:2379
GT_POSTGRES_ENDPOINTS: postgres://greptimedb:admin@127.0.0.1:5432/postgres
GT_KAFKA_ENDPOINTS: 127.0.0.1:9092
GT_KAFKA_SASL_ENDPOINTS: 127.0.0.1:9093
UNITTEST_LOG_DIR: "__unittest_logs"
coverage: coverage:
if: github.event_name == 'merge_group' if: github.event.pull_request.draft == false
runs-on: ubuntu-20.04-8-cores runs-on: ubuntu-20.04-8-cores
timeout-minutes: 60 timeout-minutes: 60
needs: [clippy, fmt]
steps: steps:
- uses: actions/checkout@v4 - uses: actions/checkout@v4
with:
persist-credentials: false
- uses: arduino/setup-protoc@v3 - uses: arduino/setup-protoc@v3
with: with:
repo-token: ${{ secrets.GITHUB_TOKEN }} repo-token: ${{ secrets.GITHUB_TOKEN }}
- uses: rui314/setup-mold@v1 - uses: KyleMayes/install-llvm-action@v1
with:
version: "14.0"
- name: Install toolchain - name: Install toolchain
uses: actions-rust-lang/setup-rust-toolchain@v1 uses: actions-rust-lang/setup-rust-toolchain@v1
with: with:
components: llvm-tools components: llvm-tools-preview
cache: false
- name: Rust Cache - name: Rust Cache
uses: Swatinem/rust-cache@v2 uses: Swatinem/rust-cache@v2
with: with:
# Shares cross multiple jobs # Shares cross multiple jobs
shared-key: "coverage-test" shared-key: "coverage-test"
save-if: ${{ github.ref == 'refs/heads/main' }} - name: Docker Cache
uses: ScribeMD/docker-cache@0.3.7
with:
key: docker-${{ runner.os }}-coverage
- name: Install latest nextest release - name: Install latest nextest release
uses: taiki-e/install-action@nextest uses: taiki-e/install-action@nextest
- name: Install cargo-llvm-cov - name: Install cargo-llvm-cov
uses: taiki-e/install-action@cargo-llvm-cov uses: taiki-e/install-action@cargo-llvm-cov
- name: Setup external services - name: Setup etcd server
working-directory: tests-integration/fixtures working-directory: tests-integration/fixtures/etcd
run: docker compose up -d --wait run: docker compose -f docker-compose-standalone.yml up -d --wait
- name: Setup kafka server
working-directory: tests-integration/fixtures/kafka
run: docker compose -f docker-compose-standalone.yml up -d --wait
- name: Setup minio
working-directory: tests-integration/fixtures/minio
run: docker compose -f docker-compose-standalone.yml up -d --wait
- name: Setup postgres server
working-directory: tests-integration/fixtures/postgres
run: docker compose -f docker-compose-standalone.yml up -d --wait
- name: Run nextest cases - name: Run nextest cases
run: cargo llvm-cov nextest --workspace --lcov --output-path lcov.info -F dashboard -F pg_kvbackend run: cargo llvm-cov nextest --workspace --lcov --output-path lcov.info -F dashboard -F pg_kvbackend
env: env:
CARGO_BUILD_RUSTFLAGS: "-C link-arg=-fuse-ld=mold" CARGO_BUILD_RUSTFLAGS: "-C link-arg=-fuse-ld=lld"
RUST_BACKTRACE: 1 RUST_BACKTRACE: 1
CARGO_INCREMENTAL: 0 CARGO_INCREMENTAL: 0
GT_S3_BUCKET: ${{ vars.AWS_CI_TEST_BUCKET }} GT_S3_BUCKET: ${{ vars.AWS_CI_TEST_BUCKET }}
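
A local approximation of the coverage run in this hunk, assuming the compose fixtures above are already up and mold (or lld, per the other column) is installed; the feature flags follow the workflow:

    cargo install cargo-nextest cargo-llvm-cov --locked
    CARGO_BUILD_RUSTFLAGS="-C link-arg=-fuse-ld=mold" \
      cargo llvm-cov nextest --workspace --lcov --output-path lcov.info -F dashboard -F pg_kvbackend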

View File

@@ -3,21 +3,16 @@ on:
pull_request_target: pull_request_target:
types: [opened, edited] types: [opened, edited]
concurrency: permissions:
group: ${{ github.workflow }}-${{ github.head_ref || github.run_id }} pull-requests: write
cancel-in-progress: true contents: read
jobs: jobs:
docbot: docbot:
runs-on: ubuntu-20.04 runs-on: ubuntu-20.04
permissions:
pull-requests: write
contents: read
timeout-minutes: 10 timeout-minutes: 10
steps: steps:
- uses: actions/checkout@v4 - uses: actions/checkout@v4
with:
persist-credentials: false
- uses: ./.github/actions/setup-cyborg - uses: ./.github/actions/setup-cyborg
- name: Maybe Follow Up Docs Issue - name: Maybe Follow Up Docs Issue
working-directory: cyborg working-directory: cyborg

View File

@@ -34,8 +34,6 @@ jobs:
runs-on: ubuntu-20.04 runs-on: ubuntu-20.04
steps: steps:
- uses: actions/checkout@v4 - uses: actions/checkout@v4
with:
persist-credentials: false
- uses: crate-ci/typos@master - uses: crate-ci/typos@master
license-header-check: license-header-check:
@@ -43,8 +41,6 @@ jobs:
name: Check License Header name: Check License Header
steps: steps:
- uses: actions/checkout@v4 - uses: actions/checkout@v4
with:
persist-credentials: false
- uses: korandoru/hawkeye@v5 - uses: korandoru/hawkeye@v5
check: check:
@@ -70,11 +66,6 @@ jobs:
steps: steps:
- run: 'echo "No action required"' - run: 'echo "No action required"'
test:
runs-on: ubuntu-20.04
steps:
- run: 'echo "No action required"'
sqlness: sqlness:
name: Sqlness Test (${{ matrix.mode.name }}) name: Sqlness Test (${{ matrix.mode.name }})
runs-on: ${{ matrix.os }} runs-on: ${{ matrix.os }}

View File

@@ -66,6 +66,13 @@ env:
NIGHTLY_RELEASE_PREFIX: nightly NIGHTLY_RELEASE_PREFIX: nightly
# Use the different image name to avoid conflict with the release images.
# The DockerHub image will be greptime/greptimedb-nightly.
IMAGE_NAME: greptimedb-nightly
permissions:
issues: write
jobs: jobs:
allocate-runners: allocate-runners:
name: Allocate runners name: Allocate runners
@@ -88,7 +95,6 @@ jobs:
uses: actions/checkout@v4 uses: actions/checkout@v4
with: with:
fetch-depth: 0 fetch-depth: 0
persist-credentials: false
- name: Create version - name: Create version
id: create-version id: create-version
@@ -141,7 +147,6 @@ jobs:
- uses: actions/checkout@v4 - uses: actions/checkout@v4
with: with:
fetch-depth: 0 fetch-depth: 0
persist-credentials: false
- uses: ./.github/actions/build-linux-artifacts - uses: ./.github/actions/build-linux-artifacts
with: with:
@@ -163,7 +168,6 @@ jobs:
- uses: actions/checkout@v4 - uses: actions/checkout@v4
with: with:
fetch-depth: 0 fetch-depth: 0
persist-credentials: false
- uses: ./.github/actions/build-linux-artifacts - uses: ./.github/actions/build-linux-artifacts
with: with:
@@ -189,18 +193,17 @@ jobs:
- uses: actions/checkout@v4 - uses: actions/checkout@v4
with: with:
fetch-depth: 0 fetch-depth: 0
persist-credentials: false
- name: Build and push images to dockerhub - name: Build and push images to dockerhub
uses: ./.github/actions/build-images uses: ./.github/actions/build-images
with: with:
image-registry: docker.io image-registry: docker.io
image-namespace: ${{ vars.IMAGE_NAMESPACE }} image-namespace: ${{ vars.IMAGE_NAMESPACE }}
image-name: ${{ vars.NIGHTLY_BUILD_IMAGE_NAME }} image-name: ${{ env.IMAGE_NAME }}
image-registry-username: ${{ secrets.DOCKERHUB_USERNAME }} image-registry-username: ${{ secrets.DOCKERHUB_USERNAME }}
image-registry-password: ${{ secrets.DOCKERHUB_TOKEN }} image-registry-password: ${{ secrets.DOCKERHUB_TOKEN }}
version: ${{ needs.allocate-runners.outputs.version }} version: ${{ needs.allocate-runners.outputs.version }}
push-latest-tag: false push-latest-tag: true
- name: Set nightly build result - name: Set nightly build result
id: set-nightly-build-result id: set-nightly-build-result
@@ -223,14 +226,13 @@ jobs:
- uses: actions/checkout@v4 - uses: actions/checkout@v4
with: with:
fetch-depth: 0 fetch-depth: 0
persist-credentials: false
- name: Release artifacts to CN region - name: Release artifacts to CN region
uses: ./.github/actions/release-cn-artifacts uses: ./.github/actions/release-cn-artifacts
with: with:
src-image-registry: docker.io src-image-registry: docker.io
src-image-namespace: ${{ vars.IMAGE_NAMESPACE }} src-image-namespace: ${{ vars.IMAGE_NAMESPACE }}
src-image-name: ${{ vars.NIGHTLY_BUILD_IMAGE_NAME }} src-image-name: ${{ env.IMAGE_NAME }}
dst-image-registry-username: ${{ secrets.ALICLOUD_USERNAME }} dst-image-registry-username: ${{ secrets.ALICLOUD_USERNAME }}
dst-image-registry-password: ${{ secrets.ALICLOUD_PASSWORD }} dst-image-registry-password: ${{ secrets.ALICLOUD_PASSWORD }}
dst-image-registry: ${{ vars.ACR_IMAGE_REGISTRY }} dst-image-registry: ${{ vars.ACR_IMAGE_REGISTRY }}
@@ -240,10 +242,9 @@ jobs:
aws-cn-access-key-id: ${{ secrets.AWS_CN_ACCESS_KEY_ID }} aws-cn-access-key-id: ${{ secrets.AWS_CN_ACCESS_KEY_ID }}
aws-cn-secret-access-key: ${{ secrets.AWS_CN_SECRET_ACCESS_KEY }} aws-cn-secret-access-key: ${{ secrets.AWS_CN_SECRET_ACCESS_KEY }}
aws-cn-region: ${{ vars.AWS_RELEASE_BUCKET_REGION }} aws-cn-region: ${{ vars.AWS_RELEASE_BUCKET_REGION }}
upload-to-s3: false
dev-mode: false dev-mode: false
update-version-info: false # Don't update version info in S3. update-version-info: false # Don't update version info in S3.
push-latest-tag: false push-latest-tag: true
stop-linux-amd64-runner: # It's always run as the last job in the workflow to make sure that the runner is released. stop-linux-amd64-runner: # It's always run as the last job in the workflow to make sure that the runner is released.
name: Stop linux-amd64 runner name: Stop linux-amd64 runner
@@ -259,7 +260,6 @@ jobs:
uses: actions/checkout@v4 uses: actions/checkout@v4
with: with:
fetch-depth: 0 fetch-depth: 0
persist-credentials: false
- name: Stop EC2 runner - name: Stop EC2 runner
uses: ./.github/actions/stop-runner uses: ./.github/actions/stop-runner
@@ -285,7 +285,6 @@ jobs:
uses: actions/checkout@v4 uses: actions/checkout@v4
with: with:
fetch-depth: 0 fetch-depth: 0
persist-credentials: false
- name: Stop EC2 runner - name: Stop EC2 runner
uses: ./.github/actions/stop-runner uses: ./.github/actions/stop-runner
@@ -304,14 +303,10 @@ jobs:
release-images-to-dockerhub release-images-to-dockerhub
] ]
runs-on: ubuntu-20.04 runs-on: ubuntu-20.04
permissions:
issues: write
env: env:
SLACK_WEBHOOK_URL: ${{ secrets.SLACK_WEBHOOK_URL_DEVELOP_CHANNEL }} SLACK_WEBHOOK_URL: ${{ secrets.SLACK_WEBHOOK_URL_DEVELOP_CHANNEL }}
steps: steps:
- uses: actions/checkout@v4 - uses: actions/checkout@v4
with:
persist-credentials: false
- uses: ./.github/actions/setup-cyborg - uses: ./.github/actions/setup-cyborg
- name: Report CI status - name: Report CI status
id: report-ci-status id: report-ci-status

View File

@@ -9,6 +9,9 @@ concurrency:
group: ${{ github.workflow }}-${{ github.head_ref || github.run_id }} group: ${{ github.workflow }}-${{ github.head_ref || github.run_id }}
cancel-in-progress: true cancel-in-progress: true
permissions:
issues: write
jobs: jobs:
sqlness-test: sqlness-test:
name: Run sqlness test name: Run sqlness test
@@ -19,7 +22,6 @@ jobs:
uses: actions/checkout@v4 uses: actions/checkout@v4
with: with:
fetch-depth: 0 fetch-depth: 0
persist-credentials: false
- name: Check install.sh - name: Check install.sh
run: ./.github/scripts/check-install-script.sh run: ./.github/scripts/check-install-script.sh
@@ -44,14 +46,9 @@ jobs:
name: Sqlness tests on Windows name: Sqlness tests on Windows
if: ${{ github.repository == 'GreptimeTeam/greptimedb' }} if: ${{ github.repository == 'GreptimeTeam/greptimedb' }}
runs-on: windows-2022-8-cores runs-on: windows-2022-8-cores
permissions:
issues: write
timeout-minutes: 60 timeout-minutes: 60
steps: steps:
- uses: actions/checkout@v4 - uses: actions/checkout@v4
with:
fetch-depth: 0
persist-credentials: false
- uses: ./.github/actions/setup-cyborg - uses: ./.github/actions/setup-cyborg
- uses: arduino/setup-protoc@v3 - uses: arduino/setup-protoc@v3
with: with:
@@ -79,9 +76,6 @@ jobs:
steps: steps:
- run: git config --global core.autocrlf false - run: git config --global core.autocrlf false
- uses: actions/checkout@v4 - uses: actions/checkout@v4
with:
fetch-depth: 0
persist-credentials: false
- uses: ./.github/actions/setup-cyborg - uses: ./.github/actions/setup-cyborg
- uses: arduino/setup-protoc@v3 - uses: arduino/setup-protoc@v3
with: with:
@@ -115,19 +109,14 @@ jobs:
UNITTEST_LOG_DIR: "__unittest_logs" UNITTEST_LOG_DIR: "__unittest_logs"
cleanbuild-linux-nix: cleanbuild-linux-nix:
name: Run clean build on Linux runs-on: ubuntu-latest-8-cores
runs-on: ubuntu-latest
if: ${{ github.repository == 'GreptimeTeam/greptimedb' }}
timeout-minutes: 60 timeout-minutes: 60
steps: steps:
- uses: actions/checkout@v4 - uses: actions/checkout@v4
with:
fetch-depth: 0
persist-credentials: false
- uses: cachix/install-nix-action@v27 - uses: cachix/install-nix-action@v27
with: with:
nix_path: nixpkgs=channel:nixos-24.11 nix_path: nixpkgs=channel:nixos-unstable
- run: nix develop --command cargo build - run: nix-shell --pure --run "cargo build"
check-status: check-status:
name: Check status name: Check status
@@ -151,9 +140,6 @@ jobs:
SLACK_WEBHOOK_URL: ${{ secrets.SLACK_WEBHOOK_URL_DEVELOP_CHANNEL }} SLACK_WEBHOOK_URL: ${{ secrets.SLACK_WEBHOOK_URL_DEVELOP_CHANNEL }}
steps: steps:
- uses: actions/checkout@v4 - uses: actions/checkout@v4
with:
fetch-depth: 0
persist-credentials: false
- uses: ./.github/actions/setup-cyborg - uses: ./.github/actions/setup-cyborg
- name: Report CI status - name: Report CI status
id: report-ci-status id: report-ci-status
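
The clean-build hunk above swaps both the nixpkgs channel and the build entry point. The two invocations, runnable from the repository root given the corresponding flake or shell.nix in the checkout, are:

    nix develop --command cargo build        # flake dev shell, nixos-24.11 channel
    nix-shell --pure --run "cargo build"     # shell.nix environment, nixos-unstable channel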

View File

@@ -29,7 +29,7 @@ jobs:
release-dev-builder-images: release-dev-builder-images:
name: Release dev builder images name: Release dev builder images
if: ${{ inputs.release_dev_builder_ubuntu_image || inputs.release_dev_builder_centos_image || inputs.release_dev_builder_android_image }} # Only manually trigger this job. if: ${{ inputs.release_dev_builder_ubuntu_image || inputs.release_dev_builder_centos_image || inputs.release_dev_builder_android_image }} # Only manually trigger this job.
runs-on: ubuntu-22.04-16-cores runs-on: ubuntu-20.04-16-cores
outputs: outputs:
version: ${{ steps.set-version.outputs.version }} version: ${{ steps.set-version.outputs.version }}
steps: steps:
@@ -37,7 +37,6 @@ jobs:
uses: actions/checkout@v4 uses: actions/checkout@v4
with: with:
fetch-depth: 0 fetch-depth: 0
persist-credentials: false
- name: Configure build image version - name: Configure build image version
id: set-version id: set-version
@@ -63,7 +62,7 @@ jobs:
release-dev-builder-images-ecr: release-dev-builder-images-ecr:
name: Release dev builder images to AWS ECR name: Release dev builder images to AWS ECR
runs-on: ubuntu-22.04 runs-on: ubuntu-20.04
needs: [ needs: [
release-dev-builder-images release-dev-builder-images
] ]
@@ -86,69 +85,51 @@ jobs:
- name: Push dev-builder-ubuntu image - name: Push dev-builder-ubuntu image
shell: bash shell: bash
if: ${{ inputs.release_dev_builder_ubuntu_image }} if: ${{ inputs.release_dev_builder_ubuntu_image }}
env:
IMAGE_VERSION: ${{ needs.release-dev-builder-images.outputs.version }}
IMAGE_NAMESPACE: ${{ vars.IMAGE_NAMESPACE }}
ECR_IMAGE_REGISTRY: ${{ vars.ECR_IMAGE_REGISTRY }}
ECR_IMAGE_NAMESPACE: ${{ vars.ECR_IMAGE_NAMESPACE }}
run: | run: |
docker run -v "${DOCKER_CONFIG:-$HOME/.docker}:/root/.docker:ro" \ docker run -v "${DOCKER_CONFIG:-$HOME/.docker}:/root/.docker:ro" \
-e "REGISTRY_AUTH_FILE=/root/.docker/config.json" \ -e "REGISTRY_AUTH_FILE=/root/.docker/config.json" \
quay.io/skopeo/stable:latest \ quay.io/skopeo/stable:latest \
copy -a docker://docker.io/$IMAGE_NAMESPACE/dev-builder-ubuntu:$IMAGE_VERSION \ copy -a docker://docker.io/${{ vars.IMAGE_NAMESPACE }}/dev-builder-ubuntu:${{ needs.release-dev-builder-images.outputs.version }} \
docker://$ECR_IMAGE_REGISTRY/$ECR_IMAGE_NAMESPACE/dev-builder-ubuntu:$IMAGE_VERSION docker://${{ vars.ECR_IMAGE_REGISTRY }}/${{ vars.ECR_IMAGE_NAMESPACE }}/dev-builder-ubuntu:${{ needs.release-dev-builder-images.outputs.version }}
docker run -v "${DOCKER_CONFIG:-$HOME/.docker}:/root/.docker:ro" \ docker run -v "${DOCKER_CONFIG:-$HOME/.docker}:/root/.docker:ro" \
-e "REGISTRY_AUTH_FILE=/root/.docker/config.json" \ -e "REGISTRY_AUTH_FILE=/root/.docker/config.json" \
quay.io/skopeo/stable:latest \ quay.io/skopeo/stable:latest \
copy -a docker://docker.io/$IMAGE_NAMESPACE/dev-builder-ubuntu:latest \ copy -a docker://docker.io/${{ vars.IMAGE_NAMESPACE }}/dev-builder-ubuntu:latest \
docker://$ECR_IMAGE_REGISTRY/$ECR_IMAGE_NAMESPACE/dev-builder-ubuntu:latest docker://${{ vars.ECR_IMAGE_REGISTRY }}/${{ vars.ECR_IMAGE_NAMESPACE }}/dev-builder-ubuntu:latest
- name: Push dev-builder-centos image - name: Push dev-builder-centos image
shell: bash shell: bash
if: ${{ inputs.release_dev_builder_centos_image }} if: ${{ inputs.release_dev_builder_centos_image }}
env:
IMAGE_VERSION: ${{ needs.release-dev-builder-images.outputs.version }}
IMAGE_NAMESPACE: ${{ vars.IMAGE_NAMESPACE }}
ECR_IMAGE_REGISTRY: ${{ vars.ECR_IMAGE_REGISTRY }}
ECR_IMAGE_NAMESPACE: ${{ vars.ECR_IMAGE_NAMESPACE }}
run: | run: |
docker run -v "${DOCKER_CONFIG:-$HOME/.docker}:/root/.docker:ro" \ docker run -v "${DOCKER_CONFIG:-$HOME/.docker}:/root/.docker:ro" \
-e "REGISTRY_AUTH_FILE=/root/.docker/config.json" \ -e "REGISTRY_AUTH_FILE=/root/.docker/config.json" \
quay.io/skopeo/stable:latest \ quay.io/skopeo/stable:latest \
copy -a docker://docker.io/$IMAGE_NAMESPACE/dev-builder-centos:$IMAGE_VERSION \ copy -a docker://docker.io/${{ vars.IMAGE_NAMESPACE }}/dev-builder-centos:${{ needs.release-dev-builder-images.outputs.version }} \
docker://$ECR_IMAGE_REGISTRY/$ECR_IMAGE_NAMESPACE/dev-builder-centos:$IMAGE_VERSION docker://${{ vars.ECR_IMAGE_REGISTRY }}/${{ vars.ECR_IMAGE_NAMESPACE }}/dev-builder-centos:${{ needs.release-dev-builder-images.outputs.version }}
docker run -v "${DOCKER_CONFIG:-$HOME/.docker}:/root/.docker:ro" \ docker run -v "${DOCKER_CONFIG:-$HOME/.docker}:/root/.docker:ro" \
-e "REGISTRY_AUTH_FILE=/root/.docker/config.json" \ -e "REGISTRY_AUTH_FILE=/root/.docker/config.json" \
quay.io/skopeo/stable:latest \ quay.io/skopeo/stable:latest \
copy -a docker://docker.io/$IMAGE_NAMESPACE/dev-builder-centos:latest \ copy -a docker://docker.io/${{ vars.IMAGE_NAMESPACE }}/dev-builder-centos:latest \
docker://$ECR_IMAGE_REGISTRY/$ECR_IMAGE_NAMESPACE/dev-builder-centos:latest docker://${{ vars.ECR_IMAGE_REGISTRY }}/${{ vars.ECR_IMAGE_NAMESPACE }}/dev-builder-centos:latest
- name: Push dev-builder-android image - name: Push dev-builder-android image
shell: bash shell: bash
if: ${{ inputs.release_dev_builder_android_image }} if: ${{ inputs.release_dev_builder_android_image }}
env:
IMAGE_VERSION: ${{ needs.release-dev-builder-images.outputs.version }}
IMAGE_NAMESPACE: ${{ vars.IMAGE_NAMESPACE }}
ECR_IMAGE_REGISTRY: ${{ vars.ECR_IMAGE_REGISTRY }}
ECR_IMAGE_NAMESPACE: ${{ vars.ECR_IMAGE_NAMESPACE }}
run: | run: |
docker run -v "${DOCKER_CONFIG:-$HOME/.docker}:/root/.docker:ro" \ docker run -v "${DOCKER_CONFIG:-$HOME/.docker}:/root/.docker:ro" \
-e "REGISTRY_AUTH_FILE=/root/.docker/config.json" \ -e "REGISTRY_AUTH_FILE=/root/.docker/config.json" \
quay.io/skopeo/stable:latest \ quay.io/skopeo/stable:latest \
copy -a docker://docker.io/$IMAGE_NAMESPACE/dev-builder-android:$IMAGE_VERSION \ copy -a docker://docker.io/${{ vars.IMAGE_NAMESPACE }}/dev-builder-android:${{ needs.release-dev-builder-images.outputs.version }} \
docker://$ECR_IMAGE_REGISTRY/$ECR_IMAGE_NAMESPACE/dev-builder-android:$IMAGE_VERSION docker://${{ vars.ECR_IMAGE_REGISTRY }}/${{ vars.ECR_IMAGE_NAMESPACE }}/dev-builder-android:${{ needs.release-dev-builder-images.outputs.version }}
docker run -v "${DOCKER_CONFIG:-$HOME/.docker}:/root/.docker:ro" \ docker run -v "${DOCKER_CONFIG:-$HOME/.docker}:/root/.docker:ro" \
-e "REGISTRY_AUTH_FILE=/root/.docker/config.json" \ -e "REGISTRY_AUTH_FILE=/root/.docker/config.json" \
quay.io/skopeo/stable:latest \ quay.io/skopeo/stable:latest \
copy -a docker://docker.io/$IMAGE_NAMESPACE/dev-builder-android:latest \ copy -a docker://docker.io/${{ vars.IMAGE_NAMESPACE }}/dev-builder-android:latest \
docker://$ECR_IMAGE_REGISTRY/$ECR_IMAGE_NAMESPACE/dev-builder-android:latest docker://${{ vars.ECR_IMAGE_REGISTRY }}/${{ vars.ECR_IMAGE_NAMESPACE }}/dev-builder-android:latest
release-dev-builder-images-cn: # Note: Be careful issue: https://github.com/containers/skopeo/issues/1874 and we decide to use the latest stable skopeo container. release-dev-builder-images-cn: # Note: Be careful issue: https://github.com/containers/skopeo/issues/1874 and we decide to use the latest stable skopeo container.
name: Release dev builder images to CN region name: Release dev builder images to CN region
runs-on: ubuntu-22.04 runs-on: ubuntu-20.04
needs: [ needs: [
release-dev-builder-images release-dev-builder-images
] ]
@@ -163,41 +144,29 @@ jobs:
- name: Push dev-builder-ubuntu image - name: Push dev-builder-ubuntu image
shell: bash shell: bash
if: ${{ inputs.release_dev_builder_ubuntu_image }} if: ${{ inputs.release_dev_builder_ubuntu_image }}
env:
IMAGE_VERSION: ${{ needs.release-dev-builder-images.outputs.version }}
IMAGE_NAMESPACE: ${{ vars.IMAGE_NAMESPACE }}
ACR_IMAGE_REGISTRY: ${{ vars.ACR_IMAGE_REGISTRY }}
run: | run: |
docker run -v "${DOCKER_CONFIG:-$HOME/.docker}:/root/.docker:ro" \ docker run -v "${DOCKER_CONFIG:-$HOME/.docker}:/root/.docker:ro" \
-e "REGISTRY_AUTH_FILE=/root/.docker/config.json" \ -e "REGISTRY_AUTH_FILE=/root/.docker/config.json" \
quay.io/skopeo/stable:latest \ quay.io/skopeo/stable:latest \
copy -a docker://docker.io/$IMAGE_NAMESPACE/dev-builder-ubuntu:$IMAGE_VERSION \ copy -a docker://docker.io/${{ vars.IMAGE_NAMESPACE }}/dev-builder-ubuntu:${{ needs.release-dev-builder-images.outputs.version }} \
docker://$ACR_IMAGE_REGISTRY/$IMAGE_NAMESPACE/dev-builder-ubuntu:$IMAGE_VERSION docker://${{ vars.ACR_IMAGE_REGISTRY }}/${{ vars.IMAGE_NAMESPACE }}/dev-builder-ubuntu:${{ needs.release-dev-builder-images.outputs.version }}
- name: Push dev-builder-centos image - name: Push dev-builder-centos image
shell: bash shell: bash
if: ${{ inputs.release_dev_builder_centos_image }} if: ${{ inputs.release_dev_builder_centos_image }}
env:
IMAGE_VERSION: ${{ needs.release-dev-builder-images.outputs.version }}
IMAGE_NAMESPACE: ${{ vars.IMAGE_NAMESPACE }}
ACR_IMAGE_REGISTRY: ${{ vars.ACR_IMAGE_REGISTRY }}
run: | run: |
docker run -v "${DOCKER_CONFIG:-$HOME/.docker}:/root/.docker:ro" \ docker run -v "${DOCKER_CONFIG:-$HOME/.docker}:/root/.docker:ro" \
-e "REGISTRY_AUTH_FILE=/root/.docker/config.json" \ -e "REGISTRY_AUTH_FILE=/root/.docker/config.json" \
quay.io/skopeo/stable:latest \ quay.io/skopeo/stable:latest \
copy -a docker://docker.io/$IMAGE_NAMESPACE/dev-builder-centos:$IMAGE_VERSION \ copy -a docker://docker.io/${{ vars.IMAGE_NAMESPACE }}/dev-builder-centos:${{ needs.release-dev-builder-images.outputs.version }} \
docker://$ACR_IMAGE_REGISTRY/$IMAGE_NAMESPACE/dev-builder-centos:$IMAGE_VERSION docker://${{ vars.ACR_IMAGE_REGISTRY }}/${{ vars.IMAGE_NAMESPACE }}/dev-builder-centos:${{ needs.release-dev-builder-images.outputs.version }}
- name: Push dev-builder-android image - name: Push dev-builder-android image
shell: bash shell: bash
if: ${{ inputs.release_dev_builder_android_image }} if: ${{ inputs.release_dev_builder_android_image }}
env:
IMAGE_VERSION: ${{ needs.release-dev-builder-images.outputs.version }}
IMAGE_NAMESPACE: ${{ vars.IMAGE_NAMESPACE }}
ACR_IMAGE_REGISTRY: ${{ vars.ACR_IMAGE_REGISTRY }}
run: | run: |
docker run -v "${DOCKER_CONFIG:-$HOME/.docker}:/root/.docker:ro" \ docker run -v "${DOCKER_CONFIG:-$HOME/.docker}:/root/.docker:ro" \
-e "REGISTRY_AUTH_FILE=/root/.docker/config.json" \ -e "REGISTRY_AUTH_FILE=/root/.docker/config.json" \
quay.io/skopeo/stable:latest \ quay.io/skopeo/stable:latest \
copy -a docker://docker.io/$IMAGE_NAMESPACE/dev-builder-android:$IMAGE_VERSION \ copy -a docker://docker.io/${{ vars.IMAGE_NAMESPACE }}/dev-builder-android:${{ needs.release-dev-builder-images.outputs.version }} \
docker://$ACR_IMAGE_REGISTRY/$IMAGE_NAMESPACE/dev-builder-android:$IMAGE_VERSION docker://${{ vars.ACR_IMAGE_REGISTRY }}/${{ vars.IMAGE_NAMESPACE }}/dev-builder-android:${{ needs.release-dev-builder-images.outputs.version }}
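
Each push step above drives skopeo through a throwaway docker run with the host's docker config mounted read-only. The same copy with a locally installed skopeo, using placeholder registry, namespace, and tag values:

    skopeo copy -a \
      docker://docker.io/example-namespace/dev-builder-ubuntu:2024.01.01 \
      docker://registry.example.com/example-namespace/dev-builder-ubuntu:2024.01.01
    # -a / --all copies every architecture in a multi-arch manifest list.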

View File

@@ -91,7 +91,12 @@ env:
# The scheduled version is '${{ env.NEXT_RELEASE_VERSION }}-nightly-YYYYMMDD', like v0.2.0-nigthly-20230313; # The scheduled version is '${{ env.NEXT_RELEASE_VERSION }}-nightly-YYYYMMDD', like v0.2.0-nigthly-20230313;
NIGHTLY_RELEASE_PREFIX: nightly NIGHTLY_RELEASE_PREFIX: nightly
# Note: The NEXT_RELEASE_VERSION should be modified manually by every formal release. # Note: The NEXT_RELEASE_VERSION should be modified manually by every formal release.
NEXT_RELEASE_VERSION: v0.13.0 NEXT_RELEASE_VERSION: v0.12.0
# Permission reference: https://docs.github.com/en/actions/using-jobs/assigning-permissions-to-jobs
permissions:
issues: write # Allows the action to create issues for cyborg.
contents: write # Allows the action to create a release.
jobs: jobs:
allocate-runners: allocate-runners:
@@ -117,7 +122,6 @@ jobs:
uses: actions/checkout@v4 uses: actions/checkout@v4
with: with:
fetch-depth: 0 fetch-depth: 0
persist-credentials: false
- name: Check Rust toolchain version - name: Check Rust toolchain version
shell: bash shell: bash
@@ -177,7 +181,6 @@ jobs:
- uses: actions/checkout@v4 - uses: actions/checkout@v4
with: with:
fetch-depth: 0 fetch-depth: 0
persist-credentials: false
- uses: ./.github/actions/build-linux-artifacts - uses: ./.github/actions/build-linux-artifacts
with: with:
@@ -199,7 +202,6 @@ jobs:
- uses: actions/checkout@v4 - uses: actions/checkout@v4
with: with:
fetch-depth: 0 fetch-depth: 0
persist-credentials: false
- uses: ./.github/actions/build-linux-artifacts - uses: ./.github/actions/build-linux-artifacts
with: with:
@@ -235,7 +237,6 @@ jobs:
- uses: actions/checkout@v4 - uses: actions/checkout@v4
with: with:
fetch-depth: 0 fetch-depth: 0
persist-credentials: false
- uses: ./.github/actions/build-macos-artifacts - uses: ./.github/actions/build-macos-artifacts
with: with:
@@ -275,7 +276,6 @@ jobs:
- uses: actions/checkout@v4 - uses: actions/checkout@v4
with: with:
fetch-depth: 0 fetch-depth: 0
persist-credentials: false
- uses: ./.github/actions/build-windows-artifacts - uses: ./.github/actions/build-windows-artifacts
with: with:
@@ -306,18 +306,15 @@ jobs:
- uses: actions/checkout@v4 - uses: actions/checkout@v4
with: with:
fetch-depth: 0 fetch-depth: 0
persist-credentials: false
- name: Build and push images to dockerhub - name: Build and push images to dockerhub
uses: ./.github/actions/build-images uses: ./.github/actions/build-images
with: with:
image-registry: docker.io image-registry: docker.io
image-namespace: ${{ vars.IMAGE_NAMESPACE }} image-namespace: ${{ vars.IMAGE_NAMESPACE }}
image-name: ${{ vars.GREPTIMEDB_IMAGE_NAME }}
image-registry-username: ${{ secrets.DOCKERHUB_USERNAME }} image-registry-username: ${{ secrets.DOCKERHUB_USERNAME }}
image-registry-password: ${{ secrets.DOCKERHUB_TOKEN }} image-registry-password: ${{ secrets.DOCKERHUB_TOKEN }}
version: ${{ needs.allocate-runners.outputs.version }} version: ${{ needs.allocate-runners.outputs.version }}
push-latest-tag: true
- name: Set build image result - name: Set build image result
id: set-build-image-result id: set-build-image-result
@@ -344,14 +341,13 @@ jobs:
- uses: actions/checkout@v4 - uses: actions/checkout@v4
with: with:
fetch-depth: 0 fetch-depth: 0
persist-credentials: false
- name: Release artifacts to CN region - name: Release artifacts to CN region
uses: ./.github/actions/release-cn-artifacts uses: ./.github/actions/release-cn-artifacts
with: with:
src-image-registry: docker.io src-image-registry: docker.io
src-image-namespace: ${{ vars.IMAGE_NAMESPACE }} src-image-namespace: ${{ vars.IMAGE_NAMESPACE }}
src-image-name: ${{ vars.GREPTIMEDB_IMAGE_NAME }} src-image-name: greptimedb
dst-image-registry-username: ${{ secrets.ALICLOUD_USERNAME }} dst-image-registry-username: ${{ secrets.ALICLOUD_USERNAME }}
dst-image-registry-password: ${{ secrets.ALICLOUD_PASSWORD }} dst-image-registry-password: ${{ secrets.ALICLOUD_PASSWORD }}
dst-image-registry: ${{ vars.ACR_IMAGE_REGISTRY }} dst-image-registry: ${{ vars.ACR_IMAGE_REGISTRY }}
@@ -362,7 +358,6 @@ jobs:
aws-cn-secret-access-key: ${{ secrets.AWS_CN_SECRET_ACCESS_KEY }} aws-cn-secret-access-key: ${{ secrets.AWS_CN_SECRET_ACCESS_KEY }}
aws-cn-region: ${{ vars.AWS_RELEASE_BUCKET_REGION }} aws-cn-region: ${{ vars.AWS_RELEASE_BUCKET_REGION }}
dev-mode: false dev-mode: false
upload-to-s3: true
update-version-info: true update-version-info: true
push-latest-tag: true push-latest-tag: true
@@ -382,7 +377,6 @@ jobs:
- uses: actions/checkout@v4 - uses: actions/checkout@v4
with: with:
fetch-depth: 0 fetch-depth: 0
persist-credentials: false
- name: Publish GitHub release - name: Publish GitHub release
uses: ./.github/actions/publish-github-release uses: ./.github/actions/publish-github-release
@@ -406,7 +400,6 @@ jobs:
uses: actions/checkout@v4 uses: actions/checkout@v4
with: with:
fetch-depth: 0 fetch-depth: 0
persist-credentials: false
- name: Stop EC2 runner - name: Stop EC2 runner
uses: ./.github/actions/stop-runner uses: ./.github/actions/stop-runner
@@ -432,7 +425,6 @@ jobs:
uses: actions/checkout@v4 uses: actions/checkout@v4
with: with:
fetch-depth: 0 fetch-depth: 0
persist-credentials: false
- name: Stop EC2 runner - name: Stop EC2 runner
uses: ./.github/actions/stop-runner uses: ./.github/actions/stop-runner
@@ -444,29 +436,6 @@ jobs:
aws-region: ${{ vars.EC2_RUNNER_REGION }} aws-region: ${{ vars.EC2_RUNNER_REGION }}
github-token: ${{ secrets.GH_PERSONAL_ACCESS_TOKEN }} github-token: ${{ secrets.GH_PERSONAL_ACCESS_TOKEN }}
bump-doc-version:
name: Bump doc version
if: ${{ github.event_name == 'push' || github.event_name == 'schedule' }}
needs: [allocate-runners]
runs-on: ubuntu-20.04
# Permission reference: https://docs.github.com/en/actions/using-jobs/assigning-permissions-to-jobs
permissions:
issues: write # Allows the action to create issues for cyborg.
contents: write # Allows the action to create a release.
steps:
- uses: actions/checkout@v4
with:
fetch-depth: 0
persist-credentials: false
- uses: ./.github/actions/setup-cyborg
- name: Bump doc version
working-directory: cyborg
run: pnpm tsx bin/bump-doc-version.ts
env:
VERSION: ${{ needs.allocate-runners.outputs.version }}
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
DOCS_REPO_TOKEN: ${{ secrets.DOCS_REPO_TOKEN }}
notification: notification:
if: ${{ github.repository == 'GreptimeTeam/greptimedb' && (github.event_name == 'push' || github.event_name == 'schedule') && always() }} if: ${{ github.repository == 'GreptimeTeam/greptimedb' && (github.event_name == 'push' || github.event_name == 'schedule') && always() }}
name: Send notification to Greptime team name: Send notification to Greptime team
@@ -476,17 +445,10 @@ jobs:
build-windows-artifacts, build-windows-artifacts,
] ]
runs-on: ubuntu-20.04 runs-on: ubuntu-20.04
# Permission reference: https://docs.github.com/en/actions/using-jobs/assigning-permissions-to-jobs
permissions:
issues: write # Allows the action to create issues for cyborg.
contents: write # Allows the action to create a release.
env: env:
SLACK_WEBHOOK_URL: ${{ secrets.SLACK_WEBHOOK_URL_DEVELOP_CHANNEL }} SLACK_WEBHOOK_URL: ${{ secrets.SLACK_WEBHOOK_URL_DEVELOP_CHANNEL }}
steps: steps:
- uses: actions/checkout@v4 - uses: actions/checkout@v4
with:
fetch-depth: 0
persist-credentials: false
- uses: ./.github/actions/setup-cyborg - uses: ./.github/actions/setup-cyborg
- name: Report CI status - name: Report CI status
id: report-ci-status id: report-ci-status
View File

@@ -4,20 +4,18 @@ on:
- cron: '4 2 * * *' - cron: '4 2 * * *'
workflow_dispatch: workflow_dispatch:
permissions:
contents: read
issues: write
pull-requests: write
jobs: jobs:
maintenance: maintenance:
name: Periodic Maintenance name: Periodic Maintenance
runs-on: ubuntu-latest runs-on: ubuntu-latest
permissions:
contents: read
issues: write
pull-requests: write
if: ${{ github.repository == 'GreptimeTeam/greptimedb' }} if: ${{ github.repository == 'GreptimeTeam/greptimedb' }}
steps: steps:
- uses: actions/checkout@v4 - uses: actions/checkout@v4
with:
persist-credentials: false
- uses: ./.github/actions/setup-cyborg - uses: ./.github/actions/setup-cyborg
- name: Do Maintenance - name: Do Maintenance
working-directory: cyborg working-directory: cyborg
View File

@@ -1,24 +1,18 @@
name: "Semantic Pull Request" name: "Semantic Pull Request"
on: on:
pull_request: pull_request_target:
types: types:
- opened - opened
- reopened - reopened
- edited - edited
concurrency:
group: ${{ github.workflow }}-${{ github.head_ref || github.run_id }}
cancel-in-progress: true
jobs: jobs:
check: check:
runs-on: ubuntu-20.04 runs-on: ubuntu-20.04
timeout-minutes: 10 timeout-minutes: 10
steps: steps:
- uses: actions/checkout@v4 - uses: actions/checkout@v4
with:
persist-credentials: false
- uses: ./.github/actions/setup-cyborg - uses: ./.github/actions/setup-cyborg
- name: Check Pull Request - name: Check Pull Request
working-directory: cyborg working-directory: cyborg
View File

@@ -3,28 +3,30 @@
## Individual Committers (in alphabetical order) ## Individual Committers (in alphabetical order)
* [CookiePieWw](https://github.com/CookiePieWw) * [CookiePieWw](https://github.com/CookiePieWw)
* [KKould](https://github.com/KKould)
* [NiwakaDev](https://github.com/NiwakaDev)
* [etolbakov](https://github.com/etolbakov) * [etolbakov](https://github.com/etolbakov)
* [irenjj](https://github.com/irenjj) * [irenjj](https://github.com/irenjj)
* [KKould](https://github.com/KKould)
* [Lanqing Yang](https://github.com/lyang24)
* [NiwakaDev](https://github.com/NiwakaDev)
* [tisonkun](https://github.com/tisonkun) * [tisonkun](https://github.com/tisonkun)
* [Lanqing Yang](https://github.com/lyang24)
## Team Members (in alphabetical order) ## Team Members (in alphabetical order)
* [Breeze-P](https://github.com/Breeze-P)
* [GrepTime](https://github.com/GrepTime)
* [MichaelScofield](https://github.com/MichaelScofield)
* [Wenjie0329](https://github.com/Wenjie0329)
* [WenyXu](https://github.com/WenyXu)
* [ZonaHex](https://github.com/ZonaHex)
* [apdong2022](https://github.com/apdong2022) * [apdong2022](https://github.com/apdong2022)
* [beryl678](https://github.com/beryl678) * [beryl678](https://github.com/beryl678)
* [Breeze-P](https://github.com/Breeze-P)
* [daviderli614](https://github.com/daviderli614) * [daviderli614](https://github.com/daviderli614)
* [discord9](https://github.com/discord9) * [discord9](https://github.com/discord9)
* [evenyag](https://github.com/evenyag) * [evenyag](https://github.com/evenyag)
* [fengjiachun](https://github.com/fengjiachun) * [fengjiachun](https://github.com/fengjiachun)
* [fengys1996](https://github.com/fengys1996) * [fengys1996](https://github.com/fengys1996)
* [GrepTime](https://github.com/GrepTime)
* [holalengyu](https://github.com/holalengyu) * [holalengyu](https://github.com/holalengyu)
* [killme2008](https://github.com/killme2008) * [killme2008](https://github.com/killme2008)
* [MichaelScofield](https://github.com/MichaelScofield)
* [nicecui](https://github.com/nicecui) * [nicecui](https://github.com/nicecui)
* [paomian](https://github.com/paomian) * [paomian](https://github.com/paomian)
* [shuiyisong](https://github.com/shuiyisong) * [shuiyisong](https://github.com/shuiyisong)
@@ -32,14 +34,11 @@
* [sunng87](https://github.com/sunng87) * [sunng87](https://github.com/sunng87)
* [v0y4g3r](https://github.com/v0y4g3r) * [v0y4g3r](https://github.com/v0y4g3r)
* [waynexia](https://github.com/waynexia) * [waynexia](https://github.com/waynexia)
* [Wenjie0329](https://github.com/Wenjie0329)
* [WenyXu](https://github.com/WenyXu)
* [xtang](https://github.com/xtang) * [xtang](https://github.com/xtang)
* [zhaoyingnan01](https://github.com/zhaoyingnan01) * [zhaoyingnan01](https://github.com/zhaoyingnan01)
* [zhongzc](https://github.com/zhongzc) * [zhongzc](https://github.com/zhongzc)
* [ZonaHex](https://github.com/ZonaHex)
* [zyy17](https://github.com/zyy17) * [zyy17](https://github.com/zyy17)
## All Contributors ## All Contributors
To see the full list of contributors, please visit our [Contributors page](https://github.com/GreptimeTeam/greptimedb/graphs/contributors) [![All Contributors](https://contrib.rocks/image?repo=GreptimeTeam/greptimedb)](https://github.com/GreptimeTeam/greptimedb/graphs/contributors)
Cargo.lock (generated, 3997 lines): file diff suppressed because it is too large.

View File

@@ -55,6 +55,7 @@ members = [
"src/promql", "src/promql",
"src/puffin", "src/puffin",
"src/query", "src/query",
"src/script",
"src/servers", "src/servers",
"src/session", "src/session",
"src/sql", "src/sql",
@@ -67,7 +68,7 @@ members = [
resolver = "2" resolver = "2"
[workspace.package] [workspace.package]
version = "0.13.0" version = "0.12.0"
edition = "2021" edition = "2021"
license = "Apache-2.0" license = "Apache-2.0"
@@ -78,10 +79,11 @@ clippy.dbg_macro = "warn"
clippy.implicit_clone = "warn" clippy.implicit_clone = "warn"
clippy.readonly_write_lock = "allow" clippy.readonly_write_lock = "allow"
rust.unknown_lints = "deny" rust.unknown_lints = "deny"
# Remove this after https://github.com/PyO3/pyo3/issues/4094
rust.non_local_definitions = "allow"
rust.unexpected_cfgs = { level = "warn", check-cfg = ['cfg(tokio_unstable)'] } rust.unexpected_cfgs = { level = "warn", check-cfg = ['cfg(tokio_unstable)'] }
[workspace.dependencies] [workspace.dependencies]
# DO_NOT_REMOVE_THIS: BEGIN_OF_EXTERNAL_DEPENDENCIES
# We turn off default-features for some dependencies here so the workspaces which inherit them can # We turn off default-features for some dependencies here so the workspaces which inherit them can
# selectively turn them on if needed, since we can override default-features = true (from false) # selectively turn them on if needed, since we can override default-features = true (from false)
# for the inherited dependency but cannot do the reverse (override from true to false). # for the inherited dependency but cannot do the reverse (override from true to false).
@@ -89,66 +91,55 @@ rust.unexpected_cfgs = { level = "warn", check-cfg = ['cfg(tokio_unstable)'] }
# See for more details: https://github.com/rust-lang/cargo/issues/11329 # See for more details: https://github.com/rust-lang/cargo/issues/11329
ahash = { version = "0.8", features = ["compile-time-rng"] } ahash = { version = "0.8", features = ["compile-time-rng"] }
aquamarine = "0.3" aquamarine = "0.3"
arrow = { version = "53.0.0", features = ["prettyprint"] } arrow = { version = "51.0.0", features = ["prettyprint"] }
arrow-array = { version = "53.0.0", default-features = false, features = ["chrono-tz"] } arrow-array = { version = "51.0.0", default-features = false, features = ["chrono-tz"] }
arrow-flight = "53.0" arrow-flight = "51.0"
arrow-ipc = { version = "53.0.0", default-features = false, features = ["lz4", "zstd"] } arrow-ipc = { version = "51.0.0", default-features = false, features = ["lz4", "zstd"] }
arrow-schema = { version = "53.0", features = ["serde"] } arrow-schema = { version = "51.0", features = ["serde"] }
async-stream = "0.3" async-stream = "0.3"
async-trait = "0.1" async-trait = "0.1"
# Remember to update axum-extra, axum-macros when updating axum axum = { version = "0.6", features = ["headers"] }
axum = "0.8"
axum-extra = "0.10"
axum-macros = "0.4"
backon = "1"
base64 = "0.21" base64 = "0.21"
bigdecimal = "0.4.2" bigdecimal = "0.4.2"
bitflags = "2.4.1" bitflags = "2.4.1"
bytemuck = "1.12" bytemuck = "1.12"
bytes = { version = "1.7", features = ["serde"] } bytes = { version = "1.7", features = ["serde"] }
chrono = { version = "0.4", features = ["serde"] } chrono = { version = "0.4", features = ["serde"] }
chrono-tz = "0.10.1"
clap = { version = "4.4", features = ["derive"] } clap = { version = "4.4", features = ["derive"] }
config = "0.13.0" config = "0.13.0"
crossbeam-utils = "0.8" crossbeam-utils = "0.8"
dashmap = "5.4" dashmap = "5.4"
datafusion = { git = "https://github.com/apache/datafusion.git", rev = "2464703c84c400a09cc59277018813f0e797bb4e" } datafusion = { git = "https://github.com/waynexia/arrow-datafusion.git", rev = "7823ef2f63663907edab46af0d51359900f608d6" }
datafusion-common = { git = "https://github.com/apache/datafusion.git", rev = "2464703c84c400a09cc59277018813f0e797bb4e" } datafusion-common = { git = "https://github.com/waynexia/arrow-datafusion.git", rev = "7823ef2f63663907edab46af0d51359900f608d6" }
datafusion-expr = { git = "https://github.com/apache/datafusion.git", rev = "2464703c84c400a09cc59277018813f0e797bb4e" } datafusion-expr = { git = "https://github.com/waynexia/arrow-datafusion.git", rev = "7823ef2f63663907edab46af0d51359900f608d6" }
datafusion-functions = { git = "https://github.com/apache/datafusion.git", rev = "2464703c84c400a09cc59277018813f0e797bb4e" } datafusion-functions = { git = "https://github.com/waynexia/arrow-datafusion.git", rev = "7823ef2f63663907edab46af0d51359900f608d6" }
datafusion-optimizer = { git = "https://github.com/apache/datafusion.git", rev = "2464703c84c400a09cc59277018813f0e797bb4e" } datafusion-optimizer = { git = "https://github.com/waynexia/arrow-datafusion.git", rev = "7823ef2f63663907edab46af0d51359900f608d6" }
datafusion-physical-expr = { git = "https://github.com/apache/datafusion.git", rev = "2464703c84c400a09cc59277018813f0e797bb4e" } datafusion-physical-expr = { git = "https://github.com/waynexia/arrow-datafusion.git", rev = "7823ef2f63663907edab46af0d51359900f608d6" }
datafusion-physical-plan = { git = "https://github.com/apache/datafusion.git", rev = "2464703c84c400a09cc59277018813f0e797bb4e" } datafusion-physical-plan = { git = "https://github.com/waynexia/arrow-datafusion.git", rev = "7823ef2f63663907edab46af0d51359900f608d6" }
datafusion-sql = { git = "https://github.com/apache/datafusion.git", rev = "2464703c84c400a09cc59277018813f0e797bb4e" } datafusion-sql = { git = "https://github.com/waynexia/arrow-datafusion.git", rev = "7823ef2f63663907edab46af0d51359900f608d6" }
datafusion-substrait = { git = "https://github.com/apache/datafusion.git", rev = "2464703c84c400a09cc59277018813f0e797bb4e" } datafusion-substrait = { git = "https://github.com/waynexia/arrow-datafusion.git", rev = "7823ef2f63663907edab46af0d51359900f608d6" }
deadpool = "0.10"
deadpool-postgres = "0.12"
derive_builder = "0.12" derive_builder = "0.12"
dotenv = "0.15" dotenv = "0.15"
etcd-client = "0.14" etcd-client = "0.13"
fst = "0.4.7" fst = "0.4.7"
futures = "0.3" futures = "0.3"
futures-util = "0.3" futures-util = "0.3"
greptime-proto = { git = "https://github.com/GreptimeTeam/greptime-proto.git", rev = "072ce580502e015df1a6b03a185b60309a7c2a7a" } greptime-proto = { git = "https://github.com/GreptimeTeam/greptime-proto.git", rev = "a875e976441188028353f7274a46a7e6e065c5d4" }
hex = "0.4" hex = "0.4"
http = "1" http = "0.2"
humantime = "2.1" humantime = "2.1"
humantime-serde = "1.1" humantime-serde = "1.1"
hyper = "1.1"
hyper-util = "0.1"
itertools = "0.10" itertools = "0.10"
jsonb = { git = "https://github.com/databendlabs/jsonb.git", rev = "8c8d2fc294a39f3ff08909d60f718639cfba3875", default-features = false } jsonb = { git = "https://github.com/databendlabs/jsonb.git", rev = "8c8d2fc294a39f3ff08909d60f718639cfba3875", default-features = false }
lazy_static = "1.4" lazy_static = "1.4"
local-ip-address = "0.6" meter-core = { git = "https://github.com/GreptimeTeam/greptime-meter.git", rev = "a10facb353b41460eeb98578868ebf19c2084fac" }
loki-proto = { git = "https://github.com/GreptimeTeam/loki-proto.git", rev = "1434ecf23a2654025d86188fb5205e7a74b225d3" }
meter-core = { git = "https://github.com/GreptimeTeam/greptime-meter.git", rev = "5618e779cf2bb4755b499c630fba4c35e91898cb" }
mockall = "0.11.4" mockall = "0.11.4"
moka = "0.12" moka = "0.12"
nalgebra = "0.33" nalgebra = "0.33"
notify = "6.1" notify = "6.1"
num_cpus = "1.16" num_cpus = "1.16"
once_cell = "1.18" once_cell = "1.18"
opentelemetry-proto = { version = "0.27", features = [ opentelemetry-proto = { version = "0.5", features = [
"gen-tonic", "gen-tonic",
"metrics", "metrics",
"trace", "trace",
@@ -156,14 +147,12 @@ opentelemetry-proto = { version = "0.27", features = [
"logs", "logs",
] } ] }
parking_lot = "0.12" parking_lot = "0.12"
parquet = { version = "53.0.0", default-features = false, features = ["arrow", "async", "object_store"] } parquet = { version = "51.0.0", default-features = false, features = ["arrow", "async", "object_store"] }
paste = "1.0" paste = "1.0"
pin-project = "1.0" pin-project = "1.0"
prometheus = { version = "0.13.3", features = ["process"] } prometheus = { version = "0.13.3", features = ["process"] }
promql-parser = { git = "https://github.com/GreptimeTeam/promql-parser.git", features = [ promql-parser = { version = "0.4.3", features = ["ser"] }
"ser", prost = "0.12"
], rev = "27abb8e16003a50c720f00d6c85f41f5fa2a2a8e" }
prost = "0.13"
raft-engine = { version = "0.4.1", default-features = false } raft-engine = { version = "0.4.1", default-features = false }
rand = "0.8" rand = "0.8"
ratelimit = "0.9" ratelimit = "0.9"
@@ -182,36 +171,33 @@ rstest = "0.21"
rstest_reuse = "0.7" rstest_reuse = "0.7"
rust_decimal = "1.33" rust_decimal = "1.33"
rustc-hash = "2.0" rustc-hash = "2.0"
rustls = { version = "0.23.20", default-features = false } # override by patch, see [patch.crates-io]
serde = { version = "1.0", features = ["derive"] } serde = { version = "1.0", features = ["derive"] }
serde_json = { version = "1.0", features = ["float_roundtrip"] } serde_json = { version = "1.0", features = ["float_roundtrip"] }
serde_with = "3" serde_with = "3"
shadow-rs = "0.38" shadow-rs = "0.35"
similar-asserts = "1.6.0" similar-asserts = "1.6.0"
smallvec = { version = "1", features = ["serde"] } smallvec = { version = "1", features = ["serde"] }
snafu = "0.8" snafu = "0.8"
sysinfo = "0.30" sysinfo = "0.30"
# on branch v0.52.x # on branch v0.44.x
sqlparser = { git = "https://github.com/GreptimeTeam/sqlparser-rs.git", rev = "71dd86058d2af97b9925093d40c4e03360403170", features = [ sqlparser = { git = "https://github.com/GreptimeTeam/sqlparser-rs.git", rev = "54a267ac89c09b11c0c88934690530807185d3e7", features = [
"visitor", "visitor",
"serde", "serde",
] } # on branch v0.44.x ] }
strum = { version = "0.25", features = ["derive"] } strum = { version = "0.25", features = ["derive"] }
tempfile = "3" tempfile = "3"
tokio = { version = "1.40", features = ["full"] } tokio = { version = "1.40", features = ["full"] }
tokio-postgres = "0.7" tokio-postgres = "0.7"
tokio-rustls = { version = "0.26.0", default-features = false } # override by patch, see [patch.crates-io]
tokio-stream = "0.1" tokio-stream = "0.1"
tokio-util = { version = "0.7", features = ["io-util", "compat"] } tokio-util = { version = "0.7", features = ["io-util", "compat"] }
toml = "0.8.8" toml = "0.8.8"
tonic = { version = "0.12", features = ["tls", "gzip", "zstd"] } tonic = { version = "0.11", features = ["tls", "gzip", "zstd"] }
tower = "0.5" tower = "0.4"
tracing-appender = "0.2" tracing-appender = "0.2"
tracing-subscriber = { version = "0.3", features = ["env-filter", "json", "fmt"] } tracing-subscriber = { version = "0.3", features = ["env-filter", "json", "fmt"] }
typetag = "0.2" typetag = "0.2"
uuid = { version = "1.7", features = ["serde", "v4", "fast-rng"] } uuid = { version = "1.7", features = ["serde", "v4", "fast-rng"] }
zstd = "0.13" zstd = "0.13"
# DO_NOT_REMOVE_THIS: END_OF_EXTERNAL_DEPENDENCIES
## workspaces members ## workspaces members
api = { path = "src/api" } api = { path = "src/api" }
@@ -268,6 +254,7 @@ plugins = { path = "src/plugins" }
promql = { path = "src/promql" } promql = { path = "src/promql" }
puffin = { path = "src/puffin" } puffin = { path = "src/puffin" }
query = { path = "src/query" } query = { path = "src/query" }
script = { path = "src/script" }
servers = { path = "src/servers" } servers = { path = "src/servers" }
session = { path = "src/session" } session = { path = "src/session" }
sql = { path = "src/sql" } sql = { path = "src/sql" }
@@ -277,16 +264,18 @@ table = { path = "src/table" }
[patch.crates-io] [patch.crates-io]
# change all rustls dependencies to use our fork to default to `ring` to make it "just work" # change all rustls dependencies to use our fork to default to `ring` to make it "just work"
hyper-rustls = { git = "https://github.com/GreptimeTeam/hyper-rustls", rev = "a951e03" } # version = "0.27.5" with ring patch hyper-rustls = { git = "https://github.com/GreptimeTeam/hyper-rustls" }
rustls = { git = "https://github.com/GreptimeTeam/rustls", rev = "34fd0c6" } # version = "0.23.20" with ring patch rustls = { git = "https://github.com/GreptimeTeam/rustls" }
tokio-rustls = { git = "https://github.com/GreptimeTeam/tokio-rustls", rev = "4604ca6" } # version = "0.26.0" with ring patch tokio-rustls = { git = "https://github.com/GreptimeTeam/tokio-rustls" }
# This is commented, since we are not using aws-lc-sys, if we need to use it, we need to uncomment this line or use a release after this commit, or it wouldn't compile with gcc < 8.1 # This is commented, since we are not using aws-lc-sys, if we need to use it, we need to uncomment this line or use a release after this commit, or it wouldn't compile with gcc < 8.1
# see https://github.com/aws/aws-lc-rs/pull/526 # see https://github.com/aws/aws-lc-rs/pull/526
# aws-lc-sys = { git ="https://github.com/aws/aws-lc-rs", rev = "556558441e3494af4b156ae95ebc07ebc2fd38aa" } # aws-lc-sys = { git ="https://github.com/aws/aws-lc-rs", rev = "556558441e3494af4b156ae95ebc07ebc2fd38aa" }
# Apply a fix for pprof for unaligned pointer access
pprof = { git = "https://github.com/GreptimeTeam/pprof-rs", rev = "1bd1e21" }
[workspace.dependencies.meter-macros] [workspace.dependencies.meter-macros]
git = "https://github.com/GreptimeTeam/greptime-meter.git" git = "https://github.com/GreptimeTeam/greptime-meter.git"
rev = "5618e779cf2bb4755b499c630fba4c35e91898cb" rev = "a10facb353b41460eeb98578868ebf19c2084fac"
[profile.release] [profile.release]
debug = 1 debug = 1
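The workspace manifest above turns default features off for several dependencies so that member crates can opt back in selectively. As a minimal sketch of that inheritance (a hypothetical member crate, not part of this diff):

```toml
# Hypothetical member manifest, e.g. src/<crate>/Cargo.toml (illustration only).
[dependencies]
# Inherits the workspace-pinned version and feature set unchanged.
tokio = { workspace = true }
# A member may enable *additional* features on top of the inherited entry...
serde_json = { workspace = true, features = ["preserve_order"] }
# ...but it cannot switch default features back off, which is why the workspace
# declares `default-features = false` up front where that matters.
```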
View File

@@ -1,6 +1,3 @@
[target.aarch64-unknown-linux-gnu]
image = "ghcr.io/cross-rs/aarch64-unknown-linux-gnu:0.2.5"
[build] [build]
pre-build = [ pre-build = [
"dpkg --add-architecture $CROSS_DEB_ARCH", "dpkg --add-architecture $CROSS_DEB_ARCH",
@@ -8,8 +5,3 @@ pre-build = [
"curl -LO https://github.com/protocolbuffers/protobuf/releases/download/v3.15.8/protoc-3.15.8-linux-x86_64.zip && unzip protoc-3.15.8-linux-x86_64.zip -d /usr/", "curl -LO https://github.com/protocolbuffers/protobuf/releases/download/v3.15.8/protoc-3.15.8-linux-x86_64.zip && unzip protoc-3.15.8-linux-x86_64.zip -d /usr/",
"chmod a+x /usr/bin/protoc && chmod -R a+rx /usr/include/google", "chmod a+x /usr/bin/protoc && chmod -R a+rx /usr/include/google",
] ]
[build.env]
passthrough = [
"JEMALLOC_SYS_WITH_LG_PAGE",
]
View File

@@ -8,7 +8,7 @@ CARGO_BUILD_OPTS := --locked
IMAGE_REGISTRY ?= docker.io IMAGE_REGISTRY ?= docker.io
IMAGE_NAMESPACE ?= greptime IMAGE_NAMESPACE ?= greptime
IMAGE_TAG ?= latest IMAGE_TAG ?= latest
DEV_BUILDER_IMAGE_TAG ?= 2024-12-25-9d0fa5d5-20250124085746 DEV_BUILDER_IMAGE_TAG ?= 2024-10-19-a5c00e85-20241024184445
BUILDX_MULTI_PLATFORM_BUILD ?= false BUILDX_MULTI_PLATFORM_BUILD ?= false
BUILDX_BUILDER_NAME ?= gtbuilder BUILDX_BUILDER_NAME ?= gtbuilder
BASE_IMAGE ?= ubuntu BASE_IMAGE ?= ubuntu
@@ -60,8 +60,6 @@ ifeq ($(BUILDX_MULTI_PLATFORM_BUILD), all)
BUILDX_MULTI_PLATFORM_BUILD_OPTS := --platform linux/amd64,linux/arm64 --push BUILDX_MULTI_PLATFORM_BUILD_OPTS := --platform linux/amd64,linux/arm64 --push
else ifeq ($(BUILDX_MULTI_PLATFORM_BUILD), amd64) else ifeq ($(BUILDX_MULTI_PLATFORM_BUILD), amd64)
BUILDX_MULTI_PLATFORM_BUILD_OPTS := --platform linux/amd64 --push BUILDX_MULTI_PLATFORM_BUILD_OPTS := --platform linux/amd64 --push
else ifeq ($(BUILDX_MULTI_PLATFORM_BUILD), arm64)
BUILDX_MULTI_PLATFORM_BUILD_OPTS := --platform linux/arm64 --push
else else
BUILDX_MULTI_PLATFORM_BUILD_OPTS := -o type=docker BUILDX_MULTI_PLATFORM_BUILD_OPTS := -o type=docker
endif endif
@@ -167,14 +165,15 @@ nextest: ## Install nextest tools.
sqlness-test: ## Run sqlness test. sqlness-test: ## Run sqlness test.
cargo sqlness ${SQLNESS_OPTS} cargo sqlness ${SQLNESS_OPTS}
# Run fuzz test ${FUZZ_TARGET}.
RUNS ?= 1 RUNS ?= 1
FUZZ_TARGET ?= fuzz_alter_table FUZZ_TARGET ?= fuzz_alter_table
.PHONY: fuzz .PHONY: fuzz
fuzz: ## Run fuzz test ${FUZZ_TARGET}. fuzz:
cargo fuzz run ${FUZZ_TARGET} --fuzz-dir tests-fuzz -D -s none -- -runs=${RUNS} cargo fuzz run ${FUZZ_TARGET} --fuzz-dir tests-fuzz -D -s none -- -runs=${RUNS}
.PHONY: fuzz-ls .PHONY: fuzz-ls
fuzz-ls: ## List all fuzz targets. fuzz-ls:
cargo fuzz list --fuzz-dir tests-fuzz cargo fuzz list --fuzz-dir tests-fuzz
.PHONY: check .PHONY: check
View File

@@ -13,7 +13,7 @@
<a href="https://greptime.com/product/cloud">GreptimeCloud</a> | <a href="https://greptime.com/product/cloud">GreptimeCloud</a> |
<a href="https://docs.greptime.com/">User Guide</a> | <a href="https://docs.greptime.com/">User Guide</a> |
<a href="https://greptimedb.rs/">API Docs</a> | <a href="https://greptimedb.rs/">API Docs</a> |
<a href="https://github.com/GreptimeTeam/greptimedb/issues/5446">Roadmap 2025</a> <a href="https://github.com/GreptimeTeam/greptimedb/issues/3412">Roadmap 2024</a>
</h4> </h4>
<a href="https://github.com/GreptimeTeam/greptimedb/releases/latest"> <a href="https://github.com/GreptimeTeam/greptimedb/releases/latest">
@@ -116,7 +116,7 @@ docker run -p 127.0.0.1:4000-4003:4000-4003 \
--name greptime --rm \ --name greptime --rm \
greptime/greptimedb:latest standalone start \ greptime/greptimedb:latest standalone start \
--http-addr 0.0.0.0:4000 \ --http-addr 0.0.0.0:4000 \
--rpc-bind-addr 0.0.0.0:4001 \ --rpc-addr 0.0.0.0:4001 \
--mysql-addr 0.0.0.0:4002 \ --mysql-addr 0.0.0.0:4002 \
--postgres-addr 0.0.0.0:4003 --postgres-addr 0.0.0.0:4003
``` ```
@@ -138,8 +138,7 @@ Check the prerequisite:
* [Rust toolchain](https://www.rust-lang.org/tools/install) (nightly) * [Rust toolchain](https://www.rust-lang.org/tools/install) (nightly)
* [Protobuf compiler](https://grpc.io/docs/protoc-installation/) (>= 3.15) * [Protobuf compiler](https://grpc.io/docs/protoc-installation/) (>= 3.15)
* C/C++ building essentials, including `gcc`/`g++`/`autoconf` and glibc library (e.g. `libc6-dev` on Ubuntu and `glibc-devel` on Fedora) * Python toolchain (optional): Required only if built with PyO3 backend. More details for compiling with PyO3 can be found in its [documentation](https://pyo3.rs/v0.18.1/building_and_distribution#configuring-the-python-version).
* Python toolchain (optional): Required only if using some test scripts.
Build GreptimeDB binary: Build GreptimeDB binary:
@@ -229,3 +228,4 @@ Special thanks to all the contributors who have propelled GreptimeDB forward. Fo
- GreptimeDB's query engine is powered by [Apache Arrow DataFusion™](https://arrow.apache.org/datafusion/). - GreptimeDB's query engine is powered by [Apache Arrow DataFusion™](https://arrow.apache.org/datafusion/).
- [Apache OpenDAL™](https://opendal.apache.org) gives GreptimeDB a very general and elegant data access abstraction layer. - [Apache OpenDAL™](https://opendal.apache.org) gives GreptimeDB a very general and elegant data access abstraction layer.
- GreptimeDB's meta service is based on [etcd](https://etcd.io/). - GreptimeDB's meta service is based on [etcd](https://etcd.io/).
- GreptimeDB uses [RustPython](https://github.com/RustPython/RustPython) for experimental embedded python scripting.
View File

@@ -26,10 +26,8 @@
| `http.addr` | String | `127.0.0.1:4000` | The address to bind the HTTP server. | | `http.addr` | String | `127.0.0.1:4000` | The address to bind the HTTP server. |
| `http.timeout` | String | `30s` | HTTP request timeout. Set to 0 to disable timeout. | | `http.timeout` | String | `30s` | HTTP request timeout. Set to 0 to disable timeout. |
| `http.body_limit` | String | `64MB` | HTTP request body limit.<br/>The following units are supported: `B`, `KB`, `KiB`, `MB`, `MiB`, `GB`, `GiB`, `TB`, `TiB`, `PB`, `PiB`.<br/>Set to 0 to disable limit. | | `http.body_limit` | String | `64MB` | HTTP request body limit.<br/>The following units are supported: `B`, `KB`, `KiB`, `MB`, `MiB`, `GB`, `GiB`, `TB`, `TiB`, `PB`, `PiB`.<br/>Set to 0 to disable limit. |
| `http.enable_cors` | Bool | `true` | HTTP CORS support, it's turned on by default<br/>This allows browser to access http APIs without CORS restrictions |
| `http.cors_allowed_origins` | Array | Unset | Customize allowed origins for HTTP CORS. |
| `grpc` | -- | -- | The gRPC server options. | | `grpc` | -- | -- | The gRPC server options. |
| `grpc.bind_addr` | String | `127.0.0.1:4001` | The address to bind the gRPC server. | | `grpc.addr` | String | `127.0.0.1:4001` | The address to bind the gRPC server. |
| `grpc.runtime_size` | Integer | `8` | The number of server worker threads. | | `grpc.runtime_size` | Integer | `8` | The number of server worker threads. |
| `grpc.tls` | -- | -- | gRPC server TLS options, see `mysql.tls` section. | | `grpc.tls` | -- | -- | gRPC server TLS options, see `mysql.tls` section. |
| `grpc.tls.mode` | String | `disable` | TLS mode. | | `grpc.tls.mode` | String | `disable` | TLS mode. |
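For reference, a minimal standalone TOML sketch of the `http.*` rows above, using the documented defaults; `cors_allowed_origins` is unset by default, so it is shown commented out with a placeholder origin:

```toml
[http]
addr = "127.0.0.1:4000"
timeout = "30s"
body_limit = "64MB"
enable_cors = true
# cors_allowed_origins = ["https://example.com"]
```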
@@ -40,7 +38,6 @@
| `mysql.enable` | Bool | `true` | Whether to enable. | | `mysql.enable` | Bool | `true` | Whether to enable. |
| `mysql.addr` | String | `127.0.0.1:4002` | The addr to bind the MySQL server. | | `mysql.addr` | String | `127.0.0.1:4002` | The addr to bind the MySQL server. |
| `mysql.runtime_size` | Integer | `2` | The number of server worker threads. | | `mysql.runtime_size` | Integer | `2` | The number of server worker threads. |
| `mysql.keep_alive` | String | `0s` | Server-side keep-alive time.<br/>Set to 0 (default) to disable. |
| `mysql.tls` | -- | -- | -- | | `mysql.tls` | -- | -- | -- |
| `mysql.tls.mode` | String | `disable` | TLS mode, refer to https://www.postgresql.org/docs/current/libpq-ssl.html<br/>- `disable` (default value)<br/>- `prefer`<br/>- `require`<br/>- `verify-ca`<br/>- `verify-full` | | `mysql.tls.mode` | String | `disable` | TLS mode, refer to https://www.postgresql.org/docs/current/libpq-ssl.html<br/>- `disable` (default value)<br/>- `prefer`<br/>- `require`<br/>- `verify-ca`<br/>- `verify-full` |
| `mysql.tls.cert_path` | String | Unset | Certificate file path. | | `mysql.tls.cert_path` | String | Unset | Certificate file path. |
@@ -50,7 +47,6 @@
| `postgres.enable` | Bool | `true` | Whether to enable | | `postgres.enable` | Bool | `true` | Whether to enable |
| `postgres.addr` | String | `127.0.0.1:4003` | The addr to bind the PostgreSQL server. | | `postgres.addr` | String | `127.0.0.1:4003` | The addr to bind the PostgreSQL server. |
| `postgres.runtime_size` | Integer | `2` | The number of server worker threads. | | `postgres.runtime_size` | Integer | `2` | The number of server worker threads. |
| `postgres.keep_alive` | String | `0s` | Server-side keep-alive time.<br/>Set to 0 (default) to disable. |
| `postgres.tls` | -- | -- | PostgresSQL server TLS options, see `mysql.tls` section. | | `postgres.tls` | -- | -- | PostgresSQL server TLS options, see `mysql.tls` section. |
| `postgres.tls.mode` | String | `disable` | TLS mode. | | `postgres.tls.mode` | String | `disable` | TLS mode. |
| `postgres.tls.cert_path` | String | Unset | Certificate file path. | | `postgres.tls.cert_path` | String | Unset | Certificate file path. |
@@ -60,8 +56,6 @@
| `opentsdb.enable` | Bool | `true` | Whether to enable OpenTSDB put in HTTP API. | | `opentsdb.enable` | Bool | `true` | Whether to enable OpenTSDB put in HTTP API. |
| `influxdb` | -- | -- | InfluxDB protocol options. | | `influxdb` | -- | -- | InfluxDB protocol options. |
| `influxdb.enable` | Bool | `true` | Whether to enable InfluxDB protocol in HTTP API. | | `influxdb.enable` | Bool | `true` | Whether to enable InfluxDB protocol in HTTP API. |
| `jaeger` | -- | -- | Jaeger protocol options. |
| `jaeger.enable` | Bool | `true` | Whether to enable Jaeger protocol in HTTP API. |
| `prom_store` | -- | -- | Prometheus remote storage options | | `prom_store` | -- | -- | Prometheus remote storage options |
| `prom_store.enable` | Bool | `true` | Whether to enable Prometheus remote write and read in HTTP API. | | `prom_store.enable` | Bool | `true` | Whether to enable Prometheus remote write and read in HTTP API. |
| `prom_store.with_metric_engine` | Bool | `true` | Whether to store the data from Prometheus remote write in metric engine. | | `prom_store.with_metric_engine` | Bool | `true` | Whether to store the data from Prometheus remote write in metric engine. |
@@ -69,8 +63,8 @@
| `wal.provider` | String | `raft_engine` | The provider of the WAL.<br/>- `raft_engine`: the wal is stored in the local file system by raft-engine.<br/>- `kafka`: it's remote wal that data is stored in Kafka. | | `wal.provider` | String | `raft_engine` | The provider of the WAL.<br/>- `raft_engine`: the wal is stored in the local file system by raft-engine.<br/>- `kafka`: it's remote wal that data is stored in Kafka. |
| `wal.dir` | String | Unset | The directory to store the WAL files.<br/>**It's only used when the provider is `raft_engine`**. | | `wal.dir` | String | Unset | The directory to store the WAL files.<br/>**It's only used when the provider is `raft_engine`**. |
| `wal.file_size` | String | `128MB` | The size of the WAL segment file.<br/>**It's only used when the provider is `raft_engine`**. | | `wal.file_size` | String | `128MB` | The size of the WAL segment file.<br/>**It's only used when the provider is `raft_engine`**. |
| `wal.purge_threshold` | String | `1GB` | The threshold of the WAL size to trigger a purge.<br/>**It's only used when the provider is `raft_engine`**. | | `wal.purge_threshold` | String | `1GB` | The threshold of the WAL size to trigger a flush.<br/>**It's only used when the provider is `raft_engine`**. |
| `wal.purge_interval` | String | `1m` | The interval to trigger a purge.<br/>**It's only used when the provider is `raft_engine`**. | | `wal.purge_interval` | String | `1m` | The interval to trigger a flush.<br/>**It's only used when the provider is `raft_engine`**. |
| `wal.read_batch_size` | Integer | `128` | The read batch size.<br/>**It's only used when the provider is `raft_engine`**. | | `wal.read_batch_size` | Integer | `128` | The read batch size.<br/>**It's only used when the provider is `raft_engine`**. |
| `wal.sync_write` | Bool | `false` | Whether to use sync write.<br/>**It's only used when the provider is `raft_engine`**. | | `wal.sync_write` | Bool | `false` | Whether to use sync write.<br/>**It's only used when the provider is `raft_engine`**. |
| `wal.enable_log_recycle` | Bool | `true` | Whether to reuse logically truncated log files.<br/>**It's only used when the provider is `raft_engine`**. | | `wal.enable_log_recycle` | Bool | `true` | Whether to reuse logically truncated log files.<br/>**It's only used when the provider is `raft_engine`**. |
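A minimal sketch of the raft_engine WAL section described above, with the documented defaults; as the updated wording says, `purge_threshold` and `purge_interval` govern purging of old segments:

```toml
[wal]
provider = "raft_engine"
file_size = "128MB"        # size of a WAL segment file
purge_threshold = "1GB"    # WAL size that triggers a purge
purge_interval = "1m"
read_batch_size = 128
sync_write = false
```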
@@ -92,18 +86,15 @@
| `wal.backoff_deadline` | String | `5mins` | The deadline of retries.<br/>**It's only used when the provider is `kafka`**. | | `wal.backoff_deadline` | String | `5mins` | The deadline of retries.<br/>**It's only used when the provider is `kafka`**. |
| `wal.overwrite_entry_start_id` | Bool | `false` | Ignore missing entries during read WAL.<br/>**It's only used when the provider is `kafka`**.<br/><br/>This option ensures that when Kafka messages are deleted, the system<br/>can still successfully replay memtable data without throwing an<br/>out-of-range error.<br/>However, enabling this option might lead to unexpected data loss,<br/>as the system will skip over missing entries instead of treating<br/>them as critical errors. | | `wal.overwrite_entry_start_id` | Bool | `false` | Ignore missing entries during read WAL.<br/>**It's only used when the provider is `kafka`**.<br/><br/>This option ensures that when Kafka messages are deleted, the system<br/>can still successfully replay memtable data without throwing an<br/>out-of-range error.<br/>However, enabling this option might lead to unexpected data loss,<br/>as the system will skip over missing entries instead of treating<br/>them as critical errors. |
| `metadata_store` | -- | -- | Metadata storage options. | | `metadata_store` | -- | -- | Metadata storage options. |
| `metadata_store.file_size` | String | `64MB` | The size of the metadata store log file. | | `metadata_store.file_size` | String | `256MB` | Kv file size in bytes. |
| `metadata_store.purge_threshold` | String | `256MB` | The threshold of the metadata store size to trigger a purge. | | `metadata_store.purge_threshold` | String | `4GB` | Kv purge threshold. |
| `metadata_store.purge_interval` | String | `1m` | The interval of the metadata store to trigger a purge. |
| `procedure` | -- | -- | Procedure storage options. | | `procedure` | -- | -- | Procedure storage options. |
| `procedure.max_retry_times` | Integer | `3` | Procedure max retry time. | | `procedure.max_retry_times` | Integer | `3` | Procedure max retry time. |
| `procedure.retry_delay` | String | `500ms` | Initial retry delay of procedures, increases exponentially | | `procedure.retry_delay` | String | `500ms` | Initial retry delay of procedures, increases exponentially |
| `flow` | -- | -- | flow engine options. |
| `flow.num_workers` | Integer | `0` | The number of flow worker in flownode.<br/>Not setting(or set to 0) this value will use the number of CPU cores divided by 2. |
| `storage` | -- | -- | The data storage options. | | `storage` | -- | -- | The data storage options. |
| `storage.data_home` | String | `/tmp/greptimedb/` | The working home directory. | | `storage.data_home` | String | `/tmp/greptimedb/` | The working home directory. |
| `storage.type` | String | `File` | The storage type used to store the data.<br/>- `File`: the data is stored in the local file system.<br/>- `S3`: the data is stored in the S3 object storage.<br/>- `Gcs`: the data is stored in the Google Cloud Storage.<br/>- `Azblob`: the data is stored in the Azure Blob Storage.<br/>- `Oss`: the data is stored in the Aliyun OSS. | | `storage.type` | String | `File` | The storage type used to store the data.<br/>- `File`: the data is stored in the local file system.<br/>- `S3`: the data is stored in the S3 object storage.<br/>- `Gcs`: the data is stored in the Google Cloud Storage.<br/>- `Azblob`: the data is stored in the Azure Blob Storage.<br/>- `Oss`: the data is stored in the Aliyun OSS. |
| `storage.cache_path` | String | Unset | Read cache configuration for object storage such as 'S3' etc, it's configured by default when using object storage. It is recommended to configure it when using object storage for better performance.<br/>A local file directory, defaults to `{data_home}`. An empty string means disabling. | | `storage.cache_path` | String | Unset | Read cache configuration for object storage such as 'S3' etc, it's configured by default when using object storage. It is recommended to configure it when using object storage for better performance.<br/>A local file directory, defaults to `{data_home}/object_cache/read`. An empty string means disabling. |
| `storage.cache_capacity` | String | Unset | The local file cache capacity in bytes. If your disk space is sufficient, it is recommended to set it larger. | | `storage.cache_capacity` | String | Unset | The local file cache capacity in bytes. If your disk space is sufficient, it is recommended to set it larger. |
| `storage.bucket` | String | Unset | The S3 bucket name.<br/>**It's only used when the storage type is `S3`, `Oss` and `Gcs`**. | | `storage.bucket` | String | Unset | The S3 bucket name.<br/>**It's only used when the storage type is `S3`, `Oss` and `Gcs`**. |
| `storage.root` | String | Unset | The S3 data will be stored in the specified prefix, for example, `s3://${bucket}/${root}`.<br/>**It's only used when the storage type is `S3`, `Oss` and `Azblob`**. | | `storage.root` | String | Unset | The S3 data will be stored in the specified prefix, for example, `s3://${bucket}/${root}`.<br/>**It's only used when the storage type is `S3`, `Oss` and `Azblob`**. |
@@ -141,10 +132,10 @@
| `region_engine.mito.vector_cache_size` | String | Auto | Cache size for vectors and arrow arrays. Setting it to 0 to disable the cache.<br/>If not set, it's default to 1/16 of OS memory with a max limitation of 512MB. | | `region_engine.mito.vector_cache_size` | String | Auto | Cache size for vectors and arrow arrays. Setting it to 0 to disable the cache.<br/>If not set, it's default to 1/16 of OS memory with a max limitation of 512MB. |
| `region_engine.mito.page_cache_size` | String | Auto | Cache size for pages of SST row groups. Setting it to 0 to disable the cache.<br/>If not set, it's default to 1/8 of OS memory. | | `region_engine.mito.page_cache_size` | String | Auto | Cache size for pages of SST row groups. Setting it to 0 to disable the cache.<br/>If not set, it's default to 1/8 of OS memory. |
| `region_engine.mito.selector_result_cache_size` | String | Auto | Cache size for time series selector (e.g. `last_value()`). Setting it to 0 to disable the cache.<br/>If not set, it's default to 1/16 of OS memory with a max limitation of 512MB. | | `region_engine.mito.selector_result_cache_size` | String | Auto | Cache size for time series selector (e.g. `last_value()`). Setting it to 0 to disable the cache.<br/>If not set, it's default to 1/16 of OS memory with a max limitation of 512MB. |
| `region_engine.mito.enable_write_cache` | Bool | `false` | Whether to enable the write cache, it's enabled by default when using object storage. It is recommended to enable it when using object storage for better performance. | | `region_engine.mito.enable_experimental_write_cache` | Bool | `false` | Whether to enable the experimental write cache, it's enabled by default when using object storage. It is recommended to enable it when using object storage for better performance. |
| `region_engine.mito.write_cache_path` | String | `""` | File system path for write cache, defaults to `{data_home}`. | | `region_engine.mito.experimental_write_cache_path` | String | `""` | File system path for write cache, defaults to `{data_home}/object_cache/write`. |
| `region_engine.mito.write_cache_size` | String | `5GiB` | Capacity for write cache. If your disk space is sufficient, it is recommended to set it larger. | | `region_engine.mito.experimental_write_cache_size` | String | `5GiB` | Capacity for write cache. If your disk space is sufficient, it is recommended to set it larger. |
| `region_engine.mito.write_cache_ttl` | String | Unset | TTL for write cache. | | `region_engine.mito.experimental_write_cache_ttl` | String | Unset | TTL for write cache. |
| `region_engine.mito.sst_write_buffer_size` | String | `8MB` | Buffer size for SST writing. | | `region_engine.mito.sst_write_buffer_size` | String | `8MB` | Buffer size for SST writing. |
| `region_engine.mito.parallel_scan_channel_size` | Integer | `32` | Capacity of the channel to send data from parallel scan tasks to the main task. | | `region_engine.mito.parallel_scan_channel_size` | Integer | `32` | Capacity of the channel to send data from parallel scan tasks to the main task. |
| `region_engine.mito.allow_stale_entries` | Bool | `false` | Whether to allow stale WAL entries read during replay. | | `region_engine.mito.allow_stale_entries` | Bool | `false` | Whether to allow stale WAL entries read during replay. |
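A sketch of the write-cache options under their new names (formerly prefixed with `experimental_`), using the documented defaults; the nesting below assumes the `[[region_engine]]` array form used by the example configuration files:

```toml
[[region_engine]]
[region_engine.mito]
enable_write_cache = false   # enabled by default only when object storage is used
write_cache_path = ""        # empty means {data_home}
write_cache_size = "5GiB"
```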
@@ -152,16 +143,15 @@
| `region_engine.mito.index` | -- | -- | The options for index in Mito engine. | | `region_engine.mito.index` | -- | -- | The options for index in Mito engine. |
| `region_engine.mito.index.aux_path` | String | `""` | Auxiliary directory path for the index in filesystem, used to store intermediate files for<br/>creating the index and staging files for searching the index, defaults to `{data_home}/index_intermediate`.<br/>The default name for this directory is `index_intermediate` for backward compatibility.<br/><br/>This path contains two subdirectories:<br/>- `__intm`: for storing intermediate files used during creating index.<br/>- `staging`: for storing staging files used during searching index. | | `region_engine.mito.index.aux_path` | String | `""` | Auxiliary directory path for the index in filesystem, used to store intermediate files for<br/>creating the index and staging files for searching the index, defaults to `{data_home}/index_intermediate`.<br/>The default name for this directory is `index_intermediate` for backward compatibility.<br/><br/>This path contains two subdirectories:<br/>- `__intm`: for storing intermediate files used during creating index.<br/>- `staging`: for storing staging files used during searching index. |
| `region_engine.mito.index.staging_size` | String | `2GB` | The max capacity of the staging directory. | | `region_engine.mito.index.staging_size` | String | `2GB` | The max capacity of the staging directory. |
| `region_engine.mito.index.staging_ttl` | String | `7d` | The TTL of the staging directory.<br/>Defaults to 7 days.<br/>Setting it to "0s" to disable TTL. |
| `region_engine.mito.index.metadata_cache_size` | String | `64MiB` | Cache size for inverted index metadata. |
| `region_engine.mito.index.content_cache_size` | String | `128MiB` | Cache size for inverted index content. |
| `region_engine.mito.index.content_cache_page_size` | String | `64KiB` | Page size for inverted index content cache. |
| `region_engine.mito.inverted_index` | -- | -- | The options for inverted index in Mito engine. | | `region_engine.mito.inverted_index` | -- | -- | The options for inverted index in Mito engine. |
| `region_engine.mito.inverted_index.create_on_flush` | String | `auto` | Whether to create the index on flush.<br/>- `auto`: automatically (default)<br/>- `disable`: never | | `region_engine.mito.inverted_index.create_on_flush` | String | `auto` | Whether to create the index on flush.<br/>- `auto`: automatically (default)<br/>- `disable`: never |
| `region_engine.mito.inverted_index.create_on_compaction` | String | `auto` | Whether to create the index on compaction.<br/>- `auto`: automatically (default)<br/>- `disable`: never | | `region_engine.mito.inverted_index.create_on_compaction` | String | `auto` | Whether to create the index on compaction.<br/>- `auto`: automatically (default)<br/>- `disable`: never |
| `region_engine.mito.inverted_index.apply_on_query` | String | `auto` | Whether to apply the index on query<br/>- `auto`: automatically (default)<br/>- `disable`: never | | `region_engine.mito.inverted_index.apply_on_query` | String | `auto` | Whether to apply the index on query<br/>- `auto`: automatically (default)<br/>- `disable`: never |
| `region_engine.mito.inverted_index.mem_threshold_on_create` | String | `auto` | Memory threshold for performing an external sort during index creation.<br/>- `auto`: automatically determine the threshold based on the system memory size (default)<br/>- `unlimited`: no memory limit<br/>- `[size]` e.g. `64MB`: fixed memory threshold | | `region_engine.mito.inverted_index.mem_threshold_on_create` | String | `auto` | Memory threshold for performing an external sort during index creation.<br/>- `auto`: automatically determine the threshold based on the system memory size (default)<br/>- `unlimited`: no memory limit<br/>- `[size]` e.g. `64MB`: fixed memory threshold |
| `region_engine.mito.inverted_index.intermediate_path` | String | `""` | Deprecated, use `region_engine.mito.index.aux_path` instead. | | `region_engine.mito.inverted_index.intermediate_path` | String | `""` | Deprecated, use `region_engine.mito.index.aux_path` instead. |
| `region_engine.mito.inverted_index.metadata_cache_size` | String | `64MiB` | Cache size for inverted index metadata. |
| `region_engine.mito.inverted_index.content_cache_size` | String | `128MiB` | Cache size for inverted index content. |
| `region_engine.mito.inverted_index.content_cache_page_size` | String | `8MiB` | Page size for inverted index content cache. |
| `region_engine.mito.fulltext_index` | -- | -- | The options for full-text index in Mito engine. | | `region_engine.mito.fulltext_index` | -- | -- | The options for full-text index in Mito engine. |
| `region_engine.mito.fulltext_index.create_on_flush` | String | `auto` | Whether to create the index on flush.<br/>- `auto`: automatically (default)<br/>- `disable`: never | | `region_engine.mito.fulltext_index.create_on_flush` | String | `auto` | Whether to create the index on flush.<br/>- `auto`: automatically (default)<br/>- `disable`: never |
| `region_engine.mito.fulltext_index.create_on_compaction` | String | `auto` | Whether to create the index on compaction.<br/>- `auto`: automatically (default)<br/>- `disable`: never | | `region_engine.mito.fulltext_index.create_on_compaction` | String | `auto` | Whether to create the index on compaction.<br/>- `auto`: automatically (default)<br/>- `disable`: never |
@@ -178,8 +168,6 @@
| `region_engine.mito.memtable.data_freeze_threshold` | Integer | `32768` | The max rows of data inside the actively writing buffer in one shard.<br/>Only available for `partition_tree` memtable. | | `region_engine.mito.memtable.data_freeze_threshold` | Integer | `32768` | The max rows of data inside the actively writing buffer in one shard.<br/>Only available for `partition_tree` memtable. |
| `region_engine.mito.memtable.fork_dictionary_bytes` | String | `1GiB` | Max dictionary bytes.<br/>Only available for `partition_tree` memtable. | | `region_engine.mito.memtable.fork_dictionary_bytes` | String | `1GiB` | Max dictionary bytes.<br/>Only available for `partition_tree` memtable. |
| `region_engine.file` | -- | -- | Enable the file engine. | | `region_engine.file` | -- | -- | Enable the file engine. |
| `region_engine.metric` | -- | -- | Metric engine options. |
| `region_engine.metric.experimental_sparse_primary_key_encoding` | Bool | `false` | Whether to enable the experimental sparse primary key encoding. |
| `logging` | -- | -- | The logging options. | | `logging` | -- | -- | The logging options. |
| `logging.dir` | String | `/tmp/greptimedb/logs` | The directory to store the log files. If set to empty, logs will not be written to files. | | `logging.dir` | String | `/tmp/greptimedb/logs` | The directory to store the log files. If set to empty, logs will not be written to files. |
| `logging.level` | String | Unset | The log level. Can be `info`/`debug`/`warn`/`error`. | | `logging.level` | String | Unset | The log level. Can be `info`/`debug`/`warn`/`error`. |
@@ -224,11 +212,9 @@
| `http.addr` | String | `127.0.0.1:4000` | The address to bind the HTTP server. | | `http.addr` | String | `127.0.0.1:4000` | The address to bind the HTTP server. |
| `http.timeout` | String | `30s` | HTTP request timeout. Set to 0 to disable timeout. | | `http.timeout` | String | `30s` | HTTP request timeout. Set to 0 to disable timeout. |
| `http.body_limit` | String | `64MB` | HTTP request body limit.<br/>The following units are supported: `B`, `KB`, `KiB`, `MB`, `MiB`, `GB`, `GiB`, `TB`, `TiB`, `PB`, `PiB`.<br/>Set to 0 to disable limit. | | `http.body_limit` | String | `64MB` | HTTP request body limit.<br/>The following units are supported: `B`, `KB`, `KiB`, `MB`, `MiB`, `GB`, `GiB`, `TB`, `TiB`, `PB`, `PiB`.<br/>Set to 0 to disable limit. |
| `http.enable_cors` | Bool | `true` | HTTP CORS support, it's turned on by default<br/>This allows browser to access http APIs without CORS restrictions |
| `http.cors_allowed_origins` | Array | Unset | Customize allowed origins for HTTP CORS. |
| `grpc` | -- | -- | The gRPC server options. | | `grpc` | -- | -- | The gRPC server options. |
| `grpc.bind_addr` | String | `127.0.0.1:4001` | The address to bind the gRPC server. | | `grpc.addr` | String | `127.0.0.1:4001` | The address to bind the gRPC server. |
| `grpc.server_addr` | String | `127.0.0.1:4001` | The address advertised to the metasrv, and used for connections from outside the host.<br/>If left empty or unset, the server will automatically use the IP address of the first network interface<br/>on the host, with the same port number as the one specified in `grpc.bind_addr`. | | `grpc.hostname` | String | `127.0.0.1` | The hostname advertised to the metasrv,<br/>and used for connections from outside the host |
| `grpc.runtime_size` | Integer | `8` | The number of server worker threads. | | `grpc.runtime_size` | Integer | `8` | The number of server worker threads. |
| `grpc.tls` | -- | -- | gRPC server TLS options, see `mysql.tls` section. | | `grpc.tls` | -- | -- | gRPC server TLS options, see `mysql.tls` section. |
| `grpc.tls.mode` | String | `disable` | TLS mode. | | `grpc.tls.mode` | String | `disable` | TLS mode. |
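A sketch of the renamed frontend gRPC options (`grpc.addr`/`grpc.hostname` become `grpc.bind_addr`/`grpc.server_addr`), using the documented defaults:

```toml
[grpc]
bind_addr = "127.0.0.1:4001"
# Advertised to the metasrv; if unset, the IP of the first network interface
# is used together with the port from `bind_addr`.
server_addr = "127.0.0.1:4001"
runtime_size = 8
```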
@@ -239,7 +225,6 @@
| `mysql.enable` | Bool | `true` | Whether to enable. | | `mysql.enable` | Bool | `true` | Whether to enable. |
| `mysql.addr` | String | `127.0.0.1:4002` | The addr to bind the MySQL server. | | `mysql.addr` | String | `127.0.0.1:4002` | The addr to bind the MySQL server. |
| `mysql.runtime_size` | Integer | `2` | The number of server worker threads. | | `mysql.runtime_size` | Integer | `2` | The number of server worker threads. |
| `mysql.keep_alive` | String | `0s` | Server-side keep-alive time.<br/>Set to 0 (default) to disable. |
| `mysql.tls` | -- | -- | -- | | `mysql.tls` | -- | -- | -- |
| `mysql.tls.mode` | String | `disable` | TLS mode, refer to https://www.postgresql.org/docs/current/libpq-ssl.html<br/>- `disable` (default value)<br/>- `prefer`<br/>- `require`<br/>- `verify-ca`<br/>- `verify-full` | | `mysql.tls.mode` | String | `disable` | TLS mode, refer to https://www.postgresql.org/docs/current/libpq-ssl.html<br/>- `disable` (default value)<br/>- `prefer`<br/>- `require`<br/>- `verify-ca`<br/>- `verify-full` |
| `mysql.tls.cert_path` | String | Unset | Certificate file path. | | `mysql.tls.cert_path` | String | Unset | Certificate file path. |
@@ -249,7 +234,6 @@
| `postgres.enable` | Bool | `true` | Whether to enable | | `postgres.enable` | Bool | `true` | Whether to enable |
| `postgres.addr` | String | `127.0.0.1:4003` | The addr to bind the PostgreSQL server. | | `postgres.addr` | String | `127.0.0.1:4003` | The addr to bind the PostgreSQL server. |
| `postgres.runtime_size` | Integer | `2` | The number of server worker threads. | | `postgres.runtime_size` | Integer | `2` | The number of server worker threads. |
| `postgres.keep_alive` | String | `0s` | Server-side keep-alive time.<br/>Set to 0 (default) to disable. |
| `postgres.tls` | -- | -- | PostgreSQL server TLS options, see `mysql.tls` section. | | `postgres.tls` | -- | -- | PostgreSQL server TLS options, see `mysql.tls` section. |
| `postgres.tls.mode` | String | `disable` | TLS mode. | | `postgres.tls.mode` | String | `disable` | TLS mode. |
| `postgres.tls.cert_path` | String | Unset | Certificate file path. | | `postgres.tls.cert_path` | String | Unset | Certificate file path. |
@@ -259,8 +243,6 @@
| `opentsdb.enable` | Bool | `true` | Whether to enable OpenTSDB put in HTTP API. | | `opentsdb.enable` | Bool | `true` | Whether to enable OpenTSDB put in HTTP API. |
| `influxdb` | -- | -- | InfluxDB protocol options. | | `influxdb` | -- | -- | InfluxDB protocol options. |
| `influxdb.enable` | Bool | `true` | Whether to enable InfluxDB protocol in HTTP API. | | `influxdb.enable` | Bool | `true` | Whether to enable InfluxDB protocol in HTTP API. |
| `jaeger` | -- | -- | Jaeger protocol options. |
| `jaeger.enable` | Bool | `true` | Whether to enable Jaeger protocol in HTTP API. |
| `prom_store` | -- | -- | Prometheus remote storage options | | `prom_store` | -- | -- | Prometheus remote storage options |
| `prom_store.enable` | Bool | `true` | Whether to enable Prometheus remote write and read in HTTP API. | | `prom_store.enable` | Bool | `true` | Whether to enable Prometheus remote write and read in HTTP API. |
| `prom_store.with_metric_engine` | Bool | `true` | Whether to store the data from Prometheus remote write in metric engine. | | `prom_store.with_metric_engine` | Bool | `true` | Whether to store the data from Prometheus remote write in metric engine. |
@@ -310,16 +292,13 @@
| --- | -----| ------- | ----------- | | --- | -----| ------- | ----------- |
| `data_home` | String | `/tmp/metasrv/` | The working home directory. | | `data_home` | String | `/tmp/metasrv/` | The working home directory. |
| `bind_addr` | String | `127.0.0.1:3002` | The bind address of metasrv. | | `bind_addr` | String | `127.0.0.1:3002` | The bind address of metasrv. |
| `server_addr` | String | `127.0.0.1:3002` | The communication server address for the frontend and datanode to connect to metasrv.<br/>If left empty or unset, the server will automatically use the IP address of the first network interface<br/>on the host, with the same port number as the one specified in `bind_addr`. | | `server_addr` | String | `127.0.0.1:3002` | The communication server address for frontend and datanode to connect to metasrv, "127.0.0.1:3002" by default for localhost. |
| `store_addrs` | Array | -- | Store server address default to etcd store.<br/>For postgres store, the format is:<br/>"password=password dbname=postgres user=postgres host=localhost port=5432"<br/>For etcd store, the format is:<br/>"127.0.0.1:2379" | | `store_addrs` | Array | -- | Store server address default to etcd store. |
| `store_key_prefix` | String | `""` | If it's not empty, the metasrv will store all data with this key prefix. | | `store_key_prefix` | String | `""` | If it's not empty, the metasrv will store all data with this key prefix. |
| `backend` | String | `etcd_store` | The datastore for meta server.<br/>Available values:<br/>- `etcd_store` (default value)<br/>- `memory_store`<br/>- `postgres_store` | | `backend` | String | `EtcdStore` | The datastore for meta server. |
| `meta_table_name` | String | `greptime_metakv` | Table name in RDS to store metadata. Effect when using a RDS kvbackend.<br/>**Only used when backend is `postgres_store`.** |
| `meta_election_lock_id` | Integer | `1` | Advisory lock id in PostgreSQL for election. Effect when using PostgreSQL as kvbackend<br/>Only used when backend is `postgres_store`. |
| `selector` | String | `round_robin` | Datanode selector type.<br/>- `round_robin` (default value)<br/>- `lease_based`<br/>- `load_based`<br/>For details, please see "https://docs.greptime.com/developer-guide/metasrv/selector". | | `selector` | String | `round_robin` | Datanode selector type.<br/>- `round_robin` (default value)<br/>- `lease_based`<br/>- `load_based`<br/>For details, please see "https://docs.greptime.com/developer-guide/metasrv/selector". |
| `use_memory_store` | Bool | `false` | Store data in memory. | | `use_memory_store` | Bool | `false` | Store data in memory. |
| `enable_region_failover` | Bool | `false` | Whether to enable region failover.<br/>This feature is only available on GreptimeDB running on cluster mode and<br/>- Using Remote WAL<br/>- Using shared storage (e.g., s3). | | `enable_region_failover` | Bool | `false` | Whether to enable region failover.<br/>This feature is only available on GreptimeDB running on cluster mode and<br/>- Using Remote WAL<br/>- Using shared storage (e.g., s3). |
| `node_max_idle_time` | String | `24hours` | Max allowed idle time before removing node info from metasrv memory. |
| `enable_telemetry` | Bool | `true` | Whether to enable greptimedb telemetry. Enabled by default. | | `enable_telemetry` | Bool | `true` | Whether to enable greptimedb telemetry. Enabled by default. |
| `runtime` | -- | -- | The runtime options. | | `runtime` | -- | -- | The runtime options. |
| `runtime.global_rt_size` | Integer | `8` | The number of threads to execute the runtime for global read operations. | | `runtime.global_rt_size` | Integer | `8` | The number of threads to execute the runtime for global read operations. |
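For orientation, the storage backend rows above combine into a metasrv section like the following minimal sketch; it assumes the PostgreSQL backend, and the connection string and table name are illustrative placeholders rather than recommendations:

```toml
# Sketch: metasrv metadata storage on PostgreSQL (all values are placeholders).
backend = "postgres_store"
store_addrs = ["password=password dbname=postgres user=postgres host=localhost port=5432"]
# Table used to store metadata; only read when `backend` is `postgres_store`.
meta_table_name = "greptime_metakv"
# PostgreSQL advisory lock id used for leader election.
meta_election_lock_id = 1
```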
@@ -344,7 +323,7 @@
| `wal.auto_create_topics` | Bool | `true` | Automatically create topics for WAL.<br/>Set to `true` to automatically create topics for WAL.<br/>Otherwise, use topics named `topic_name_prefix_[0..num_topics)` | | `wal.auto_create_topics` | Bool | `true` | Automatically create topics for WAL.<br/>Set to `true` to automatically create topics for WAL.<br/>Otherwise, use topics named `topic_name_prefix_[0..num_topics)` |
| `wal.num_topics` | Integer | `64` | Number of topics. | | `wal.num_topics` | Integer | `64` | Number of topics. |
| `wal.selector_type` | String | `round_robin` | Topic selector type.<br/>Available selector types:<br/>- `round_robin` (default) | | `wal.selector_type` | String | `round_robin` | Topic selector type.<br/>Available selector types:<br/>- `round_robin` (default) |
| `wal.topic_name_prefix` | String | `greptimedb_wal_topic` | A Kafka topic is constructed by concatenating `topic_name_prefix` and `topic_id`.<br/>Only accepts strings that match the following regular expression pattern:<br/>[a-zA-Z_:-][a-zA-Z0-9_:\-\.@#]*<br/>e.g., greptimedb_wal_topic_0, greptimedb_wal_topic_1. | | `wal.topic_name_prefix` | String | `greptimedb_wal_topic` | A Kafka topic is constructed by concatenating `topic_name_prefix` and `topic_id`.<br/>e.g., greptimedb_wal_topic_0, greptimedb_wal_topic_1. |
| `wal.replication_factor` | Integer | `1` | Expected number of replicas of each partition. | | `wal.replication_factor` | Integer | `1` | Expected number of replicas of each partition. |
| `wal.create_topic_timeout` | String | `30s` | Above which a topic creation operation will be cancelled. | | `wal.create_topic_timeout` | String | `30s` | Above which a topic creation operation will be cancelled. |
| `wal.backoff_init` | String | `500ms` | The initial backoff for kafka clients. | | `wal.backoff_init` | String | `500ms` | The initial backoff for kafka clients. |
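Taken together, the WAL rows above correspond to a `[wal]` section roughly like the sketch below; the `provider` and `broker_endpoints` keys are assumptions not documented in this table, and the broker address is a placeholder:

```toml
# Sketch: Kafka-backed WAL for metasrv (`provider`/`broker_endpoints` are assumed keys, the broker address is a placeholder).
[wal]
provider = "kafka"
broker_endpoints = ["127.0.0.1:9092"]
auto_create_topics = true
num_topics = 64
selector_type = "round_robin"
topic_name_prefix = "greptimedb_wal_topic"
replication_factor = 1
create_topic_timeout = "30s"
```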
@@ -387,14 +366,19 @@
| `init_regions_in_background` | Bool | `false` | Initialize all regions in the background during the startup.<br/>By default, it provides services after all regions have been initialized. | | `init_regions_in_background` | Bool | `false` | Initialize all regions in the background during the startup.<br/>By default, it provides services after all regions have been initialized. |
| `init_regions_parallelism` | Integer | `16` | Parallelism of initializing regions. | | `init_regions_parallelism` | Integer | `16` | Parallelism of initializing regions. |
| `max_concurrent_queries` | Integer | `0` | The maximum concurrent queries allowed to be executed. Zero means unlimited. | | `max_concurrent_queries` | Integer | `0` | The maximum concurrent queries allowed to be executed. Zero means unlimited. |
| `rpc_addr` | String | Unset | Deprecated, use `grpc.addr` instead. |
| `rpc_hostname` | String | Unset | Deprecated, use `grpc.hostname` instead. |
| `rpc_runtime_size` | Integer | Unset | Deprecated, use `grpc.runtime_size` instead. |
| `rpc_max_recv_message_size` | String | Unset | Deprecated, use `grpc.rpc_max_recv_message_size` instead. |
| `rpc_max_send_message_size` | String | Unset | Deprecated, use `grpc.rpc_max_send_message_size` instead. |
| `enable_telemetry` | Bool | `true` | Enable telemetry to collect anonymous usage data. Enabled by default. | | `enable_telemetry` | Bool | `true` | Enable telemetry to collect anonymous usage data. Enabled by default. |
| `http` | -- | -- | The HTTP server options. | | `http` | -- | -- | The HTTP server options. |
| `http.addr` | String | `127.0.0.1:4000` | The address to bind the HTTP server. | | `http.addr` | String | `127.0.0.1:4000` | The address to bind the HTTP server. |
| `http.timeout` | String | `30s` | HTTP request timeout. Set to 0 to disable timeout. | | `http.timeout` | String | `30s` | HTTP request timeout. Set to 0 to disable timeout. |
| `http.body_limit` | String | `64MB` | HTTP request body limit.<br/>The following units are supported: `B`, `KB`, `KiB`, `MB`, `MiB`, `GB`, `GiB`, `TB`, `TiB`, `PB`, `PiB`.<br/>Set to 0 to disable limit. | | `http.body_limit` | String | `64MB` | HTTP request body limit.<br/>The following units are supported: `B`, `KB`, `KiB`, `MB`, `MiB`, `GB`, `GiB`, `TB`, `TiB`, `PB`, `PiB`.<br/>Set to 0 to disable limit. |
| `grpc` | -- | -- | The gRPC server options. | | `grpc` | -- | -- | The gRPC server options. |
| `grpc.bind_addr` | String | `127.0.0.1:3001` | The address to bind the gRPC server. | | `grpc.addr` | String | `127.0.0.1:3001` | The address to bind the gRPC server. |
| `grpc.server_addr` | String | `127.0.0.1:3001` | The address advertised to the metasrv, and used for connections from outside the host.<br/>If left empty or unset, the server will automatically use the IP address of the first network interface<br/>on the host, with the same port number as the one specified in `grpc.bind_addr`. | | `grpc.hostname` | String | `127.0.0.1` | The hostname advertised to the metasrv,<br/>and used for connections from outside the host |
| `grpc.runtime_size` | Integer | `8` | The number of server worker threads. | | `grpc.runtime_size` | Integer | `8` | The number of server worker threads. |
| `grpc.max_recv_message_size` | String | `512MB` | The maximum receive message size for gRPC server. | | `grpc.max_recv_message_size` | String | `512MB` | The maximum receive message size for gRPC server. |
| `grpc.max_send_message_size` | String | `512MB` | The maximum send message size for gRPC server. | | `grpc.max_send_message_size` | String | `512MB` | The maximum send message size for gRPC server. |
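To make the bind/advertise distinction above concrete, here is a minimal sketch of a datanode `grpc` section using the `bind_addr`/`server_addr` spelling from the left-hand column; the LAN address is illustrative:

```toml
[grpc]
# Listen on all interfaces of the host/container.
bind_addr = "0.0.0.0:3001"
# Address other nodes (e.g. metasrv) use to reach this datanode; illustrative value.
server_addr = "10.0.0.2:3001"
runtime_size = 8
```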
@@ -482,10 +466,10 @@
| `region_engine.mito.vector_cache_size` | String | Auto | Cache size for vectors and arrow arrays. Setting it to 0 to disable the cache.<br/>If not set, it's default to 1/16 of OS memory with a max limitation of 512MB. | | `region_engine.mito.vector_cache_size` | String | Auto | Cache size for vectors and arrow arrays. Setting it to 0 to disable the cache.<br/>If not set, it's default to 1/16 of OS memory with a max limitation of 512MB. |
| `region_engine.mito.page_cache_size` | String | Auto | Cache size for pages of SST row groups. Setting it to 0 to disable the cache.<br/>If not set, it's default to 1/8 of OS memory. | | `region_engine.mito.page_cache_size` | String | Auto | Cache size for pages of SST row groups. Setting it to 0 to disable the cache.<br/>If not set, it's default to 1/8 of OS memory. |
| `region_engine.mito.selector_result_cache_size` | String | Auto | Cache size for time series selector (e.g. `last_value()`). Setting it to 0 to disable the cache.<br/>If not set, it's default to 1/16 of OS memory with a max limitation of 512MB. | | `region_engine.mito.selector_result_cache_size` | String | Auto | Cache size for time series selector (e.g. `last_value()`). Setting it to 0 to disable the cache.<br/>If not set, it's default to 1/16 of OS memory with a max limitation of 512MB. |
| `region_engine.mito.enable_write_cache` | Bool | `false` | Whether to enable the write cache, it's enabled by default when using object storage. It is recommended to enable it when using object storage for better performance. | | `region_engine.mito.enable_experimental_write_cache` | Bool | `false` | Whether to enable the experimental write cache, it's enabled by default when using object storage. It is recommended to enable it when using object storage for better performance. |
| `region_engine.mito.write_cache_path` | String | `""` | File system path for write cache, defaults to `{data_home}`. | | `region_engine.mito.experimental_write_cache_path` | String | `""` | File system path for write cache, defaults to `{data_home}`. |
| `region_engine.mito.write_cache_size` | String | `5GiB` | Capacity for write cache. If your disk space is sufficient, it is recommended to set it larger. | | `region_engine.mito.experimental_write_cache_size` | String | `5GiB` | Capacity for write cache. If your disk space is sufficient, it is recommended to set it larger. |
| `region_engine.mito.write_cache_ttl` | String | Unset | TTL for write cache. | | `region_engine.mito.experimental_write_cache_ttl` | String | Unset | TTL for write cache. |
| `region_engine.mito.sst_write_buffer_size` | String | `8MB` | Buffer size for SST writing. | | `region_engine.mito.sst_write_buffer_size` | String | `8MB` | Buffer size for SST writing. |
| `region_engine.mito.parallel_scan_channel_size` | Integer | `32` | Capacity of the channel to send data from parallel scan tasks to the main task. | | `region_engine.mito.parallel_scan_channel_size` | Integer | `32` | Capacity of the channel to send data from parallel scan tasks to the main task. |
| `region_engine.mito.allow_stale_entries` | Bool | `false` | Whether to allow stale WAL entries read during replay. | | `region_engine.mito.allow_stale_entries` | Bool | `false` | Whether to allow stale WAL entries read during replay. |
@@ -493,16 +477,15 @@
| `region_engine.mito.index` | -- | -- | The options for index in Mito engine. | | `region_engine.mito.index` | -- | -- | The options for index in Mito engine. |
| `region_engine.mito.index.aux_path` | String | `""` | Auxiliary directory path for the index in filesystem, used to store intermediate files for<br/>creating the index and staging files for searching the index, defaults to `{data_home}/index_intermediate`.<br/>The default name for this directory is `index_intermediate` for backward compatibility.<br/><br/>This path contains two subdirectories:<br/>- `__intm`: for storing intermediate files used during creating index.<br/>- `staging`: for storing staging files used during searching index. | | `region_engine.mito.index.aux_path` | String | `""` | Auxiliary directory path for the index in filesystem, used to store intermediate files for<br/>creating the index and staging files for searching the index, defaults to `{data_home}/index_intermediate`.<br/>The default name for this directory is `index_intermediate` for backward compatibility.<br/><br/>This path contains two subdirectories:<br/>- `__intm`: for storing intermediate files used during creating index.<br/>- `staging`: for storing staging files used during searching index. |
| `region_engine.mito.index.staging_size` | String | `2GB` | The max capacity of the staging directory. | | `region_engine.mito.index.staging_size` | String | `2GB` | The max capacity of the staging directory. |
| `region_engine.mito.index.staging_ttl` | String | `7d` | The TTL of the staging directory.<br/>Defaults to 7 days.<br/>Setting it to "0s" to disable TTL. |
| `region_engine.mito.index.metadata_cache_size` | String | `64MiB` | Cache size for inverted index metadata. |
| `region_engine.mito.index.content_cache_size` | String | `128MiB` | Cache size for inverted index content. |
| `region_engine.mito.index.content_cache_page_size` | String | `64KiB` | Page size for inverted index content cache. |
| `region_engine.mito.inverted_index` | -- | -- | The options for inverted index in Mito engine. | | `region_engine.mito.inverted_index` | -- | -- | The options for inverted index in Mito engine. |
| `region_engine.mito.inverted_index.create_on_flush` | String | `auto` | Whether to create the index on flush.<br/>- `auto`: automatically (default)<br/>- `disable`: never | | `region_engine.mito.inverted_index.create_on_flush` | String | `auto` | Whether to create the index on flush.<br/>- `auto`: automatically (default)<br/>- `disable`: never |
| `region_engine.mito.inverted_index.create_on_compaction` | String | `auto` | Whether to create the index on compaction.<br/>- `auto`: automatically (default)<br/>- `disable`: never | | `region_engine.mito.inverted_index.create_on_compaction` | String | `auto` | Whether to create the index on compaction.<br/>- `auto`: automatically (default)<br/>- `disable`: never |
| `region_engine.mito.inverted_index.apply_on_query` | String | `auto` | Whether to apply the index on query<br/>- `auto`: automatically (default)<br/>- `disable`: never | | `region_engine.mito.inverted_index.apply_on_query` | String | `auto` | Whether to apply the index on query<br/>- `auto`: automatically (default)<br/>- `disable`: never |
| `region_engine.mito.inverted_index.mem_threshold_on_create` | String | `auto` | Memory threshold for performing an external sort during index creation.<br/>- `auto`: automatically determine the threshold based on the system memory size (default)<br/>- `unlimited`: no memory limit<br/>- `[size]` e.g. `64MB`: fixed memory threshold | | `region_engine.mito.inverted_index.mem_threshold_on_create` | String | `auto` | Memory threshold for performing an external sort during index creation.<br/>- `auto`: automatically determine the threshold based on the system memory size (default)<br/>- `unlimited`: no memory limit<br/>- `[size]` e.g. `64MB`: fixed memory threshold |
| `region_engine.mito.inverted_index.intermediate_path` | String | `""` | Deprecated, use `region_engine.mito.index.aux_path` instead. | | `region_engine.mito.inverted_index.intermediate_path` | String | `""` | Deprecated, use `region_engine.mito.index.aux_path` instead. |
| `region_engine.mito.inverted_index.metadata_cache_size` | String | `64MiB` | Cache size for inverted index metadata. |
| `region_engine.mito.inverted_index.content_cache_size` | String | `128MiB` | Cache size for inverted index content. |
| `region_engine.mito.inverted_index.content_cache_page_size` | String | `8MiB` | Page size for inverted index content cache. |
| `region_engine.mito.fulltext_index` | -- | -- | The options for full-text index in Mito engine. | | `region_engine.mito.fulltext_index` | -- | -- | The options for full-text index in Mito engine. |
| `region_engine.mito.fulltext_index.create_on_flush` | String | `auto` | Whether to create the index on flush.<br/>- `auto`: automatically (default)<br/>- `disable`: never | | `region_engine.mito.fulltext_index.create_on_flush` | String | `auto` | Whether to create the index on flush.<br/>- `auto`: automatically (default)<br/>- `disable`: never |
| `region_engine.mito.fulltext_index.create_on_compaction` | String | `auto` | Whether to create the index on compaction.<br/>- `auto`: automatically (default)<br/>- `disable`: never | | `region_engine.mito.fulltext_index.create_on_compaction` | String | `auto` | Whether to create the index on compaction.<br/>- `auto`: automatically (default)<br/>- `disable`: never |
@@ -519,8 +502,6 @@
| `region_engine.mito.memtable.data_freeze_threshold` | Integer | `32768` | The max rows of data inside the actively writing buffer in one shard.<br/>Only available for `partition_tree` memtable. | | `region_engine.mito.memtable.data_freeze_threshold` | Integer | `32768` | The max rows of data inside the actively writing buffer in one shard.<br/>Only available for `partition_tree` memtable. |
| `region_engine.mito.memtable.fork_dictionary_bytes` | String | `1GiB` | Max dictionary bytes.<br/>Only available for `partition_tree` memtable. | | `region_engine.mito.memtable.fork_dictionary_bytes` | String | `1GiB` | Max dictionary bytes.<br/>Only available for `partition_tree` memtable. |
| `region_engine.file` | -- | -- | Enable the file engine. | | `region_engine.file` | -- | -- | Enable the file engine. |
| `region_engine.metric` | -- | -- | Metric engine options. |
| `region_engine.metric.experimental_sparse_primary_key_encoding` | Bool | `false` | Whether to enable the experimental sparse primary key encoding. |
| `logging` | -- | -- | The logging options. | | `logging` | -- | -- | The logging options. |
| `logging.dir` | String | `/tmp/greptimedb/logs` | The directory to store the log files. If set to empty, logs will not be written to files. | | `logging.dir` | String | `/tmp/greptimedb/logs` | The directory to store the log files. If set to empty, logs will not be written to files. |
| `logging.level` | String | Unset | The log level. Can be `info`/`debug`/`warn`/`error`. | | `logging.level` | String | Unset | The log level. Can be `info`/`debug`/`warn`/`error`. |
@@ -553,18 +534,12 @@
| --- | -----| ------- | ----------- | | --- | -----| ------- | ----------- |
| `mode` | String | `distributed` | The running mode of the flownode. It can be `standalone` or `distributed`. | | `mode` | String | `distributed` | The running mode of the flownode. It can be `standalone` or `distributed`. |
| `node_id` | Integer | Unset | The flownode identifier and should be unique in the cluster. | | `node_id` | Integer | Unset | The flownode identifier and should be unique in the cluster. |
| `flow` | -- | -- | flow engine options. |
| `flow.num_workers` | Integer | `0` | The number of flow workers in the flownode.<br/>Not setting (or setting to 0) this value will use the number of CPU cores divided by 2. |
| `grpc` | -- | -- | The gRPC server options. | | `grpc` | -- | -- | The gRPC server options. |
| `grpc.bind_addr` | String | `127.0.0.1:6800` | The address to bind the gRPC server. | | `grpc.addr` | String | `127.0.0.1:6800` | The address to bind the gRPC server. |
| `grpc.server_addr` | String | `127.0.0.1:6800` | The address advertised to the metasrv,<br/>and used for connections from outside the host | | `grpc.hostname` | String | `127.0.0.1` | The hostname advertised to the metasrv,<br/>and used for connections from outside the host |
| `grpc.runtime_size` | Integer | `2` | The number of server worker threads. | | `grpc.runtime_size` | Integer | `2` | The number of server worker threads. |
| `grpc.max_recv_message_size` | String | `512MB` | The maximum receive message size for gRPC server. | | `grpc.max_recv_message_size` | String | `512MB` | The maximum receive message size for gRPC server. |
| `grpc.max_send_message_size` | String | `512MB` | The maximum send message size for gRPC server. | | `grpc.max_send_message_size` | String | `512MB` | The maximum send message size for gRPC server. |
| `http` | -- | -- | The HTTP server options. |
| `http.addr` | String | `127.0.0.1:4000` | The address to bind the HTTP server. |
| `http.timeout` | String | `30s` | HTTP request timeout. Set to 0 to disable timeout. |
| `http.body_limit` | String | `64MB` | HTTP request body limit.<br/>The following units are supported: `B`, `KB`, `KiB`, `MB`, `MiB`, `GB`, `GiB`, `TB`, `TiB`, `PB`, `PiB`.<br/>Set to 0 to disable limit. |
| `meta_client` | -- | -- | The metasrv client options. | | `meta_client` | -- | -- | The metasrv client options. |
| `meta_client.metasrv_addrs` | Array | -- | The addresses of the metasrv. | | `meta_client.metasrv_addrs` | Array | -- | The addresses of the metasrv. |
| `meta_client.timeout` | String | `3s` | Operation timeout. | | `meta_client.timeout` | String | `3s` | Operation timeout. |
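For reference, the `meta_client` rows above map onto a TOML section like this minimal sketch; the address is the documented metasrv default, not a recommendation:

```toml
[meta_client]
# Addresses of the metasrv instances this node connects to.
metasrv_addrs = ["127.0.0.1:3002"]
# Operation timeout.
timeout = "3s"
```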

View File

@@ -19,6 +19,26 @@ init_regions_parallelism = 16
## The maximum concurrent queries allowed to be executed. Zero means unlimited. ## The maximum concurrent queries allowed to be executed. Zero means unlimited.
max_concurrent_queries = 0 max_concurrent_queries = 0
## Deprecated, use `grpc.addr` instead.
## @toml2docs:none-default
rpc_addr = "127.0.0.1:3001"
## Deprecated, use `grpc.hostname` instead.
## @toml2docs:none-default
rpc_hostname = "127.0.0.1"
## Deprecated, use `grpc.runtime_size` instead.
## @toml2docs:none-default
rpc_runtime_size = 8
## Deprecated, use `grpc.rpc_max_recv_message_size` instead.
## @toml2docs:none-default
rpc_max_recv_message_size = "512MB"
## Deprecated, use `grpc.rpc_max_send_message_size` instead.
## @toml2docs:none-default
rpc_max_send_message_size = "512MB"
## Enable telemetry to collect anonymous usage data. Enabled by default. ## Enable telemetry to collect anonymous usage data. Enabled by default.
#+ enable_telemetry = true #+ enable_telemetry = true
@@ -36,11 +56,10 @@ body_limit = "64MB"
## The gRPC server options. ## The gRPC server options.
[grpc] [grpc]
## The address to bind the gRPC server. ## The address to bind the gRPC server.
bind_addr = "127.0.0.1:3001" addr = "127.0.0.1:3001"
## The address advertised to the metasrv, and used for connections from outside the host. ## The hostname advertised to the metasrv,
## If left empty or unset, the server will automatically use the IP address of the first network interface ## and used for connections from outside the host
## on the host, with the same port number as the one specified in `grpc.bind_addr`. hostname = "127.0.0.1"
server_addr = "127.0.0.1:3001"
## The number of server worker threads. ## The number of server worker threads.
runtime_size = 8 runtime_size = 8
## The maximum receive message size for gRPC server. ## The maximum receive message size for gRPC server.
@@ -456,18 +475,18 @@ auto_flush_interval = "1h"
## @toml2docs:none-default="Auto" ## @toml2docs:none-default="Auto"
#+ selector_result_cache_size = "512MB" #+ selector_result_cache_size = "512MB"
## Whether to enable the write cache, it's enabled by default when using object storage. It is recommended to enable it when using object storage for better performance. ## Whether to enable the experimental write cache, it's enabled by default when using object storage. It is recommended to enable it when using object storage for better performance.
enable_write_cache = false enable_experimental_write_cache = false
## File system path for write cache, defaults to `{data_home}`. ## File system path for write cache, defaults to `{data_home}`.
write_cache_path = "" experimental_write_cache_path = ""
## Capacity for write cache. If your disk space is sufficient, it is recommended to set it larger. ## Capacity for write cache. If your disk space is sufficient, it is recommended to set it larger.
write_cache_size = "5GiB" experimental_write_cache_size = "5GiB"
## TTL for write cache. ## TTL for write cache.
## @toml2docs:none-default ## @toml2docs:none-default
write_cache_ttl = "8h" experimental_write_cache_ttl = "8h"
## Buffer size for SST writing. ## Buffer size for SST writing.
sst_write_buffer_size = "8MB" sst_write_buffer_size = "8MB"
@@ -497,20 +516,6 @@ aux_path = ""
## The max capacity of the staging directory. ## The max capacity of the staging directory.
staging_size = "2GB" staging_size = "2GB"
## The TTL of the staging directory.
## Defaults to 7 days.
## Setting it to "0s" to disable TTL.
staging_ttl = "7d"
## Cache size for inverted index metadata.
metadata_cache_size = "64MiB"
## Cache size for inverted index content.
content_cache_size = "128MiB"
## Page size for inverted index content cache.
content_cache_page_size = "64KiB"
## The options for inverted index in Mito engine. ## The options for inverted index in Mito engine.
[region_engine.mito.inverted_index] [region_engine.mito.inverted_index]
@@ -538,6 +543,15 @@ mem_threshold_on_create = "auto"
## Deprecated, use `region_engine.mito.index.aux_path` instead. ## Deprecated, use `region_engine.mito.index.aux_path` instead.
intermediate_path = "" intermediate_path = ""
## Cache size for inverted index metadata.
metadata_cache_size = "64MiB"
## Cache size for inverted index content.
content_cache_size = "128MiB"
## Page size for inverted index content cache.
content_cache_page_size = "8MiB"
## The options for full-text index in Mito engine. ## The options for full-text index in Mito engine.
[region_engine.mito.fulltext_index] [region_engine.mito.fulltext_index]
@@ -608,12 +622,6 @@ fork_dictionary_bytes = "1GiB"
## Enable the file engine. ## Enable the file engine.
[region_engine.file] [region_engine.file]
[[region_engine]]
## Metric engine options.
[region_engine.metric]
## Whether to enable the experimental sparse primary key encoding.
experimental_sparse_primary_key_encoding = false
## The logging options. ## The logging options.
[logging] [logging]
## The directory to store the log files. If set to empty, logs will not be written to files. ## The directory to store the log files. If set to empty, logs will not be written to files.

View File

@@ -5,19 +5,13 @@ mode = "distributed"
## @toml2docs:none-default ## @toml2docs:none-default
node_id = 14 node_id = 14
## flow engine options.
[flow]
## The number of flow workers in the flownode.
## Not setting (or setting to 0) this value will use the number of CPU cores divided by 2.
#+num_workers=0
## The gRPC server options. ## The gRPC server options.
[grpc] [grpc]
## The address to bind the gRPC server. ## The address to bind the gRPC server.
bind_addr = "127.0.0.1:6800" addr = "127.0.0.1:6800"
## The address advertised to the metasrv, ## The hostname advertised to the metasrv,
## and used for connections from outside the host ## and used for connections from outside the host
server_addr = "127.0.0.1:6800" hostname = "127.0.0.1"
## The number of server worker threads. ## The number of server worker threads.
runtime_size = 2 runtime_size = 2
## The maximum receive message size for gRPC server. ## The maximum receive message size for gRPC server.
@@ -25,16 +19,6 @@ max_recv_message_size = "512MB"
## The maximum send message size for gRPC server. ## The maximum send message size for gRPC server.
max_send_message_size = "512MB" max_send_message_size = "512MB"
## The HTTP server options.
[http]
## The address to bind the HTTP server.
addr = "127.0.0.1:4000"
## HTTP request timeout. Set to 0 to disable timeout.
timeout = "30s"
## HTTP request body limit.
## The following units are supported: `B`, `KB`, `KiB`, `MB`, `MiB`, `GB`, `GiB`, `TB`, `TiB`, `PB`, `PiB`.
## Set to 0 to disable limit.
body_limit = "64MB"
## The metasrv client options. ## The metasrv client options.
[meta_client] [meta_client]

View File

@@ -31,21 +31,14 @@ timeout = "30s"
## The following units are supported: `B`, `KB`, `KiB`, `MB`, `MiB`, `GB`, `GiB`, `TB`, `TiB`, `PB`, `PiB`. ## The following units are supported: `B`, `KB`, `KiB`, `MB`, `MiB`, `GB`, `GiB`, `TB`, `TiB`, `PB`, `PiB`.
## Set to 0 to disable limit. ## Set to 0 to disable limit.
body_limit = "64MB" body_limit = "64MB"
## HTTP CORS support, it's turned on by default
## This allows browser to access http APIs without CORS restrictions
enable_cors = true
## Customize allowed origins for HTTP CORS.
## @toml2docs:none-default
cors_allowed_origins = ["https://example.com"]
## The gRPC server options. ## The gRPC server options.
[grpc] [grpc]
## The address to bind the gRPC server. ## The address to bind the gRPC server.
bind_addr = "127.0.0.1:4001" addr = "127.0.0.1:4001"
## The address advertised to the metasrv, and used for connections from outside the host. ## The hostname advertised to the metasrv,
## If left empty or unset, the server will automatically use the IP address of the first network interface ## and used for connections from outside the host
## on the host, with the same port number as the one specified in `grpc.bind_addr`. hostname = "127.0.0.1"
server_addr = "127.0.0.1:4001"
## The number of server worker threads. ## The number of server worker threads.
runtime_size = 8 runtime_size = 8
@@ -74,9 +67,6 @@ enable = true
addr = "127.0.0.1:4002" addr = "127.0.0.1:4002"
## The number of server worker threads. ## The number of server worker threads.
runtime_size = 2 runtime_size = 2
## Server-side keep-alive time.
## Set to 0 (default) to disable.
keep_alive = "0s"
# MySQL server TLS options. # MySQL server TLS options.
[mysql.tls] [mysql.tls]
@@ -108,9 +98,6 @@ enable = true
addr = "127.0.0.1:4003" addr = "127.0.0.1:4003"
## The number of server worker threads. ## The number of server worker threads.
runtime_size = 2 runtime_size = 2
## Server-side keep-alive time.
## Set to 0 (default) to disable.
keep_alive = "0s"
## PostgreSQL server TLS options, see `mysql.tls` section. ## PostgreSQL server TLS options, see `mysql.tls` section.
[postgres.tls] [postgres.tls]
@@ -138,11 +125,6 @@ enable = true
## Whether to enable InfluxDB protocol in HTTP API. ## Whether to enable InfluxDB protocol in HTTP API.
enable = true enable = true
## Jaeger protocol options.
[jaeger]
## Whether to enable Jaeger protocol in HTTP API.
enable = true
## Prometheus remote storage options ## Prometheus remote storage options
[prom_store] [prom_store]
## Whether to enable Prometheus remote write and read in HTTP API. ## Whether to enable Prometheus remote write and read in HTTP API.

View File

@@ -4,35 +4,17 @@ data_home = "/tmp/metasrv/"
## The bind address of metasrv. ## The bind address of metasrv.
bind_addr = "127.0.0.1:3002" bind_addr = "127.0.0.1:3002"
## The communication server address for the frontend and datanode to connect to metasrv. ## The communication server address for frontend and datanode to connect to metasrv, "127.0.0.1:3002" by default for localhost.
## If left empty or unset, the server will automatically use the IP address of the first network interface
## on the host, with the same port number as the one specified in `bind_addr`.
server_addr = "127.0.0.1:3002" server_addr = "127.0.0.1:3002"
## Store server address default to etcd store. ## Store server address default to etcd store.
## For postgres store, the format is:
## "password=password dbname=postgres user=postgres host=localhost port=5432"
## For etcd store, the format is:
## "127.0.0.1:2379"
store_addrs = ["127.0.0.1:2379"] store_addrs = ["127.0.0.1:2379"]
## If it's not empty, the metasrv will store all data with this key prefix. ## If it's not empty, the metasrv will store all data with this key prefix.
store_key_prefix = "" store_key_prefix = ""
## The datastore for meta server. ## The datastore for meta server.
## Available values: backend = "EtcdStore"
## - `etcd_store` (default value)
## - `memory_store`
## - `postgres_store`
backend = "etcd_store"
## Table name in RDS to store metadata. Takes effect when using an RDS kvbackend.
## **Only used when backend is `postgres_store`.**
meta_table_name = "greptime_metakv"
## Advisory lock id in PostgreSQL for election. Takes effect when using PostgreSQL as the kvbackend.
## Only used when backend is `postgres_store`.
meta_election_lock_id = 1
## Datanode selector type. ## Datanode selector type.
## - `round_robin` (default value) ## - `round_robin` (default value)
@@ -50,9 +32,6 @@ use_memory_store = false
## - Using shared storage (e.g., s3). ## - Using shared storage (e.g., s3).
enable_region_failover = false enable_region_failover = false
## Max allowed idle time before removing node info from metasrv memory.
node_max_idle_time = "24hours"
## Whether to enable greptimedb telemetry. Enabled by default. ## Whether to enable greptimedb telemetry. Enabled by default.
#+ enable_telemetry = true #+ enable_telemetry = true
@@ -134,8 +113,6 @@ num_topics = 64
selector_type = "round_robin" selector_type = "round_robin"
## A Kafka topic is constructed by concatenating `topic_name_prefix` and `topic_id`. ## A Kafka topic is constructed by concatenating `topic_name_prefix` and `topic_id`.
## Only accepts strings that match the following regular expression pattern:
## [a-zA-Z_:-][a-zA-Z0-9_:\-\.@#]*
## e.g., greptimedb_wal_topic_0, greptimedb_wal_topic_1. ## e.g., greptimedb_wal_topic_0, greptimedb_wal_topic_1.
topic_name_prefix = "greptimedb_wal_topic" topic_name_prefix = "greptimedb_wal_topic"

View File

@@ -39,17 +39,11 @@ timeout = "30s"
## The following units are supported: `B`, `KB`, `KiB`, `MB`, `MiB`, `GB`, `GiB`, `TB`, `TiB`, `PB`, `PiB`. ## The following units are supported: `B`, `KB`, `KiB`, `MB`, `MiB`, `GB`, `GiB`, `TB`, `TiB`, `PB`, `PiB`.
## Set to 0 to disable limit. ## Set to 0 to disable limit.
body_limit = "64MB" body_limit = "64MB"
## HTTP CORS support, it's turned on by default
## This allows browser to access http APIs without CORS restrictions
enable_cors = true
## Customize allowed origins for HTTP CORS.
## @toml2docs:none-default
cors_allowed_origins = ["https://example.com"]
## The gRPC server options. ## The gRPC server options.
[grpc] [grpc]
## The address to bind the gRPC server. ## The address to bind the gRPC server.
bind_addr = "127.0.0.1:4001" addr = "127.0.0.1:4001"
## The number of server worker threads. ## The number of server worker threads.
runtime_size = 8 runtime_size = 8
@@ -78,9 +72,6 @@ enable = true
addr = "127.0.0.1:4002" addr = "127.0.0.1:4002"
## The number of server worker threads. ## The number of server worker threads.
runtime_size = 2 runtime_size = 2
## Server-side keep-alive time.
## Set to 0 (default) to disable.
keep_alive = "0s"
# MySQL server TLS options. # MySQL server TLS options.
[mysql.tls] [mysql.tls]
@@ -112,9 +103,6 @@ enable = true
addr = "127.0.0.1:4003" addr = "127.0.0.1:4003"
## The number of server worker threads. ## The number of server worker threads.
runtime_size = 2 runtime_size = 2
## Server-side keep-alive time.
## Set to 0 (default) to disable.
keep_alive = "0s"
## PostgreSQL server TLS options, see `mysql.tls` section. ## PostgreSQL server TLS options, see `mysql.tls` section.
[postgres.tls] [postgres.tls]
@@ -142,11 +130,6 @@ enable = true
## Whether to enable InfluxDB protocol in HTTP API. ## Whether to enable InfluxDB protocol in HTTP API.
enable = true enable = true
## Jaeger protocol options.
[jaeger]
## Whether to enable Jaeger protocol in HTTP API.
enable = true
## Prometheus remote storage options ## Prometheus remote storage options
[prom_store] [prom_store]
## Whether to enable Prometheus remote write and read in HTTP API. ## Whether to enable Prometheus remote write and read in HTTP API.
@@ -170,11 +153,11 @@ dir = "/tmp/greptimedb/wal"
## **It's only used when the provider is `raft_engine`**. ## **It's only used when the provider is `raft_engine`**.
file_size = "128MB" file_size = "128MB"
## The threshold of the WAL size to trigger a purge. ## The threshold of the WAL size to trigger a flush.
## **It's only used when the provider is `raft_engine`**. ## **It's only used when the provider is `raft_engine`**.
purge_threshold = "1GB" purge_threshold = "1GB"
## The interval to trigger a purge. ## The interval to trigger a flush.
## **It's only used when the provider is `raft_engine`**. ## **It's only used when the provider is `raft_engine`**.
purge_interval = "1m" purge_interval = "1m"
@@ -289,12 +272,10 @@ overwrite_entry_start_id = false
## Metadata storage options. ## Metadata storage options.
[metadata_store] [metadata_store]
## The size of the metadata store log file. ## Kv file size in bytes.
file_size = "64MB" file_size = "256MB"
## The threshold of the metadata store size to trigger a purge. ## Kv purge threshold.
purge_threshold = "256MB" purge_threshold = "4GB"
## The interval of the metadata store to trigger a purge.
purge_interval = "1m"
## Procedure storage options. ## Procedure storage options.
[procedure] [procedure]
@@ -303,12 +284,6 @@ max_retry_times = 3
## Initial retry delay of procedures, increases exponentially ## Initial retry delay of procedures, increases exponentially
retry_delay = "500ms" retry_delay = "500ms"
## flow engine options.
[flow]
## The number of flow workers in the flownode.
## Not setting (or setting to 0) this value will use the number of CPU cores divided by 2.
#+num_workers=0
# Example of using S3 as the storage. # Example of using S3 as the storage.
# [storage] # [storage]
# type = "S3" # type = "S3"
@@ -362,7 +337,7 @@ data_home = "/tmp/greptimedb/"
type = "File" type = "File"
## Read cache configuration for object storage such as 'S3' etc, it's configured by default when using object storage. It is recommended to configure it when using object storage for better performance. ## Read cache configuration for object storage such as 'S3' etc, it's configured by default when using object storage. It is recommended to configure it when using object storage for better performance.
## A local file directory, defaults to `{data_home}`. An empty string means disabling. ## A local file directory, defaults to `{data_home}/object_cache/read`. An empty string means disabling.
## @toml2docs:none-default ## @toml2docs:none-default
#+ cache_path = "" #+ cache_path = ""
@@ -543,18 +518,18 @@ auto_flush_interval = "1h"
## @toml2docs:none-default="Auto" ## @toml2docs:none-default="Auto"
#+ selector_result_cache_size = "512MB" #+ selector_result_cache_size = "512MB"
## Whether to enable the write cache, it's enabled by default when using object storage. It is recommended to enable it when using object storage for better performance. ## Whether to enable the experimental write cache, it's enabled by default when using object storage. It is recommended to enable it when using object storage for better performance.
enable_write_cache = false enable_experimental_write_cache = false
## File system path for write cache, defaults to `{data_home}`. ## File system path for write cache, defaults to `{data_home}/object_cache/write`.
write_cache_path = "" experimental_write_cache_path = ""
## Capacity for write cache. If your disk space is sufficient, it is recommended to set it larger. ## Capacity for write cache. If your disk space is sufficient, it is recommended to set it larger.
write_cache_size = "5GiB" experimental_write_cache_size = "5GiB"
## TTL for write cache. ## TTL for write cache.
## @toml2docs:none-default ## @toml2docs:none-default
write_cache_ttl = "8h" experimental_write_cache_ttl = "8h"
## Buffer size for SST writing. ## Buffer size for SST writing.
sst_write_buffer_size = "8MB" sst_write_buffer_size = "8MB"
@@ -584,20 +559,6 @@ aux_path = ""
## The max capacity of the staging directory. ## The max capacity of the staging directory.
staging_size = "2GB" staging_size = "2GB"
## The TTL of the staging directory.
## Defaults to 7 days.
## Setting it to "0s" to disable TTL.
staging_ttl = "7d"
## Cache size for inverted index metadata.
metadata_cache_size = "64MiB"
## Cache size for inverted index content.
content_cache_size = "128MiB"
## Page size for inverted index content cache.
content_cache_page_size = "64KiB"
## The options for inverted index in Mito engine. ## The options for inverted index in Mito engine.
[region_engine.mito.inverted_index] [region_engine.mito.inverted_index]
@@ -625,6 +586,15 @@ mem_threshold_on_create = "auto"
## Deprecated, use `region_engine.mito.index.aux_path` instead. ## Deprecated, use `region_engine.mito.index.aux_path` instead.
intermediate_path = "" intermediate_path = ""
## Cache size for inverted index metadata.
metadata_cache_size = "64MiB"
## Cache size for inverted index content.
content_cache_size = "128MiB"
## Page size for inverted index content cache.
content_cache_page_size = "8MiB"
## The options for full-text index in Mito engine. ## The options for full-text index in Mito engine.
[region_engine.mito.fulltext_index] [region_engine.mito.fulltext_index]
@@ -695,12 +665,6 @@ fork_dictionary_bytes = "1GiB"
## Enable the file engine. ## Enable the file engine.
[region_engine.file] [region_engine.file]
[[region_engine]]
## Metric engine options.
[region_engine.metric]
## Whether to enable the experimental sparse primary key encoding.
experimental_sparse_primary_key_encoding = false
## The logging options. ## The logging options.
[logging] [logging]
## The directory to store the log files. If set to empty, logs will not be written to files. ## The directory to store the log files. If set to empty, logs will not be written to files.

View File

@@ -1,75 +0,0 @@
/*
* Copyright 2023 Greptime Team
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
import * as core from "@actions/core";
import {obtainClient} from "@/common";
async function triggerWorkflow(workflowId: string, version: string) {
const docsClient = obtainClient("DOCS_REPO_TOKEN")
try {
await docsClient.rest.actions.createWorkflowDispatch({
owner: "GreptimeTeam",
repo: "docs",
workflow_id: workflowId,
ref: "main",
inputs: {
version,
},
});
console.log(`Successfully triggered ${workflowId} workflow with version ${version}`);
} catch (error) {
core.setFailed(`Failed to trigger workflow: ${error.message}`);
}
}
function determineWorkflow(version: string): [string, string] {
// Check if it's a nightly version
if (version.includes('nightly')) {
return ['bump-nightly-version.yml', version];
}
const parts = version.split('.');
if (parts.length !== 3) {
throw new Error('Invalid version format');
}
// If patch version (last number) is 0, it's a major version
// Return only major.minor version
if (parts[2] === '0') {
return ['bump-version.yml', `${parts[0]}.${parts[1]}`];
}
// Otherwise it's a patch version, use full version
return ['bump-patch-version.yml', version];
}
const version = process.env.VERSION;
if (!version) {
core.setFailed("VERSION environment variable is required");
process.exit(1);
}
// Remove 'v' prefix if exists
const cleanVersion = version.startsWith('v') ? version.slice(1) : version;
try {
const [workflowId, apiVersion] = determineWorkflow(cleanVersion);
triggerWorkflow(workflowId, apiVersion);
} catch (error) {
core.setFailed(`Error processing version: ${error.message}`);
process.exit(1);
}

View File

@@ -22,7 +22,7 @@ RUN unzip protoc-3.15.8-linux-x86_64.zip -d /usr/local/
# Install Rust # Install Rust
SHELL ["/bin/bash", "-c"] SHELL ["/bin/bash", "-c"]
RUN curl --proto '=https' --tlsv1.2 -sSf https://sh.rustup.rs | sh -s -- --no-modify-path --default-toolchain none -y RUN curl --proto '=https' --tlsv1.2 -sSf https://sh.rustup.rs | sh -s -- --no-modify-path --default-toolchain none -y
ENV PATH /usr/local/bin:/root/.cargo/bin/:$PATH ENV PATH /opt/rh/rh-python38/root/usr/bin:/usr/local/bin:/root/.cargo/bin/:$PATH
# Build the project in release mode. # Build the project in release mode.
RUN --mount=target=.,rw \ RUN --mount=target=.,rw \

View File

@@ -7,8 +7,10 @@ ARG OUTPUT_DIR
ENV LANG en_US.utf8 ENV LANG en_US.utf8
WORKDIR /greptimedb WORKDIR /greptimedb
# Add PPA for Python 3.10.
RUN apt-get update && \ RUN apt-get update && \
DEBIAN_FRONTEND=noninteractive apt-get install -y software-properties-common DEBIAN_FRONTEND=noninteractive apt-get install -y software-properties-common && \
add-apt-repository ppa:deadsnakes/ppa -y
# Install dependencies. # Install dependencies.
RUN --mount=type=cache,target=/var/cache/apt \ RUN --mount=type=cache,target=/var/cache/apt \

View File

@@ -9,20 +9,16 @@ RUN cp ${NDK_ROOT}/toolchains/llvm/prebuilt/linux-x86_64/lib64/clang/14.0.7/lib/
# Install dependencies. # Install dependencies.
RUN apt-get update && apt-get install -y \ RUN apt-get update && apt-get install -y \
libssl-dev \ libssl-dev \
protobuf-compiler \
curl \ curl \
git \ git \
unzip \
build-essential \ build-essential \
pkg-config pkg-config \
python3 \
# Install protoc python3-dev \
ARG PROTOBUF_VERSION=29.3 python3-pip \
&& pip3 install --upgrade pip \
RUN curl -OL https://github.com/protocolbuffers/protobuf/releases/download/v${PROTOBUF_VERSION}/protoc-${PROTOBUF_VERSION}-linux-x86_64.zip && \ && pip3 install pyarrow
unzip protoc-${PROTOBUF_VERSION}-linux-x86_64.zip -d protoc3;
RUN mv protoc3/bin/* /usr/local/bin/
RUN mv protoc3/include/* /usr/local/include/
# Trust workdir # Trust workdir
RUN git config --global --add safe.directory /greptimedb RUN git config --global --add safe.directory /greptimedb

View File

@@ -12,21 +12,18 @@ RUN yum install -y epel-release \
openssl \ openssl \
openssl-devel \ openssl-devel \
centos-release-scl \ centos-release-scl \
rh-python38 \
rh-python38-python-devel \
which which
# Install protoc # Install protoc
ARG PROTOBUF_VERSION=29.3 RUN curl -LO https://github.com/protocolbuffers/protobuf/releases/download/v3.15.8/protoc-3.15.8-linux-x86_64.zip
RUN unzip protoc-3.15.8-linux-x86_64.zip -d /usr/local/
RUN curl -OL https://github.com/protocolbuffers/protobuf/releases/download/v${PROTOBUF_VERSION}/protoc-${PROTOBUF_VERSION}-linux-x86_64.zip && \
unzip protoc-${PROTOBUF_VERSION}-linux-x86_64.zip -d protoc3;
RUN mv protoc3/bin/* /usr/local/bin/
RUN mv protoc3/include/* /usr/local/include/
# Install Rust # Install Rust
SHELL ["/bin/bash", "-c"] SHELL ["/bin/bash", "-c"]
RUN curl --proto '=https' --tlsv1.2 -sSf https://sh.rustup.rs | sh -s -- --no-modify-path --default-toolchain none -y RUN curl --proto '=https' --tlsv1.2 -sSf https://sh.rustup.rs | sh -s -- --no-modify-path --default-toolchain none -y
ENV PATH /usr/local/bin:/root/.cargo/bin/:$PATH ENV PATH /opt/rh/rh-python38/root/usr/bin:/usr/local/bin:/root/.cargo/bin/:$PATH
# Install Rust toolchains. # Install Rust toolchains.
ARG RUST_TOOLCHAIN ARG RUST_TOOLCHAIN

View File

@@ -1,4 +1,4 @@
FROM ubuntu:22.04 FROM ubuntu:20.04
# The root path under which contains all the dependencies to build this Dockerfile. # The root path under which contains all the dependencies to build this Dockerfile.
ARG DOCKER_BUILD_ROOT=. ARG DOCKER_BUILD_ROOT=.
@@ -6,8 +6,11 @@ ARG DOCKER_BUILD_ROOT=.
ENV LANG en_US.utf8 ENV LANG en_US.utf8
WORKDIR /greptimedb WORKDIR /greptimedb
# Add PPA for Python 3.10.
RUN apt-get update && \ RUN apt-get update && \
DEBIAN_FRONTEND=noninteractive apt-get install -y software-properties-common DEBIAN_FRONTEND=noninteractive apt-get install -y software-properties-common && \
add-apt-repository ppa:deadsnakes/ppa -y
# Install dependencies. # Install dependencies.
RUN apt-get update && DEBIAN_FRONTEND=noninteractive apt-get install -y \ RUN apt-get update && DEBIAN_FRONTEND=noninteractive apt-get install -y \
libssl-dev \ libssl-dev \
@@ -17,36 +20,55 @@ RUN apt-get update && DEBIAN_FRONTEND=noninteractive apt-get install -y \
ca-certificates \ ca-certificates \
git \ git \
build-essential \ build-essential \
pkg-config pkg-config \
python3.10 \
python3.10-dev
ARG TARGETPLATFORM ARG TARGETPLATFORM
RUN echo "target platform: $TARGETPLATFORM" RUN echo "target platform: $TARGETPLATFORM"
ARG PROTOBUF_VERSION=29.3
# Install protobuf, because the one in the apt is too old (v3.12). # Install protobuf, because the one in the apt is too old (v3.12).
RUN if [ "$TARGETPLATFORM" = "linux/arm64" ]; then \ RUN if [ "$TARGETPLATFORM" = "linux/arm64" ]; then \
curl -OL https://github.com/protocolbuffers/protobuf/releases/download/v${PROTOBUF_VERSION}/protoc-${PROTOBUF_VERSION}-linux-aarch_64.zip && \ curl -OL https://github.com/protocolbuffers/protobuf/releases/download/v29.1/protoc-29.1-linux-aarch_64.zip && \
unzip protoc-${PROTOBUF_VERSION}-linux-aarch_64.zip -d protoc3; \ unzip protoc-29.1-linux-aarch_64.zip -d protoc3; \
elif [ "$TARGETPLATFORM" = "linux/amd64" ]; then \ elif [ "$TARGETPLATFORM" = "linux/amd64" ]; then \
curl -OL https://github.com/protocolbuffers/protobuf/releases/download/v${PROTOBUF_VERSION}/protoc-${PROTOBUF_VERSION}-linux-x86_64.zip && \ curl -OL https://github.com/protocolbuffers/protobuf/releases/download/v29.1/protoc-29.1-linux-x86_64.zip && \
unzip protoc-${PROTOBUF_VERSION}-linux-x86_64.zip -d protoc3; \ unzip protoc-29.1-linux-x86_64.zip -d protoc3; \
fi fi
RUN mv protoc3/bin/* /usr/local/bin/ RUN mv protoc3/bin/* /usr/local/bin/
RUN mv protoc3/include/* /usr/local/include/ RUN mv protoc3/include/* /usr/local/include/
# https://github.com/GreptimeTeam/greptimedb/actions/runs/10935485852/job/30357457188#step:3:7106
# `aws-lc-sys` require gcc >= 10.3.0 to work, hence alias to use gcc-10
RUN apt-get remove -y gcc-9 g++-9 cpp-9 && \
apt-get install -y gcc-10 g++-10 cpp-10 make cmake && \
ln -sf /usr/bin/gcc-10 /usr/bin/gcc && ln -sf /usr/bin/g++-10 /usr/bin/g++ && \
ln -sf /usr/bin/gcc-10 /usr/bin/cc && \
ln -sf /usr/bin/g++-10 /usr/bin/cpp && ln -sf /usr/bin/g++-10 /usr/bin/c++ && \
cc --version && gcc --version && g++ --version && cpp --version && c++ --version
# Remove Python 3.8 and install pip.
RUN apt-get -y purge python3.8 && \
apt-get -y autoremove && \
ln -s /usr/bin/python3.10 /usr/bin/python3 && \
curl -sS https://bootstrap.pypa.io/get-pip.py | python3.10
# Silence all `safe.directory` warnings, to avoid the "detect dubious repository" error when building with submodules. # Silence all `safe.directory` warnings, to avoid the "detect dubious repository" error when building with submodules.
# Disabling the safe directory check here won't pose extra security issues, because in our usage for this dev build # Disabling the safe directory check here won't pose extra security issues, because in our usage for this dev build
# image, we use it solely on our own environment (that github action's VM, or ECS created dynamically by ourselves), # image, we use it solely on our own environment (that github action's VM, or ECS created dynamically by ourselves),
# and the repositories are pulled from trusted sources (still us, of course). Doing so does not violate the intention # and the repositories are pulled from trusted sources (still us, of course). Doing so does not violate the intention
# of the Git's addition to the "safe.directory" at the first place (see the commit message here: # of the Git's addition to the "safe.directory" at the first place (see the commit message here:
# https://github.com/git/git/commit/8959555cee7ec045958f9b6dd62e541affb7e7d9). # https://github.com/git/git/commit/8959555cee7ec045958f9b6dd62e541affb7e7d9).
# There's also another solution to this, that we add the desired submodules to the safe directory, instead of using # There's also another solution to this, that we add the desired submodules to the safe directory, instead of using
# wildcard here. However, that requires the git's config files and the submodules all owned by the very same user. # wildcard here. However, that requires the git's config files and the submodules all owned by the very same user.
# It's troublesome to do this since the dev build runs in Docker, which is under user "root"; while outside the Docker, # It's troublesome to do this since the dev build runs in Docker, which is under user "root"; while outside the Docker,
# it can be a different user that have prepared the submodules. # it can be a different user that have prepared the submodules.
RUN git config --global --add safe.directory '*' RUN git config --global --add safe.directory '*'
# Install Python dependencies.
COPY $DOCKER_BUILD_ROOT/docker/python/requirements.txt /etc/greptime/requirements.txt
RUN python3 -m pip install -r /etc/greptime/requirements.txt
# Install Rust. # Install Rust.
SHELL ["/bin/bash", "-c"] SHELL ["/bin/bash", "-c"]
RUN curl --proto '=https' --tlsv1.2 -sSf https://sh.rustup.rs | sh -s -- --no-modify-path --default-toolchain none -y RUN curl --proto '=https' --tlsv1.2 -sSf https://sh.rustup.rs | sh -s -- --no-modify-path --default-toolchain none -y

View File

@@ -0,0 +1,51 @@
# Use the legacy glibc 2.28.
FROM ubuntu:18.10
ENV LANG en_US.utf8
WORKDIR /greptimedb
# Use old-releases.ubuntu.com to avoid 404s: https://help.ubuntu.com/community/EOLUpgrades.
RUN echo "deb http://old-releases.ubuntu.com/ubuntu/ cosmic main restricted universe multiverse\n\
deb http://old-releases.ubuntu.com/ubuntu/ cosmic-updates main restricted universe multiverse\n\
deb http://old-releases.ubuntu.com/ubuntu/ cosmic-security main restricted universe multiverse" > /etc/apt/sources.list
# Install dependencies.
RUN apt-get update && DEBIAN_FRONTEND=noninteractive apt-get install -y \
libssl-dev \
tzdata \
curl \
ca-certificates \
git \
build-essential \
unzip \
pkg-config
# Install protoc.
ENV PROTOC_VERSION=25.1
RUN if [ "$(uname -m)" = "x86_64" ]; then \
PROTOC_ZIP=protoc-${PROTOC_VERSION}-linux-x86_64.zip; \
elif [ "$(uname -m)" = "aarch64" ]; then \
PROTOC_ZIP=protoc-${PROTOC_VERSION}-linux-aarch_64.zip; \
else \
echo "Unsupported architecture"; exit 1; \
fi && \
curl -OL https://github.com/protocolbuffers/protobuf/releases/download/v${PROTOC_VERSION}/${PROTOC_ZIP} && \
unzip -o ${PROTOC_ZIP} -d /usr/local bin/protoc && \
unzip -o ${PROTOC_ZIP} -d /usr/local 'include/*' && \
rm -f ${PROTOC_ZIP}
# Install Rust.
SHELL ["/bin/bash", "-c"]
RUN curl --proto '=https' --tlsv1.2 -sSf https://sh.rustup.rs | sh -s -- --no-modify-path --default-toolchain none -y
ENV PATH /root/.cargo/bin/:$PATH
# Install Rust toolchains.
ARG RUST_TOOLCHAIN
RUN rustup toolchain install ${RUST_TOOLCHAIN}
# Install cargo-binstall with a specific version to adapt the current rust toolchain.
# Note: if we use the latest version, we may encounter the following `use of unstable library feature 'io_error_downcast'` error.
RUN cargo install cargo-binstall --version 1.6.6 --locked
# Install nextest.
RUN cargo binstall cargo-nextest --no-confirm

View File

@@ -1,66 +0,0 @@
FROM ubuntu:20.04
# The root path under which contains all the dependencies to build this Dockerfile.
ARG DOCKER_BUILD_ROOT=.
ENV LANG en_US.utf8
WORKDIR /greptimedb
RUN apt-get update && \
DEBIAN_FRONTEND=noninteractive apt-get install -y software-properties-common
# Install dependencies.
RUN apt-get update && DEBIAN_FRONTEND=noninteractive apt-get install -y \
libssl-dev \
tzdata \
curl \
unzip \
ca-certificates \
git \
build-essential \
pkg-config
ARG TARGETPLATFORM
RUN echo "target platform: $TARGETPLATFORM"
ARG PROTOBUF_VERSION=29.3
# Install protobuf, because the one in the apt is too old (v3.12).
RUN if [ "$TARGETPLATFORM" = "linux/arm64" ]; then \
curl -OL https://github.com/protocolbuffers/protobuf/releases/download/v${PROTOBUF_VERSION}/protoc-${PROTOBUF_VERSION}-linux-aarch_64.zip && \
unzip protoc-${PROTOBUF_VERSION}-linux-aarch_64.zip -d protoc3; \
elif [ "$TARGETPLATFORM" = "linux/amd64" ]; then \
curl -OL https://github.com/protocolbuffers/protobuf/releases/download/v${PROTOBUF_VERSION}/protoc-${PROTOBUF_VERSION}-linux-x86_64.zip && \
unzip protoc-${PROTOBUF_VERSION}-linux-x86_64.zip -d protoc3; \
fi
RUN mv protoc3/bin/* /usr/local/bin/
RUN mv protoc3/include/* /usr/local/include/
# Silence all `safe.directory` warnings, to avoid the "detect dubious repository" error when building with submodules.
# Disabling the safe directory check here won't pose extra security issues, because in our usage for this dev build
# image, we use it solely on our own environment (that github action's VM, or ECS created dynamically by ourselves),
# and the repositories are pulled from trusted sources (still us, of course). Doing so does not violate the intention
# of the Git's addition to the "safe.directory" at the first place (see the commit message here:
# https://github.com/git/git/commit/8959555cee7ec045958f9b6dd62e541affb7e7d9).
# There's also another solution to this, that we add the desired submodules to the safe directory, instead of using
# wildcard here. However, that requires the git's config files and the submodules all owned by the very same user.
# It's troublesome to do this since the dev build runs in Docker, which is under user "root"; while outside the Docker,
# it can be a different user that have prepared the submodules.
RUN git config --global --add safe.directory '*'
# Install Rust.
SHELL ["/bin/bash", "-c"]
RUN curl --proto '=https' --tlsv1.2 -sSf https://sh.rustup.rs | sh -s -- --no-modify-path --default-toolchain none -y
ENV PATH /root/.cargo/bin/:$PATH
# Install Rust toolchains.
ARG RUST_TOOLCHAIN
RUN rustup toolchain install ${RUST_TOOLCHAIN}
# Install cargo-binstall with a specific version to adapt the current rust toolchain.
# Note: if we use the latest version, we may encounter the following `use of unstable library feature 'io_error_downcast'` error.
# compile from source take too long, so we use the precompiled binary instead
COPY $DOCKER_BUILD_ROOT/docker/dev-builder/binstall/pull_binstall.sh /usr/local/bin/pull_binstall.sh
RUN chmod +x /usr/local/bin/pull_binstall.sh && /usr/local/bin/pull_binstall.sh
# Install nextest.
RUN cargo binstall cargo-nextest --no-confirm

View File

@@ -39,16 +39,14 @@ services:
container_name: metasrv container_name: metasrv
ports: ports:
- 3002:3002 - 3002:3002
- 3000:3000
command: command:
- metasrv - metasrv
- start - start
- --rpc-bind-addr=0.0.0.0:3002 - --bind-addr=0.0.0.0:3002
- --rpc-server-addr=metasrv:3002 - --server-addr=metasrv:3002
- --store-addrs=etcd0:2379 - --store-addrs=etcd0:2379
- --http-addr=0.0.0.0:3000
healthcheck: healthcheck:
test: [ "CMD", "curl", "-f", "http://metasrv:3000/health" ] test: [ "CMD", "curl", "-f", "http://metasrv:3002/health" ]
interval: 5s interval: 5s
timeout: 3s timeout: 3s
retries: 5 retries: 5
@@ -68,17 +66,17 @@ services:
- datanode - datanode
- start - start
- --node-id=0 - --node-id=0
- --rpc-bind-addr=0.0.0.0:3001 - --rpc-addr=0.0.0.0:3001
- --rpc-server-addr=datanode0:3001 - --rpc-hostname=datanode0:3001
- --metasrv-addrs=metasrv:3002 - --metasrv-addrs=metasrv:3002
- --http-addr=0.0.0.0:5000 - --http-addr=0.0.0.0:5000
volumes: volumes:
- /tmp/greptimedb-cluster-docker-compose/datanode0:/tmp/greptimedb - /tmp/greptimedb-cluster-docker-compose/datanode0:/tmp/greptimedb
healthcheck: healthcheck:
test: [ "CMD", "curl", "-fv", "http://datanode0:5000/health" ] test: [ "CMD", "curl", "-f", "http://datanode0:5000/health" ]
interval: 5s interval: 5s
timeout: 3s timeout: 3s
retries: 10 retries: 5
depends_on: depends_on:
metasrv: metasrv:
condition: service_healthy condition: service_healthy
@@ -98,7 +96,7 @@ services:
- start - start
- --metasrv-addrs=metasrv:3002 - --metasrv-addrs=metasrv:3002
- --http-addr=0.0.0.0:4000 - --http-addr=0.0.0.0:4000
- --rpc-bind-addr=0.0.0.0:4001 - --rpc-addr=0.0.0.0:4001
- --mysql-addr=0.0.0.0:4002 - --mysql-addr=0.0.0.0:4002
- --postgres-addr=0.0.0.0:4003 - --postgres-addr=0.0.0.0:4003
healthcheck: healthcheck:
@@ -117,23 +115,16 @@ services:
container_name: flownode0 container_name: flownode0
ports: ports:
- 4004:4004 - 4004:4004
- 4005:4005
command: command:
- flownode - flownode
- start - start
- --node-id=0 - --node-id=0
- --metasrv-addrs=metasrv:3002 - --metasrv-addrs=metasrv:3002
- --rpc-bind-addr=0.0.0.0:4004 - --rpc-addr=0.0.0.0:4004
- --rpc-server-addr=flownode0:4004 - --rpc-hostname=flownode0:4004
- --http-addr=0.0.0.0:4005
depends_on: depends_on:
frontend0: frontend0:
condition: service_healthy condition: service_healthy
healthcheck:
test: [ "CMD", "curl", "-f", "http://flownode0:4005/health" ]
interval: 5s
timeout: 3s
retries: 5
networks: networks:
- greptimedb - greptimedb


@@ -0,0 +1,5 @@
numpy>=1.24.2
pandas>=1.5.3
pyarrow>=11.0.0
requests>=2.28.2
scipy>=1.10.1


@@ -1,40 +0,0 @@
# TSBS benchmark - v0.12.0
## Environment
### Amazon EC2
| | |
|---------|-------------------------|
| Machine | c5d.2xlarge |
| CPU | 8 core |
| Memory | 16GB |
| Disk | 100GB (GP3) |
| OS | Ubuntu Server 24.04 LTS |
## Write performance
| Environment | Ingest rate (rows/s) |
|-----------------|----------------------|
| EC2 c5d.2xlarge | 326839.28 |
## Query performance
| Query type | EC2 c5d.2xlarge (ms) |
|-----------------------|----------------------|
| cpu-max-all-1 | 12.46 |
| cpu-max-all-8 | 24.20 |
| double-groupby-1 | 673.08 |
| double-groupby-5 | 963.99 |
| double-groupby-all | 1330.05 |
| groupby-orderby-limit | 952.46 |
| high-cpu-1 | 5.08 |
| high-cpu-all | 4638.57 |
| lastpoint | 591.02 |
| single-groupby-1-1-1 | 4.06 |
| single-groupby-1-1-12 | 4.73 |
| single-groupby-1-8-1 | 8.23 |
| single-groupby-5-1-1 | 4.61 |
| single-groupby-5-1-12 | 5.61 |
| single-groupby-5-8-1 | 9.74 |


@@ -20,3 +20,31 @@ Sample at 49 Hertz, for 10 seconds, output report in text format.
```bash ```bash
curl -X POST -s '0:4000/debug/prof/cpu?seconds=10&frequency=49&output=text' > /tmp/pprof.txt curl -X POST -s '0:4000/debug/prof/cpu?seconds=10&frequency=49&output=text' > /tmp/pprof.txt
``` ```
## Using `perf`
First find the pid of GreptimeDB:
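For example (one common way, assuming the server process is named `greptime`):
```bash
# Look up the pid of the running GreptimeDB process.
pgrep greptime
```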
Then use `perf record` to profile GreptimeDB at a sampling frequency of 99 Hz for a duration of 60 seconds:
```bash
perf record -p <pid> --call-graph dwarf -F 99 -- sleep 60
```
The result will be saved to the file `perf.data`.
Then convert it into a plain-text form with `perf script`:
```bash
perf script --no-inline > perf.out
```
Produce a flame graph out of it:
```bash
git clone https://github.com/brendangregg/FlameGraph
FlameGraph/stackcollapse-perf.pl perf.out > perf.folded
FlameGraph/flamegraph.pl perf.folded > perf.svg
```


@@ -4,16 +4,6 @@ This crate provides an easy approach to dump memory profiling info.
## Prerequisites ## Prerequisites
### jemalloc ### jemalloc
`jeprof` is already compiled under GreptimeDB's target directory during the build; you can find the binary and use it directly.
```
# find jeprof binary
find . -name 'jeprof'
# add executable permission
chmod +x <path_to_jeprof>
```
The path is usually under `./target/${PROFILE}/build/tikv-jemalloc-sys-${HASH}/out/build/bin/jeprof`.
The jemalloc build installed from a package manager may ship a `jeprof` that lacks the `--collapsed` option.
If you want to install it from the package manager, check whether its `jeprof` version is >= `5.3.0`.
```bash ```bash
# for macOS # for macOS
brew install jemalloc brew install jemalloc
@@ -33,11 +23,7 @@ curl https://raw.githubusercontent.com/brendangregg/FlameGraph/master/flamegraph
Start GreptimeDB instance with environment variables: Start GreptimeDB instance with environment variables:
```bash ```bash
# for Linux
MALLOC_CONF=prof:true ./target/debug/greptime standalone start MALLOC_CONF=prof:true ./target/debug/greptime standalone start
# for macOS
_RJEM_MALLOC_CONF=prof:true ./target/debug/greptime standalone start
``` ```
Dump memory profiling data through HTTP API: Dump memory profiling data through HTTP API:
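As a minimal sketch, assuming the memory endpoint mirrors the `/debug/prof/cpu` path shown earlier in this document:
```bash
# Dump a heap profile from an instance started with profiling enabled.
curl -X POST 'localhost:4000/debug/prof/mem' > greptime.hprof
```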

Binary image file changed (25 KiB → 36 KiB); contents not shown.

docs/logo-text-padding.png (mode changed: Normal file → Executable file): binary image changed (21 KiB → 25 KiB); contents not shown.

flake.lock (generated)

@@ -1,100 +0,0 @@
{
"nodes": {
"fenix": {
"inputs": {
"nixpkgs": [
"nixpkgs"
],
"rust-analyzer-src": "rust-analyzer-src"
},
"locked": {
"lastModified": 1737613896,
"narHash": "sha256-ldqXIglq74C7yKMFUzrS9xMT/EVs26vZpOD68Sh7OcU=",
"owner": "nix-community",
"repo": "fenix",
"rev": "303a062fdd8e89f233db05868468975d17855d80",
"type": "github"
},
"original": {
"owner": "nix-community",
"repo": "fenix",
"type": "github"
}
},
"flake-utils": {
"inputs": {
"systems": "systems"
},
"locked": {
"lastModified": 1731533236,
"narHash": "sha256-l0KFg5HjrsfsO/JpG+r7fRrqm12kzFHyUHqHCVpMMbI=",
"owner": "numtide",
"repo": "flake-utils",
"rev": "11707dc2f618dd54ca8739b309ec4fc024de578b",
"type": "github"
},
"original": {
"owner": "numtide",
"repo": "flake-utils",
"type": "github"
}
},
"nixpkgs": {
"locked": {
"lastModified": 1737569578,
"narHash": "sha256-6qY0pk2QmUtBT9Mywdvif0i/CLVgpCjMUn6g9vB+f3M=",
"owner": "NixOS",
"repo": "nixpkgs",
"rev": "47addd76727f42d351590c905d9d1905ca895b82",
"type": "github"
},
"original": {
"owner": "NixOS",
"ref": "nixos-24.11",
"repo": "nixpkgs",
"type": "github"
}
},
"root": {
"inputs": {
"fenix": "fenix",
"flake-utils": "flake-utils",
"nixpkgs": "nixpkgs"
}
},
"rust-analyzer-src": {
"flake": false,
"locked": {
"lastModified": 1737581772,
"narHash": "sha256-t1P2Pe3FAX9TlJsCZbmJ3wn+C4qr6aSMypAOu8WNsN0=",
"owner": "rust-lang",
"repo": "rust-analyzer",
"rev": "582af7ee9c8d84f5d534272fc7de9f292bd849be",
"type": "github"
},
"original": {
"owner": "rust-lang",
"ref": "nightly",
"repo": "rust-analyzer",
"type": "github"
}
},
"systems": {
"locked": {
"lastModified": 1681028828,
"narHash": "sha256-Vy1rq5AaRuLzOxct8nz4T6wlgyUR7zLU309k9mBC768=",
"owner": "nix-systems",
"repo": "default",
"rev": "da67096a3b9bf56a91d16901293e51ba5b49a27e",
"type": "github"
},
"original": {
"owner": "nix-systems",
"repo": "default",
"type": "github"
}
}
},
"root": "root",
"version": 7
}


@@ -1,56 +0,0 @@
{
description = "Development environment flake";
inputs = {
nixpkgs.url = "github:NixOS/nixpkgs/nixos-24.11";
fenix = {
url = "github:nix-community/fenix";
inputs.nixpkgs.follows = "nixpkgs";
};
flake-utils.url = "github:numtide/flake-utils";
};
outputs = { self, nixpkgs, fenix, flake-utils }:
flake-utils.lib.eachDefaultSystem (system:
let
pkgs = nixpkgs.legacyPackages.${system};
buildInputs = with pkgs; [
libgit2
libz
];
lib = nixpkgs.lib;
rustToolchain = fenix.packages.${system}.fromToolchainName {
name = (lib.importTOML ./rust-toolchain.toml).toolchain.channel;
sha256 = "sha256-f/CVA1EC61EWbh0SjaRNhLL0Ypx2ObupbzigZp8NmL4=";
};
in
{
devShells.default = pkgs.mkShell {
nativeBuildInputs = with pkgs; [
pkg-config
git
clang
gcc
protobuf
gnumake
mold
(rustToolchain.withComponents [
"cargo"
"clippy"
"rust-src"
"rustc"
"rustfmt"
"rust-analyzer"
"llvm-tools"
])
cargo-nextest
cargo-llvm-cov
taplo
curl
gnuplot ## for cargo bench
];
LD_LIBRARY_PATH = pkgs.lib.makeLibraryPath buildInputs;
};
});
}

File diff suppressed because it is too large.

File diff suppressed because it is too large.


@@ -1,2 +1,3 @@
[toolchain] [toolchain]
channel = "nightly-2024-12-25" channel = "nightly-2024-10-19"
components = ["rust-analyzer"]


@@ -14,7 +14,6 @@
import os import os
import re import re
from multiprocessing import Pool
def find_rust_files(directory): def find_rust_files(directory):
@@ -34,11 +33,13 @@ def extract_branch_names(file_content):
return pattern.findall(file_content) return pattern.findall(file_content)
def check_snafu_in_files(branch_name, rust_files_content): def check_snafu_in_files(branch_name, rust_files):
branch_name_snafu = f"{branch_name}Snafu" branch_name_snafu = f"{branch_name}Snafu"
for content in rust_files_content.values(): for rust_file in rust_files:
if branch_name_snafu in content: with open(rust_file, "r") as file:
return True content = file.read()
if branch_name_snafu in content:
return True
return False return False
@@ -48,24 +49,21 @@ def main():
for error_file in error_files: for error_file in error_files:
with open(error_file, "r") as file: with open(error_file, "r") as file:
branch_names.extend(extract_branch_names(file.read())) content = file.read()
branch_names.extend(extract_branch_names(content))
# Read all rust files into memory once unused_snafu = [
rust_files_content = {} branch_name
for rust_file in other_rust_files: for branch_name in branch_names
with open(rust_file, "r") as file: if not check_snafu_in_files(branch_name, other_rust_files)
rust_files_content[rust_file] = file.read() ]
with Pool() as pool:
results = pool.starmap(
check_snafu_in_files, [(bn, rust_files_content) for bn in branch_names]
)
unused_snafu = [bn for bn, found in zip(branch_names, results) if not found]
if unused_snafu: if unused_snafu:
print("Unused error variants:") print("Unused error variants:")
for name in unused_snafu: for name in unused_snafu:
print(name) print(name)
if unused_snafu:
raise SystemExit(1) raise SystemExit(1)

shell.nix (new file)

@@ -0,0 +1,27 @@
let
nixpkgs = fetchTarball "https://github.com/NixOS/nixpkgs/tarball/nixos-unstable";
fenix = import (fetchTarball "https://github.com/nix-community/fenix/archive/main.tar.gz") {};
pkgs = import nixpkgs { config = {}; overlays = []; };
in
pkgs.mkShell rec {
nativeBuildInputs = with pkgs; [
pkg-config
git
clang
gcc
protobuf
mold
(fenix.fromToolchainFile {
dir = ./.;
})
cargo-nextest
taplo
];
buildInputs = with pkgs; [
libgit2
];
LD_LIBRARY_PATH = pkgs.lib.makeLibraryPath buildInputs;
}


@@ -15,10 +15,13 @@ common-macro.workspace = true
common-time.workspace = true common-time.workspace = true
datatypes.workspace = true datatypes.workspace = true
greptime-proto.workspace = true greptime-proto.workspace = true
paste.workspace = true paste = "1.0"
prost.workspace = true prost.workspace = true
serde_json.workspace = true serde_json.workspace = true
snafu.workspace = true snafu.workspace = true
[build-dependencies] [build-dependencies]
tonic-build = "0.11" tonic-build = "0.11"
[dev-dependencies]
paste = "1.0"


@@ -33,7 +33,7 @@ pub enum Error {
#[snafu(implicit)] #[snafu(implicit)]
location: Location, location: Location,
#[snafu(source)] #[snafu(source)]
error: prost::UnknownEnumValue, error: prost::DecodeError,
}, },
#[snafu(display("Failed to create column datatype from {:?}", from))] #[snafu(display("Failed to create column datatype from {:?}", from))]


@@ -86,7 +86,7 @@ impl ColumnDataTypeWrapper {
/// Get a tuple of ColumnDataType and ColumnDataTypeExtension. /// Get a tuple of ColumnDataType and ColumnDataTypeExtension.
pub fn to_parts(&self) -> (ColumnDataType, Option<ColumnDataTypeExtension>) { pub fn to_parts(&self) -> (ColumnDataType, Option<ColumnDataTypeExtension>) {
(self.datatype, self.datatype_ext) (self.datatype, self.datatype_ext.clone())
} }
} }
@@ -685,18 +685,14 @@ pub fn pb_values_to_vector_ref(data_type: &ConcreteDataType, values: Values) ->
IntervalType::YearMonth(_) => Arc::new(IntervalYearMonthVector::from_vec( IntervalType::YearMonth(_) => Arc::new(IntervalYearMonthVector::from_vec(
values.interval_year_month_values, values.interval_year_month_values,
)), )),
IntervalType::DayTime(_) => Arc::new(IntervalDayTimeVector::from_iter_values( IntervalType::DayTime(_) => Arc::new(IntervalDayTimeVector::from_vec(
values values.interval_day_time_values,
.interval_day_time_values
.iter()
.map(|x| IntervalDayTime::from_i64(*x).into()),
)), )),
IntervalType::MonthDayNano(_) => { IntervalType::MonthDayNano(_) => {
Arc::new(IntervalMonthDayNanoVector::from_iter_values( Arc::new(IntervalMonthDayNanoVector::from_iter_values(
values values.interval_month_day_nano_values.iter().map(|x| {
.interval_month_day_nano_values IntervalMonthDayNano::new(x.months, x.days, x.nanoseconds).to_i128()
.iter() }),
.map(|x| IntervalMonthDayNano::new(x.months, x.days, x.nanoseconds).into()),
)) ))
} }
}, },
@@ -1499,22 +1495,14 @@ mod tests {
column.values.as_ref().unwrap().interval_year_month_values column.values.as_ref().unwrap().interval_year_month_values
); );
let vector = Arc::new(IntervalDayTimeVector::from_vec(vec![ let vector = Arc::new(IntervalDayTimeVector::from_vec(vec![4, 5, 6]));
IntervalDayTime::new(0, 4).into(),
IntervalDayTime::new(0, 5).into(),
IntervalDayTime::new(0, 6).into(),
]));
push_vals(&mut column, 3, vector); push_vals(&mut column, 3, vector);
assert_eq!( assert_eq!(
vec![4, 5, 6], vec![4, 5, 6],
column.values.as_ref().unwrap().interval_day_time_values column.values.as_ref().unwrap().interval_day_time_values
); );
let vector = Arc::new(IntervalMonthDayNanoVector::from_vec(vec![ let vector = Arc::new(IntervalMonthDayNanoVector::from_vec(vec![7, 8, 9]));
IntervalMonthDayNano::new(0, 0, 7).into(),
IntervalMonthDayNano::new(0, 0, 8).into(),
IntervalMonthDayNano::new(0, 0, 9).into(),
]));
let len = vector.len(); let len = vector.len();
push_vals(&mut column, 3, vector); push_vals(&mut column, 3, vector);
(0..len).for_each(|i| { (0..len).for_each(|i| {


@@ -15,10 +15,10 @@
use std::collections::HashMap; use std::collections::HashMap;
use datatypes::schema::{ use datatypes::schema::{
ColumnDefaultConstraint, ColumnSchema, FulltextAnalyzer, FulltextOptions, SkippingIndexOptions, ColumnDefaultConstraint, ColumnSchema, FulltextAnalyzer, FulltextOptions, COMMENT_KEY,
SkippingIndexType, COMMENT_KEY, FULLTEXT_KEY, INVERTED_INDEX_KEY, SKIPPING_INDEX_KEY, FULLTEXT_KEY, INVERTED_INDEX_KEY, SKIPPING_INDEX_KEY,
}; };
use greptime_proto::v1::{Analyzer, SkippingIndexType as PbSkippingIndexType}; use greptime_proto::v1::Analyzer;
use snafu::ResultExt; use snafu::ResultExt;
use crate::error::{self, Result}; use crate::error::{self, Result};
@@ -34,8 +34,10 @@ const SKIPPING_INDEX_GRPC_KEY: &str = "skipping_index";
/// Tries to construct a `ColumnSchema` from the given `ColumnDef`. /// Tries to construct a `ColumnSchema` from the given `ColumnDef`.
pub fn try_as_column_schema(column_def: &ColumnDef) -> Result<ColumnSchema> { pub fn try_as_column_schema(column_def: &ColumnDef) -> Result<ColumnSchema> {
let data_type = let data_type = ColumnDataTypeWrapper::try_new(
ColumnDataTypeWrapper::try_new(column_def.data_type, column_def.datatype_extension)?; column_def.data_type,
column_def.datatype_extension.clone(),
)?;
let constraint = if column_def.default_constraint.is_empty() { let constraint = if column_def.default_constraint.is_empty() {
None None
@@ -55,13 +57,13 @@ pub fn try_as_column_schema(column_def: &ColumnDef) -> Result<ColumnSchema> {
} }
if let Some(options) = column_def.options.as_ref() { if let Some(options) = column_def.options.as_ref() {
if let Some(fulltext) = options.options.get(FULLTEXT_GRPC_KEY) { if let Some(fulltext) = options.options.get(FULLTEXT_GRPC_KEY) {
metadata.insert(FULLTEXT_KEY.to_string(), fulltext.to_owned()); metadata.insert(FULLTEXT_KEY.to_string(), fulltext.clone());
} }
if let Some(inverted_index) = options.options.get(INVERTED_INDEX_GRPC_KEY) { if let Some(inverted_index) = options.options.get(INVERTED_INDEX_GRPC_KEY) {
metadata.insert(INVERTED_INDEX_KEY.to_string(), inverted_index.to_owned()); metadata.insert(INVERTED_INDEX_KEY.to_string(), inverted_index.clone());
} }
if let Some(skipping_index) = options.options.get(SKIPPING_INDEX_GRPC_KEY) { if let Some(skipping_index) = options.options.get(SKIPPING_INDEX_GRPC_KEY) {
metadata.insert(SKIPPING_INDEX_KEY.to_string(), skipping_index.to_owned()); metadata.insert(SKIPPING_INDEX_KEY.to_string(), skipping_index.clone());
} }
} }
@@ -80,7 +82,7 @@ pub fn options_from_column_schema(column_schema: &ColumnSchema) -> Option<Column
if let Some(fulltext) = column_schema.metadata().get(FULLTEXT_KEY) { if let Some(fulltext) = column_schema.metadata().get(FULLTEXT_KEY) {
options options
.options .options
.insert(FULLTEXT_GRPC_KEY.to_string(), fulltext.to_owned()); .insert(FULLTEXT_GRPC_KEY.to_string(), fulltext.clone());
} }
if let Some(inverted_index) = column_schema.metadata().get(INVERTED_INDEX_KEY) { if let Some(inverted_index) = column_schema.metadata().get(INVERTED_INDEX_KEY) {
options options
@@ -100,14 +102,7 @@ pub fn options_from_column_schema(column_schema: &ColumnSchema) -> Option<Column
pub fn contains_fulltext(options: &Option<ColumnOptions>) -> bool { pub fn contains_fulltext(options: &Option<ColumnOptions>) -> bool {
options options
.as_ref() .as_ref()
.is_some_and(|o| o.options.contains_key(FULLTEXT_GRPC_KEY)) .map_or(false, |o| o.options.contains_key(FULLTEXT_GRPC_KEY))
}
/// Checks if the `ColumnOptions` contains skipping index options.
pub fn contains_skipping(options: &Option<ColumnOptions>) -> bool {
options
.as_ref()
.is_some_and(|o| o.options.contains_key(SKIPPING_INDEX_GRPC_KEY))
} }
/// Tries to construct a `ColumnOptions` from the given `FulltextOptions`. /// Tries to construct a `ColumnOptions` from the given `FulltextOptions`.
@@ -120,18 +115,6 @@ pub fn options_from_fulltext(fulltext: &FulltextOptions) -> Result<Option<Column
Ok((!options.options.is_empty()).then_some(options)) Ok((!options.options.is_empty()).then_some(options))
} }
/// Tries to construct a `ColumnOptions` from the given `SkippingIndexOptions`.
pub fn options_from_skipping(skipping: &SkippingIndexOptions) -> Result<Option<ColumnOptions>> {
let mut options = ColumnOptions::default();
let v = serde_json::to_string(skipping).context(error::SerializeJsonSnafu)?;
options
.options
.insert(SKIPPING_INDEX_GRPC_KEY.to_string(), v);
Ok((!options.options.is_empty()).then_some(options))
}
/// Tries to construct a `FulltextAnalyzer` from the given analyzer. /// Tries to construct a `FulltextAnalyzer` from the given analyzer.
pub fn as_fulltext_option(analyzer: Analyzer) -> FulltextAnalyzer { pub fn as_fulltext_option(analyzer: Analyzer) -> FulltextAnalyzer {
match analyzer { match analyzer {
@@ -140,13 +123,6 @@ pub fn as_fulltext_option(analyzer: Analyzer) -> FulltextAnalyzer {
} }
} }
/// Tries to construct a `SkippingIndexType` from the given skipping index type.
pub fn as_skipping_index_type(skipping_index_type: PbSkippingIndexType) -> SkippingIndexType {
match skipping_index_type {
PbSkippingIndexType::BloomFilter => SkippingIndexType::BloomFilter,
}
}
#[cfg(test)] #[cfg(test)]
mod tests { mod tests {
@@ -205,14 +181,14 @@ mod tests {
let options = options_from_column_schema(&schema); let options = options_from_column_schema(&schema);
assert!(options.is_none()); assert!(options.is_none());
let mut schema = ColumnSchema::new("test", ConcreteDataType::string_datatype(), true) let schema = ColumnSchema::new("test", ConcreteDataType::string_datatype(), true)
.with_fulltext_options(FulltextOptions { .with_fulltext_options(FulltextOptions {
enable: true, enable: true,
analyzer: FulltextAnalyzer::English, analyzer: FulltextAnalyzer::English,
case_sensitive: false, case_sensitive: false,
}) })
.unwrap(); .unwrap()
schema.set_inverted_index(true); .set_inverted_index(true);
let options = options_from_column_schema(&schema).unwrap(); let options = options_from_column_schema(&schema).unwrap();
assert_eq!( assert_eq!(
options.options.get(FULLTEXT_GRPC_KEY).unwrap(), options.options.get(FULLTEXT_GRPC_KEY).unwrap(),


@@ -15,7 +15,7 @@ api.workspace = true
arrow.workspace = true arrow.workspace = true
arrow-schema.workspace = true arrow-schema.workspace = true
async-stream.workspace = true async-stream.workspace = true
async-trait.workspace = true async-trait = "0.1"
bytes.workspace = true bytes.workspace = true
common-catalog.workspace = true common-catalog.workspace = true
common-error.workspace = true common-error.workspace = true
@@ -31,7 +31,7 @@ common-version.workspace = true
dashmap.workspace = true dashmap.workspace = true
datafusion.workspace = true datafusion.workspace = true
datatypes.workspace = true datatypes.workspace = true
futures.workspace = true futures = "0.3"
futures-util.workspace = true futures-util.workspace = true
humantime.workspace = true humantime.workspace = true
itertools.workspace = true itertools.workspace = true
@@ -39,7 +39,7 @@ lazy_static.workspace = true
meta-client.workspace = true meta-client.workspace = true
moka = { workspace = true, features = ["future", "sync"] } moka = { workspace = true, features = ["future", "sync"] }
partition.workspace = true partition.workspace = true
paste.workspace = true paste = "1.0"
prometheus.workspace = true prometheus.workspace = true
rustc-hash.workspace = true rustc-hash.workspace = true
serde_json.workspace = true serde_json.workspace = true
@@ -49,7 +49,7 @@ sql.workspace = true
store-api.workspace = true store-api.workspace = true
table.workspace = true table.workspace = true
tokio.workspace = true tokio.workspace = true
tokio-stream.workspace = true tokio-stream = "0.1"
[dev-dependencies] [dev-dependencies]
cache.workspace = true cache.workspace = true


@@ -122,6 +122,13 @@ pub enum Error {
source: BoxedError, source: BoxedError,
}, },
#[snafu(display("Failed to re-compile script due to internal error"))]
CompileScriptInternal {
#[snafu(implicit)]
location: Location,
source: BoxedError,
},
#[snafu(display("Failed to create table, table info: {}", table_info))] #[snafu(display("Failed to create table, table info: {}", table_info))]
CreateTable { CreateTable {
table_info: String, table_info: String,
@@ -336,7 +343,9 @@ impl ErrorExt for Error {
Error::DecodePlan { source, .. } => source.status_code(), Error::DecodePlan { source, .. } => source.status_code(),
Error::InvalidTableInfoInCatalog { source, .. } => source.status_code(), Error::InvalidTableInfoInCatalog { source, .. } => source.status_code(),
Error::Internal { source, .. } => source.status_code(), Error::CompileScriptInternal { source, .. } | Error::Internal { source, .. } => {
source.status_code()
}
Error::QueryAccessDenied { .. } => StatusCode::AccessDenied, Error::QueryAccessDenied { .. } => StatusCode::AccessDenied,
Error::Datafusion { error, .. } => datafusion_status_code::<Self>(error, None), Error::Datafusion { error, .. } => datafusion_status_code::<Self>(error, None),


@@ -18,7 +18,7 @@ use std::sync::atomic::{AtomicUsize, Ordering};
use std::sync::{Arc, Mutex}; use std::sync::{Arc, Mutex};
use std::time::Duration; use std::time::Duration;
use common_error::ext::BoxedError; use common_error::ext::{BoxedError, ErrorExt};
use common_meta::cache_invalidator::KvCacheInvalidator; use common_meta::cache_invalidator::KvCacheInvalidator;
use common_meta::error::Error::CacheNotGet; use common_meta::error::Error::CacheNotGet;
use common_meta::error::{CacheNotGetSnafu, Error, ExternalSnafu, GetKvCacheSnafu, Result}; use common_meta::error::{CacheNotGetSnafu, Error, ExternalSnafu, GetKvCacheSnafu, Result};
@@ -37,6 +37,7 @@ use snafu::{OptionExt, ResultExt};
use crate::metrics::{ use crate::metrics::{
METRIC_CATALOG_KV_BATCH_GET, METRIC_CATALOG_KV_GET, METRIC_CATALOG_KV_REMOTE_GET, METRIC_CATALOG_KV_BATCH_GET, METRIC_CATALOG_KV_GET, METRIC_CATALOG_KV_REMOTE_GET,
METRIC_META_CLIENT_GET,
}; };
const DEFAULT_CACHE_MAX_CAPACITY: u64 = 10000; const DEFAULT_CACHE_MAX_CAPACITY: u64 = 10000;
@@ -292,7 +293,7 @@ impl KvBackend for CachedKvBackend {
} }
.map_err(|e| { .map_err(|e| {
GetKvCacheSnafu { GetKvCacheSnafu {
err_msg: e.to_string(), err_msg: e.output_msg(),
} }
.build() .build()
}); });
@@ -303,7 +304,7 @@ impl KvBackend for CachedKvBackend {
.lock() .lock()
.unwrap() .unwrap()
.as_ref() .as_ref()
.is_some_and(|v| !self.validate_version(*v)) .map_or(false, |v| !self.validate_version(*v))
{ {
self.cache.invalidate(key).await; self.cache.invalidate(key).await;
} }
@@ -445,6 +446,8 @@ impl KvBackend for MetaKvBackend {
} }
async fn get(&self, key: &[u8]) -> Result<Option<KeyValue>> { async fn get(&self, key: &[u8]) -> Result<Option<KeyValue>> {
let _timer = METRIC_META_CLIENT_GET.start_timer();
let mut response = self let mut response = self
.client .client
.range(RangeRequest::new().with_key(key)) .range(RangeRequest::new().with_key(key))


@@ -41,7 +41,6 @@ pub mod information_schema {
} }
pub mod table_source; pub mod table_source;
#[async_trait::async_trait] #[async_trait::async_trait]
pub trait CatalogManager: Send + Sync { pub trait CatalogManager: Send + Sync {
fn as_any(&self) -> &dyn Any; fn as_any(&self) -> &dyn Any;


@@ -34,4 +34,6 @@ lazy_static! {
register_histogram!("greptime_catalog_kv_get", "catalog kv get").unwrap(); register_histogram!("greptime_catalog_kv_get", "catalog kv get").unwrap();
pub static ref METRIC_CATALOG_KV_BATCH_GET: Histogram = pub static ref METRIC_CATALOG_KV_BATCH_GET: Histogram =
register_histogram!("greptime_catalog_kv_batch_get", "catalog kv batch get").unwrap(); register_histogram!("greptime_catalog_kv_batch_get", "catalog kv batch get").unwrap();
pub static ref METRIC_META_CLIENT_GET: Histogram =
register_histogram!("greptime_meta_client_get", "meta client get").unwrap();
} }


@@ -64,7 +64,6 @@ const INIT_CAPACITY: usize = 42;
/// - `uptime`: the uptime of the peer. /// - `uptime`: the uptime of the peer.
/// - `active_time`: the time since the last activity of the peer. /// - `active_time`: the time since the last activity of the peer.
/// ///
#[derive(Debug)]
pub(super) struct InformationSchemaClusterInfo { pub(super) struct InformationSchemaClusterInfo {
schema: SchemaRef, schema: SchemaRef,
catalog_manager: Weak<dyn CatalogManager>, catalog_manager: Weak<dyn CatalogManager>,


@@ -45,7 +45,6 @@ use crate::error::{
use crate::information_schema::Predicates; use crate::information_schema::Predicates;
use crate::CatalogManager; use crate::CatalogManager;
#[derive(Debug)]
pub(super) struct InformationSchemaColumns { pub(super) struct InformationSchemaColumns {
schema: SchemaRef, schema: SchemaRef,
catalog_name: String, catalog_name: String,


@@ -61,7 +61,7 @@ pub const FLOWNODE_IDS: &str = "flownode_ids";
pub const OPTIONS: &str = "options"; pub const OPTIONS: &str = "options";
/// The `information_schema.flows` to provides information about flows in databases. /// The `information_schema.flows` to provides information about flows in databases.
#[derive(Debug)] ///
pub(super) struct InformationSchemaFlows { pub(super) struct InformationSchemaFlows {
schema: SchemaRef, schema: SchemaRef,
catalog_name: String, catalog_name: String,


@@ -58,11 +58,8 @@ pub(crate) const TIME_INDEX_CONSTRAINT_NAME: &str = "TIME INDEX";
pub(crate) const INVERTED_INDEX_CONSTRAINT_NAME: &str = "INVERTED INDEX"; pub(crate) const INVERTED_INDEX_CONSTRAINT_NAME: &str = "INVERTED INDEX";
/// Fulltext index constraint name /// Fulltext index constraint name
pub(crate) const FULLTEXT_INDEX_CONSTRAINT_NAME: &str = "FULLTEXT INDEX"; pub(crate) const FULLTEXT_INDEX_CONSTRAINT_NAME: &str = "FULLTEXT INDEX";
/// Skipping index constraint name
pub(crate) const SKIPPING_INDEX_CONSTRAINT_NAME: &str = "SKIPPING INDEX";
/// The virtual table implementation for `information_schema.KEY_COLUMN_USAGE`. /// The virtual table implementation for `information_schema.KEY_COLUMN_USAGE`.
#[derive(Debug)]
pub(super) struct InformationSchemaKeyColumnUsage { pub(super) struct InformationSchemaKeyColumnUsage {
schema: SchemaRef, schema: SchemaRef,
catalog_name: String, catalog_name: String,
@@ -249,12 +246,10 @@ impl InformationSchemaKeyColumnUsageBuilder {
if column.is_inverted_indexed() { if column.is_inverted_indexed() {
constraints.push(INVERTED_INDEX_CONSTRAINT_NAME); constraints.push(INVERTED_INDEX_CONSTRAINT_NAME);
} }
if column.is_fulltext_indexed() {
if column.has_fulltext_index_key() {
constraints.push(FULLTEXT_INDEX_CONSTRAINT_NAME); constraints.push(FULLTEXT_INDEX_CONSTRAINT_NAME);
} }
if column.is_skipping_indexed() {
constraints.push(SKIPPING_INDEX_CONSTRAINT_NAME);
}
if !constraints.is_empty() { if !constraints.is_empty() {
let aggregated_constraints = constraints.join(", "); let aggregated_constraints = constraints.join(", ");


@@ -59,7 +59,6 @@ const INIT_CAPACITY: usize = 42;
/// The `PARTITIONS` table provides information about partitioned tables. /// The `PARTITIONS` table provides information about partitioned tables.
/// See https://dev.mysql.com/doc/refman/8.0/en/information-schema-partitions-table.html /// See https://dev.mysql.com/doc/refman/8.0/en/information-schema-partitions-table.html
/// We provide an extral column `greptime_partition_id` for GreptimeDB region id. /// We provide an extral column `greptime_partition_id` for GreptimeDB region id.
#[derive(Debug)]
pub(super) struct InformationSchemaPartitions { pub(super) struct InformationSchemaPartitions {
schema: SchemaRef, schema: SchemaRef,
catalog_name: String, catalog_name: String,


@@ -56,7 +56,7 @@ const INIT_CAPACITY: usize = 42;
/// - `end_time`: the ending execution time of the procedure. /// - `end_time`: the ending execution time of the procedure.
/// - `status`: the status of the procedure. /// - `status`: the status of the procedure.
/// - `lock_keys`: the lock keys of the procedure. /// - `lock_keys`: the lock keys of the procedure.
#[derive(Debug)] ///
pub(super) struct InformationSchemaProcedureInfo { pub(super) struct InformationSchemaProcedureInfo {
schema: SchemaRef, schema: SchemaRef,
catalog_manager: Weak<dyn CatalogManager>, catalog_manager: Weak<dyn CatalogManager>,


@@ -59,7 +59,7 @@ const INIT_CAPACITY: usize = 42;
/// - `is_leader`: whether the peer is the leader /// - `is_leader`: whether the peer is the leader
/// - `status`: the region status, `ALIVE` or `DOWNGRADED`. /// - `status`: the region status, `ALIVE` or `DOWNGRADED`.
/// - `down_seconds`: the duration of being offline, in seconds. /// - `down_seconds`: the duration of being offline, in seconds.
#[derive(Debug)] ///
pub(super) struct InformationSchemaRegionPeers { pub(super) struct InformationSchemaRegionPeers {
schema: SchemaRef, schema: SchemaRef,
catalog_name: String, catalog_name: String,


@@ -63,7 +63,7 @@ const INIT_CAPACITY: usize = 42;
/// - `index_size`: The sst index files size in bytes. /// - `index_size`: The sst index files size in bytes.
/// - `engine`: The engine type. /// - `engine`: The engine type.
/// - `region_role`: The region role. /// - `region_role`: The region role.
#[derive(Debug)] ///
pub(super) struct InformationSchemaRegionStatistics { pub(super) struct InformationSchemaRegionStatistics {
schema: SchemaRef, schema: SchemaRef,
catalog_manager: Weak<dyn CatalogManager>, catalog_manager: Weak<dyn CatalogManager>,


@@ -38,7 +38,6 @@ use store_api::storage::{ScanRequest, TableId};
use super::{InformationTable, RUNTIME_METRICS}; use super::{InformationTable, RUNTIME_METRICS};
use crate::error::{CreateRecordBatchSnafu, InternalSnafu, Result}; use crate::error::{CreateRecordBatchSnafu, InternalSnafu, Result};
#[derive(Debug)]
pub(super) struct InformationSchemaMetrics { pub(super) struct InformationSchemaMetrics {
schema: SchemaRef, schema: SchemaRef,
} }


@@ -49,7 +49,6 @@ pub const SCHEMA_OPTS: &str = "options";
const INIT_CAPACITY: usize = 42; const INIT_CAPACITY: usize = 42;
/// The `information_schema.schemata` table implementation. /// The `information_schema.schemata` table implementation.
#[derive(Debug)]
pub(super) struct InformationSchemaSchemata { pub(super) struct InformationSchemaSchemata {
schema: SchemaRef, schema: SchemaRef,
catalog_name: String, catalog_name: String,


@@ -43,7 +43,6 @@ use crate::information_schema::Predicates;
use crate::CatalogManager; use crate::CatalogManager;
/// The `TABLE_CONSTRAINTS` table describes which tables have constraints. /// The `TABLE_CONSTRAINTS` table describes which tables have constraints.
#[derive(Debug)]
pub(super) struct InformationSchemaTableConstraints { pub(super) struct InformationSchemaTableConstraints {
schema: SchemaRef, schema: SchemaRef,
catalog_name: String, catalog_name: String,


@@ -71,7 +71,6 @@ const TABLE_ID: &str = "table_id";
pub const ENGINE: &str = "engine"; pub const ENGINE: &str = "engine";
const INIT_CAPACITY: usize = 42; const INIT_CAPACITY: usize = 42;
#[derive(Debug)]
pub(super) struct InformationSchemaTables { pub(super) struct InformationSchemaTables {
schema: SchemaRef, schema: SchemaRef,
catalog_name: String, catalog_name: String,


@@ -54,7 +54,6 @@ pub const CHARACTER_SET_CLIENT: &str = "character_set_client";
pub const COLLATION_CONNECTION: &str = "collation_connection"; pub const COLLATION_CONNECTION: &str = "collation_connection";
/// The `information_schema.views` to provides information about views in databases. /// The `information_schema.views` to provides information about views in databases.
#[derive(Debug)]
pub(super) struct InformationSchemaViews { pub(super) struct InformationSchemaViews {
schema: SchemaRef, schema: SchemaRef,
catalog_name: String, catalog_name: String,


@@ -33,7 +33,6 @@ use super::SystemTable;
use crate::error::{CreateRecordBatchSnafu, InternalSnafu, Result}; use crate::error::{CreateRecordBatchSnafu, InternalSnafu, Result};
/// A memory table with specified schema and columns. /// A memory table with specified schema and columns.
#[derive(Debug)]
pub(crate) struct MemoryTable { pub(crate) struct MemoryTable {
pub(crate) table_id: TableId, pub(crate) table_id: TableId,
pub(crate) table_name: &'static str, pub(crate) table_name: &'static str,


@@ -14,7 +14,6 @@
mod pg_catalog_memory_table; mod pg_catalog_memory_table;
mod pg_class; mod pg_class;
mod pg_database;
mod pg_namespace; mod pg_namespace;
mod table_names; mod table_names;
@@ -27,7 +26,6 @@ use lazy_static::lazy_static;
use paste::paste; use paste::paste;
use pg_catalog_memory_table::get_schema_columns; use pg_catalog_memory_table::get_schema_columns;
use pg_class::PGClass; use pg_class::PGClass;
use pg_database::PGDatabase;
use pg_namespace::PGNamespace; use pg_namespace::PGNamespace;
use session::context::{Channel, QueryContext}; use session::context::{Channel, QueryContext};
use table::TableRef; use table::TableRef;
@@ -115,10 +113,6 @@ impl PGCatalogProvider {
PG_CLASS.to_string(), PG_CLASS.to_string(),
self.build_table(PG_CLASS).expect(PG_NAMESPACE), self.build_table(PG_CLASS).expect(PG_NAMESPACE),
); );
tables.insert(
PG_DATABASE.to_string(),
self.build_table(PG_DATABASE).expect(PG_DATABASE),
);
self.tables = tables; self.tables = tables;
} }
} }
@@ -141,11 +135,6 @@ impl SystemSchemaProviderInner for PGCatalogProvider {
self.catalog_manager.clone(), self.catalog_manager.clone(),
self.namespace_oid_map.clone(), self.namespace_oid_map.clone(),
))), ))),
table_names::PG_DATABASE => Some(Arc::new(PGDatabase::new(
self.catalog_name.clone(),
self.catalog_manager.clone(),
self.namespace_oid_map.clone(),
))),
_ => None, _ => None,
} }
} }


@@ -12,7 +12,6 @@
// See the License for the specific language governing permissions and // See the License for the specific language governing permissions and
// limitations under the License. // limitations under the License.
use std::fmt;
use std::sync::{Arc, Weak}; use std::sync::{Arc, Weak};
use arrow_schema::SchemaRef as ArrowSchemaRef; use arrow_schema::SchemaRef as ArrowSchemaRef;
@@ -101,15 +100,6 @@ impl PGClass {
} }
} }
impl fmt::Debug for PGClass {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
f.debug_struct("PGClass")
.field("schema", &self.schema)
.field("catalog_name", &self.catalog_name)
.finish()
}
}
impl SystemTable for PGClass { impl SystemTable for PGClass {
fn table_id(&self) -> table::metadata::TableId { fn table_id(&self) -> table::metadata::TableId {
PG_CATALOG_PG_CLASS_TABLE_ID PG_CATALOG_PG_CLASS_TABLE_ID


@@ -1,223 +0,0 @@
// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use std::sync::{Arc, Weak};
use arrow_schema::SchemaRef as ArrowSchemaRef;
use common_catalog::consts::PG_CATALOG_PG_DATABASE_TABLE_ID;
use common_error::ext::BoxedError;
use common_recordbatch::adapter::RecordBatchStreamAdapter;
use common_recordbatch::{DfSendableRecordBatchStream, RecordBatch};
use datafusion::execution::TaskContext;
use datafusion::physical_plan::stream::RecordBatchStreamAdapter as DfRecordBatchStreamAdapter;
use datafusion::physical_plan::streaming::PartitionStream as DfPartitionStream;
use datatypes::scalars::ScalarVectorBuilder;
use datatypes::schema::{Schema, SchemaRef};
use datatypes::value::Value;
use datatypes::vectors::{StringVectorBuilder, UInt32VectorBuilder, VectorRef};
use snafu::{OptionExt, ResultExt};
use store_api::storage::ScanRequest;
use super::pg_namespace::oid_map::PGNamespaceOidMapRef;
use super::{query_ctx, OID_COLUMN_NAME, PG_DATABASE};
use crate::error::{
CreateRecordBatchSnafu, InternalSnafu, Result, UpgradeWeakCatalogManagerRefSnafu,
};
use crate::information_schema::Predicates;
use crate::system_schema::utils::tables::{string_column, u32_column};
use crate::system_schema::SystemTable;
use crate::CatalogManager;
// === column name ===
pub const DATNAME: &str = "datname";
/// The initial capacity of the vector builders.
const INIT_CAPACITY: usize = 42;
/// The `pg_catalog.database` table implementation.
pub(super) struct PGDatabase {
schema: SchemaRef,
catalog_name: String,
catalog_manager: Weak<dyn CatalogManager>,
// Workaround to convert schema_name to a numeric id
namespace_oid_map: PGNamespaceOidMapRef,
}
impl std::fmt::Debug for PGDatabase {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
f.debug_struct("PGDatabase")
.field("schema", &self.schema)
.field("catalog_name", &self.catalog_name)
.finish()
}
}
impl PGDatabase {
pub(super) fn new(
catalog_name: String,
catalog_manager: Weak<dyn CatalogManager>,
namespace_oid_map: PGNamespaceOidMapRef,
) -> Self {
Self {
schema: Self::schema(),
catalog_name,
catalog_manager,
namespace_oid_map,
}
}
fn schema() -> SchemaRef {
Arc::new(Schema::new(vec![
u32_column(OID_COLUMN_NAME),
string_column(DATNAME),
]))
}
fn builder(&self) -> PGCDatabaseBuilder {
PGCDatabaseBuilder::new(
self.schema.clone(),
self.catalog_name.clone(),
self.catalog_manager.clone(),
self.namespace_oid_map.clone(),
)
}
}
impl DfPartitionStream for PGDatabase {
fn schema(&self) -> &ArrowSchemaRef {
self.schema.arrow_schema()
}
fn execute(&self, _: Arc<TaskContext>) -> DfSendableRecordBatchStream {
let schema = self.schema.arrow_schema().clone();
let mut builder = self.builder();
Box::pin(DfRecordBatchStreamAdapter::new(
schema,
futures::stream::once(async move {
builder
.make_database(None)
.await
.map(|x| x.into_df_record_batch())
.map_err(Into::into)
}),
))
}
}
impl SystemTable for PGDatabase {
fn table_id(&self) -> table::metadata::TableId {
PG_CATALOG_PG_DATABASE_TABLE_ID
}
fn table_name(&self) -> &'static str {
PG_DATABASE
}
fn schema(&self) -> SchemaRef {
self.schema.clone()
}
fn to_stream(
&self,
request: ScanRequest,
) -> Result<common_recordbatch::SendableRecordBatchStream> {
let schema = self.schema.arrow_schema().clone();
let mut builder = self.builder();
let stream = Box::pin(DfRecordBatchStreamAdapter::new(
schema,
futures::stream::once(async move {
builder
.make_database(Some(request))
.await
.map(|x| x.into_df_record_batch())
.map_err(Into::into)
}),
));
Ok(Box::pin(
RecordBatchStreamAdapter::try_new(stream)
.map_err(BoxedError::new)
.context(InternalSnafu)?,
))
}
}
/// Builds the `pg_catalog.pg_database` table row by row
/// `oid` use schema name as a workaround since we don't have numeric schema id.
/// `nspname` is the schema name.
struct PGCDatabaseBuilder {
schema: SchemaRef,
catalog_name: String,
catalog_manager: Weak<dyn CatalogManager>,
namespace_oid_map: PGNamespaceOidMapRef,
oid: UInt32VectorBuilder,
datname: StringVectorBuilder,
}
impl PGCDatabaseBuilder {
fn new(
schema: SchemaRef,
catalog_name: String,
catalog_manager: Weak<dyn CatalogManager>,
namespace_oid_map: PGNamespaceOidMapRef,
) -> Self {
Self {
schema,
catalog_name,
catalog_manager,
namespace_oid_map,
oid: UInt32VectorBuilder::with_capacity(INIT_CAPACITY),
datname: StringVectorBuilder::with_capacity(INIT_CAPACITY),
}
}
async fn make_database(&mut self, request: Option<ScanRequest>) -> Result<RecordBatch> {
let catalog_name = self.catalog_name.clone();
let catalog_manager = self
.catalog_manager
.upgrade()
.context(UpgradeWeakCatalogManagerRefSnafu)?;
let predicates = Predicates::from_scan_request(&request);
for schema_name in catalog_manager
.schema_names(&catalog_name, query_ctx())
.await?
{
self.add_database(&predicates, &schema_name);
}
self.finish()
}
fn add_database(&mut self, predicates: &Predicates, schema_name: &str) {
let oid = self.namespace_oid_map.get_oid(schema_name);
let row: [(&str, &Value); 2] = [
(OID_COLUMN_NAME, &Value::from(oid)),
(DATNAME, &Value::from(schema_name)),
];
if !predicates.eval(&row) {
return;
}
self.oid.push(Some(oid));
self.datname.push(Some(schema_name));
}
fn finish(&mut self) -> Result<RecordBatch> {
let columns: Vec<VectorRef> =
vec![Arc::new(self.oid.finish()), Arc::new(self.datname.finish())];
RecordBatch::new(self.schema.clone(), columns).context(CreateRecordBatchSnafu)
}
}


@@ -17,7 +17,6 @@
pub(super) mod oid_map; pub(super) mod oid_map;
use std::fmt;
use std::sync::{Arc, Weak}; use std::sync::{Arc, Weak};
use arrow_schema::SchemaRef as ArrowSchemaRef; use arrow_schema::SchemaRef as ArrowSchemaRef;
@@ -88,15 +87,6 @@ impl PGNamespace {
} }
} }
impl fmt::Debug for PGNamespace {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
f.debug_struct("PGNamespace")
.field("schema", &self.schema)
.field("catalog_name", &self.catalog_name)
.finish()
}
}
impl SystemTable for PGNamespace { impl SystemTable for PGNamespace {
fn schema(&self) -> SchemaRef { fn schema(&self) -> SchemaRef {
self.schema.clone() self.schema.clone()


@@ -12,11 +12,7 @@
// See the License for the specific language governing permissions and // See the License for the specific language governing permissions and
// limitations under the License. // limitations under the License.
// https://www.postgresql.org/docs/current/catalog-pg-database.html pub const PG_DATABASE: &str = "pg_databases";
pub const PG_DATABASE: &str = "pg_database";
// https://www.postgresql.org/docs/current/catalog-pg-namespace.html
pub const PG_NAMESPACE: &str = "pg_namespace"; pub const PG_NAMESPACE: &str = "pg_namespace";
// https://www.postgresql.org/docs/current/catalog-pg-class.html
pub const PG_CLASS: &str = "pg_class"; pub const PG_CLASS: &str = "pg_class";
// https://www.postgresql.org/docs/current/catalog-pg-type.html
pub const PG_TYPE: &str = "pg_type"; pub const PG_TYPE: &str = "pg_type";


@@ -365,7 +365,7 @@ mod tests {
Projection: person.id AS a, person.name AS b Projection: person.id AS a, person.name AS b
Filter: person.id > Int32(500) Filter: person.id > Int32(500)
TableScan: person"#, TableScan: person"#,
format!("\n{}", source.get_logical_plan().unwrap()) format!("\n{:?}", source.get_logical_plan().unwrap())
); );
} }
} }


@@ -15,12 +15,12 @@
//! Dummy catalog for region server. //! Dummy catalog for region server.
use std::any::Any; use std::any::Any;
use std::fmt;
use std::sync::Arc; use std::sync::Arc;
use async_trait::async_trait; use async_trait::async_trait;
use common_catalog::format_full_table_name; use common_catalog::format_full_table_name;
use datafusion::catalog::{CatalogProvider, CatalogProviderList, SchemaProvider}; use datafusion::catalog::schema::SchemaProvider;
use datafusion::catalog::{CatalogProvider, CatalogProviderList};
use datafusion::datasource::TableProvider; use datafusion::datasource::TableProvider;
use snafu::OptionExt; use snafu::OptionExt;
use table::table::adapter::DfTableProviderAdapter; use table::table::adapter::DfTableProviderAdapter;
@@ -41,12 +41,6 @@ impl DummyCatalogList {
} }
} }
impl fmt::Debug for DummyCatalogList {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
f.debug_struct("DummyCatalogList").finish()
}
}
impl CatalogProviderList for DummyCatalogList { impl CatalogProviderList for DummyCatalogList {
fn as_any(&self) -> &dyn Any { fn as_any(&self) -> &dyn Any {
self self
@@ -97,14 +91,6 @@ impl CatalogProvider for DummyCatalogProvider {
} }
} }
impl fmt::Debug for DummyCatalogProvider {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
f.debug_struct("DummyCatalogProvider")
.field("catalog_name", &self.catalog_name)
.finish()
}
}
/// A dummy schema provider for [DummyCatalogList]. /// A dummy schema provider for [DummyCatalogList].
#[derive(Clone)] #[derive(Clone)]
struct DummySchemaProvider { struct DummySchemaProvider {
@@ -141,12 +127,3 @@ impl SchemaProvider for DummySchemaProvider {
true true
} }
} }
impl fmt::Debug for DummySchemaProvider {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
f.debug_struct("DummySchemaProvider")
.field("catalog_name", &self.catalog_name)
.field("schema_name", &self.schema_name)
.finish()
}
}


@@ -4,9 +4,6 @@ version.workspace = true
edition.workspace = true edition.workspace = true
license.workspace = true license.workspace = true
[features]
pg_kvbackend = ["common-meta/pg_kvbackend"]
[lints] [lints]
workspace = true workspace = true
@@ -59,6 +56,7 @@ tokio.workspace = true
tracing-appender.workspace = true tracing-appender.workspace = true
[dev-dependencies] [dev-dependencies]
common-test-util.workspace = true
common-version.workspace = true common-version.workspace = true
serde.workspace = true serde.workspace = true
tempfile.workspace = true tempfile.workspace = true


@@ -22,13 +22,9 @@ use clap::Parser;
use common_error::ext::BoxedError; use common_error::ext::BoxedError;
use common_meta::key::{TableMetadataManager, TableMetadataManagerRef}; use common_meta::key::{TableMetadataManager, TableMetadataManagerRef};
use common_meta::kv_backend::etcd::EtcdStore; use common_meta::kv_backend::etcd::EtcdStore;
use common_meta::kv_backend::memory::MemoryKvBackend;
#[cfg(feature = "pg_kvbackend")]
use common_meta::kv_backend::rds::PgStore;
use common_meta::peer::Peer; use common_meta::peer::Peer;
use common_meta::rpc::router::{Region, RegionRoute}; use common_meta::rpc::router::{Region, RegionRoute};
use common_telemetry::info; use common_telemetry::info;
use common_wal::options::WalOptions;
use datatypes::data_type::ConcreteDataType; use datatypes::data_type::ConcreteDataType;
use datatypes::schema::{ColumnSchema, RawSchema}; use datatypes::schema::{ColumnSchema, RawSchema};
use rand::Rng; use rand::Rng;
@@ -59,34 +55,18 @@ where
#[derive(Debug, Default, Parser)] #[derive(Debug, Default, Parser)]
pub struct BenchTableMetadataCommand { pub struct BenchTableMetadataCommand {
#[clap(long)] #[clap(long)]
etcd_addr: Option<String>, etcd_addr: String,
#[cfg(feature = "pg_kvbackend")]
#[clap(long)]
postgres_addr: Option<String>,
#[clap(long)] #[clap(long)]
count: u32, count: u32,
} }
impl BenchTableMetadataCommand { impl BenchTableMetadataCommand {
pub async fn build(&self) -> std::result::Result<Box<dyn Tool>, BoxedError> { pub async fn build(&self) -> std::result::Result<Box<dyn Tool>, BoxedError> {
let kv_backend = if let Some(etcd_addr) = &self.etcd_addr { let etcd_store = EtcdStore::with_endpoints([&self.etcd_addr], 128)
info!("Using etcd as kv backend"); .await
EtcdStore::with_endpoints([etcd_addr], 128).await.unwrap() .unwrap();
} else {
Arc::new(MemoryKvBackend::new())
};
#[cfg(feature = "pg_kvbackend")] let table_metadata_manager = Arc::new(TableMetadataManager::new(etcd_store));
let kv_backend = if let Some(postgres_addr) = &self.postgres_addr {
info!("Using postgres as kv backend");
PgStore::with_url(postgres_addr, "greptime_metakv", 128)
.await
.unwrap()
} else {
kv_backend
};
let table_metadata_manager = Arc::new(TableMetadataManager::new(kv_backend));
let tool = BenchTableMetadata { let tool = BenchTableMetadata {
table_metadata_manager, table_metadata_manager,
@@ -185,7 +165,7 @@ fn create_region_routes(regions: Vec<RegionNumber>) -> Vec<RegionRoute> {
region_routes region_routes
} }
fn create_region_wal_options(regions: Vec<RegionNumber>) -> HashMap<RegionNumber, WalOptions> { fn create_region_wal_options(regions: Vec<RegionNumber>) -> HashMap<RegionNumber, String> {
// TODO(niebayes): construct region wal options for benchmark. // TODO(niebayes): construct region wal options for benchmark.
let _ = regions; let _ = regions;
HashMap::default() HashMap::default()


@@ -49,12 +49,7 @@ impl TableMetadataBencher {
let regions: Vec<_> = (0..64).collect(); let regions: Vec<_> = (0..64).collect();
let region_routes = create_region_routes(regions.clone()); let region_routes = create_region_routes(regions.clone());
let region_wal_options = create_region_wal_options(regions) let region_wal_options = create_region_wal_options(regions);
.into_iter()
.map(|(region_id, wal_options)| {
(region_id, serde_json::to_string(&wal_options).unwrap())
})
.collect();
let start = Instant::now(); let start = Instant::now();
@@ -114,17 +109,9 @@ impl TableMetadataBencher {
let table_info = table_info.unwrap(); let table_info = table_info.unwrap();
let table_route = table_route.unwrap(); let table_route = table_route.unwrap();
let table_id = table_info.table_info.ident.table_id; let table_id = table_info.table_info.ident.table_id;
let regions: Vec<_> = (0..64).collect();
let region_wal_options = create_region_wal_options(regions);
let _ = self let _ = self
.table_metadata_manager .table_metadata_manager
.delete_table_metadata( .delete_table_metadata(table_id, &table_info.table_name(), &table_route)
table_id,
&table_info.table_name(),
&table_route,
&region_wal_options,
)
.await; .await;
start.elapsed() start.elapsed()
}, },


@@ -17,7 +17,6 @@ use std::time::Duration;
use base64::engine::general_purpose; use base64::engine::general_purpose;
use base64::Engine; use base64::Engine;
use common_catalog::consts::{DEFAULT_CATALOG_NAME, DEFAULT_SCHEMA_NAME}; use common_catalog::consts::{DEFAULT_CATALOG_NAME, DEFAULT_SCHEMA_NAME};
use common_error::ext::BoxedError;
use humantime::format_duration; use humantime::format_duration;
use serde_json::Value; use serde_json::Value;
use servers::http::header::constants::GREPTIME_DB_HEADER_TIMEOUT; use servers::http::header::constants::GREPTIME_DB_HEADER_TIMEOUT;
@@ -25,9 +24,7 @@ use servers::http::result::greptime_result_v1::GreptimedbV1Response;
use servers::http::GreptimeQueryOutput; use servers::http::GreptimeQueryOutput;
use snafu::ResultExt; use snafu::ResultExt;
use crate::error::{ use crate::error::{HttpQuerySqlSnafu, Result, SerdeJsonSnafu};
BuildClientSnafu, HttpQuerySqlSnafu, ParseProxyOptsSnafu, Result, SerdeJsonSnafu,
};
#[derive(Debug, Clone)] #[derive(Debug, Clone)]
pub struct DatabaseClient { pub struct DatabaseClient {
@@ -35,23 +32,6 @@ pub struct DatabaseClient {
catalog: String, catalog: String,
auth_header: Option<String>, auth_header: Option<String>,
timeout: Duration, timeout: Duration,
proxy: Option<reqwest::Proxy>,
}
pub fn parse_proxy_opts(
proxy: Option<String>,
no_proxy: bool,
) -> std::result::Result<Option<reqwest::Proxy>, BoxedError> {
if no_proxy {
return Ok(None);
}
proxy
.map(|proxy| {
reqwest::Proxy::all(proxy)
.context(ParseProxyOptsSnafu)
.map_err(BoxedError::new)
})
.transpose()
} }
impl DatabaseClient { impl DatabaseClient {
@@ -60,7 +40,6 @@ impl DatabaseClient {
catalog: String, catalog: String,
auth_basic: Option<String>, auth_basic: Option<String>,
timeout: Duration, timeout: Duration,
proxy: Option<reqwest::Proxy>,
) -> Self { ) -> Self {
let auth_header = if let Some(basic) = auth_basic { let auth_header = if let Some(basic) = auth_basic {
let encoded = general_purpose::STANDARD.encode(basic); let encoded = general_purpose::STANDARD.encode(basic);
@@ -69,18 +48,11 @@ impl DatabaseClient {
None None
}; };
if let Some(ref proxy) = proxy {
common_telemetry::info!("Using proxy: {:?}", proxy);
} else {
common_telemetry::info!("Using system proxy(if any)");
}
Self { Self {
addr, addr,
catalog, catalog,
auth_header, auth_header,
timeout, timeout,
proxy,
} }
} }
@@ -95,13 +67,7 @@ impl DatabaseClient {
("db", format!("{}-{}", self.catalog, schema)), ("db", format!("{}-{}", self.catalog, schema)),
("sql", sql.to_string()), ("sql", sql.to_string()),
]; ];
let client = self let mut request = reqwest::Client::new()
.proxy
.clone()
.map(|proxy| reqwest::Client::builder().proxy(proxy).build())
.unwrap_or_else(|| Ok(reqwest::Client::new()))
.context(BuildClientSnafu)?;
let mut request = client
.post(&url) .post(&url)
.form(&params) .form(&params)
.header("Content-Type", "application/x-www-form-urlencoded"); .header("Content-Type", "application/x-www-form-urlencoded");

View File

@@ -86,22 +86,6 @@ pub enum Error {
location: Location, location: Location,
}, },
#[snafu(display("Failed to parse proxy options: {}", error))]
ParseProxyOpts {
#[snafu(source)]
error: reqwest::Error,
#[snafu(implicit)]
location: Location,
},
#[snafu(display("Failed to build reqwest client: {}", error))]
BuildClient {
#[snafu(implicit)]
location: Location,
#[snafu(source)]
error: reqwest::Error,
},
#[snafu(display("Invalid REPL command: {reason}"))] #[snafu(display("Invalid REPL command: {reason}"))]
InvalidReplCommand { reason: String }, InvalidReplCommand { reason: String },
@@ -294,8 +278,7 @@ impl ErrorExt for Error {
| Error::InitTimezone { .. } | Error::InitTimezone { .. }
| Error::ConnectEtcd { .. } | Error::ConnectEtcd { .. }
| Error::CreateDir { .. } | Error::CreateDir { .. }
| Error::EmptyResult { .. } | Error::EmptyResult { .. } => StatusCode::InvalidArguments,
| Error::ParseProxyOpts { .. } => StatusCode::InvalidArguments,
Error::StartProcedureManager { source, .. } Error::StartProcedureManager { source, .. }
| Error::StopProcedureManager { source, .. } => source.status_code(), | Error::StopProcedureManager { source, .. } => source.status_code(),
@@ -315,8 +298,7 @@ impl ErrorExt for Error {
Error::SerdeJson { .. } Error::SerdeJson { .. }
| Error::FileIo { .. } | Error::FileIo { .. }
| Error::SpawnThread { .. } | Error::SpawnThread { .. }
| Error::InitTlsProvider { .. } | Error::InitTlsProvider { .. } => StatusCode::Unexpected,
| Error::BuildClient { .. } => StatusCode::Unexpected,
Error::Other { source, .. } => source.status_code(), Error::Other { source, .. } => source.status_code(),


@@ -28,7 +28,7 @@ use tokio::io::{AsyncWriteExt, BufWriter};
use tokio::sync::Semaphore; use tokio::sync::Semaphore;
use tokio::time::Instant; use tokio::time::Instant;
use crate::database::{parse_proxy_opts, DatabaseClient}; use crate::database::DatabaseClient;
use crate::error::{EmptyResultSnafu, Error, FileIoSnafu, Result, SchemaNotFoundSnafu}; use crate::error::{EmptyResultSnafu, Error, FileIoSnafu, Result, SchemaNotFoundSnafu};
use crate::{database, Tool}; use crate::{database, Tool};
@@ -91,30 +91,19 @@ pub struct ExportCommand {
/// The default behavior will disable server-side default timeout(i.e. `0s`). /// The default behavior will disable server-side default timeout(i.e. `0s`).
#[clap(long, value_parser = humantime::parse_duration)] #[clap(long, value_parser = humantime::parse_duration)]
timeout: Option<Duration>, timeout: Option<Duration>,
/// The proxy server address to connect, if set, will override the system proxy.
///
/// The default behavior will use the system proxy if neither `proxy` nor `no_proxy` is set.
#[clap(long)]
proxy: Option<String>,
/// Disable proxy server, if set, will not use any proxy.
#[clap(long)]
no_proxy: bool,
} }
impl ExportCommand { impl ExportCommand {
pub async fn build(&self) -> std::result::Result<Box<dyn Tool>, BoxedError> { pub async fn build(&self) -> std::result::Result<Box<dyn Tool>, BoxedError> {
let (catalog, schema) = let (catalog, schema) =
database::split_database(&self.database).map_err(BoxedError::new)?; database::split_database(&self.database).map_err(BoxedError::new)?;
let proxy = parse_proxy_opts(self.proxy.clone(), self.no_proxy)?;
let database_client = DatabaseClient::new( let database_client = DatabaseClient::new(
self.addr.clone(), self.addr.clone(),
catalog.clone(), catalog.clone(),
self.auth_basic.clone(), self.auth_basic.clone(),
// Treats `None` as `0s` to disable server-side default timeout. // Treats `None` as `0s` to disable server-side default timeout.
self.timeout.unwrap_or_default(), self.timeout.unwrap_or_default(),
proxy,
); );
Ok(Box::new(Export { Ok(Box::new(Export {

View File

@@ -25,7 +25,7 @@ use snafu::{OptionExt, ResultExt};
use tokio::sync::Semaphore; use tokio::sync::Semaphore;
use tokio::time::Instant; use tokio::time::Instant;
use crate::database::{parse_proxy_opts, DatabaseClient}; use crate::database::DatabaseClient;
use crate::error::{Error, FileIoSnafu, Result, SchemaNotFoundSnafu}; use crate::error::{Error, FileIoSnafu, Result, SchemaNotFoundSnafu};
use crate::{database, Tool}; use crate::{database, Tool};
@@ -76,30 +76,18 @@ pub struct ImportCommand {
/// The default behavior will disable server-side default timeout(i.e. `0s`). /// The default behavior will disable server-side default timeout(i.e. `0s`).
#[clap(long, value_parser = humantime::parse_duration)] #[clap(long, value_parser = humantime::parse_duration)]
timeout: Option<Duration>, timeout: Option<Duration>,
/// The proxy server address to connect, if set, will override the system proxy.
///
/// The default behavior will use the system proxy if neither `proxy` nor `no_proxy` is set.
#[clap(long)]
proxy: Option<String>,
/// Disable proxy server, if set, will not use any proxy.
#[clap(long, default_value = "false")]
no_proxy: bool,
} }
impl ImportCommand { impl ImportCommand {
pub async fn build(&self) -> std::result::Result<Box<dyn Tool>, BoxedError> { pub async fn build(&self) -> std::result::Result<Box<dyn Tool>, BoxedError> {
let (catalog, schema) = let (catalog, schema) =
database::split_database(&self.database).map_err(BoxedError::new)?; database::split_database(&self.database).map_err(BoxedError::new)?;
let proxy = parse_proxy_opts(self.proxy.clone(), self.no_proxy)?;
let database_client = DatabaseClient::new( let database_client = DatabaseClient::new(
self.addr.clone(), self.addr.clone(),
catalog.clone(), catalog.clone(),
self.auth_basic.clone(), self.auth_basic.clone(),
// Treats `None` as `0s` to disable server-side default timeout. // Treats `None` as `0s` to disable server-side default timeout.
self.timeout.unwrap_or_default(), self.timeout.unwrap_or_default(),
proxy,
); );
Ok(Box::new(Import { Ok(Box::new(Import {
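Note (not part of the diff above): both the export and import commands parse --timeout with humantime::parse_duration and collapse an absent value to zero, which the server side interprets as "no timeout" rather than "time out immediately". A minimal standalone sketch of that flag — the Opts struct is made up for illustration, not the real ImportCommand:

    use std::time::Duration;

    use clap::Parser;

    #[derive(Debug, Parser)]
    struct Opts {
        /// Accepts human-readable values such as `30s` or `2m`; omitted means `0s`.
        #[clap(long, value_parser = humantime::parse_duration)]
        timeout: Option<Duration>,
    }

    fn main() {
        let opts = Opts::parse();
        // `None` becomes Duration::ZERO, i.e. the server-side default timeout is disabled.
        let effective = opts.timeout.unwrap_or_default();
        println!("effective timeout: {effective:?}");
    }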

View File

@@ -10,8 +10,9 @@ name = "greptime"
path = "src/bin/greptime.rs" path = "src/bin/greptime.rs"
[features] [features]
default = ["servers/pprof", "servers/mem-prof"] default = ["python", "servers/pprof", "servers/mem-prof"]
tokio-console = ["common-telemetry/tokio-console"] tokio-console = ["common-telemetry/tokio-console"]
python = ["frontend/python"]
[lints] [lints]
workspace = true workspace = true
@@ -57,7 +58,6 @@ humantime.workspace = true
lazy_static.workspace = true lazy_static.workspace = true
meta-client.workspace = true meta-client.workspace = true
meta-srv.workspace = true meta-srv.workspace = true
metric-engine.workspace = true
mito2.workspace = true mito2.workspace = true
moka.workspace = true moka.workspace = true
nu-ansi-term = "0.46" nu-ansi-term = "0.46"

View File

@@ -51,7 +51,8 @@ impl App for Instance {
} }
async fn start(&mut self) -> Result<()> { async fn start(&mut self) -> Result<()> {
self.start().await self.start().await.unwrap();
Ok(())
} }
fn wait_signal(&self) -> bool { fn wait_signal(&self) -> bool {

View File

@@ -63,7 +63,9 @@ impl Instance {
&self.datanode &self.datanode
} }
/// allow customizing datanode for downstream projects /// Get mutable Datanode instance for changing some internal state, before starting it.
// Useful for wrapping Datanode instance. Please do not remove this method even if you find
// nowhere it is called.
pub fn datanode_mut(&mut self) -> &mut Datanode { pub fn datanode_mut(&mut self) -> &mut Datanode {
&mut self.datanode &mut self.datanode
} }
@@ -126,14 +128,10 @@ impl SubCommand {
struct StartCommand { struct StartCommand {
#[clap(long)] #[clap(long)]
node_id: Option<u64>, node_id: Option<u64>,
/// The address to bind the gRPC server. #[clap(long)]
#[clap(long, alias = "rpc-addr")] rpc_addr: Option<String>,
rpc_bind_addr: Option<String>, #[clap(long)]
/// The address advertised to the metasrv, and used for connections from outside the host. rpc_hostname: Option<String>,
/// If left empty or unset, the server will automatically use the IP address of the first network interface
/// on the host, with the same port number as the one specified in `rpc_bind_addr`.
#[clap(long, alias = "rpc-hostname")]
rpc_server_addr: Option<String>,
#[clap(long, value_delimiter = ',', num_args = 1..)] #[clap(long, value_delimiter = ',', num_args = 1..)]
metasrv_addrs: Option<Vec<String>>, metasrv_addrs: Option<Vec<String>>,
#[clap(short, long)] #[clap(short, long)]
@@ -185,18 +183,18 @@ impl StartCommand {
tokio_console_addr: global_options.tokio_console_addr.clone(), tokio_console_addr: global_options.tokio_console_addr.clone(),
}; };
if let Some(addr) = &self.rpc_bind_addr { if let Some(addr) = &self.rpc_addr {
opts.grpc.bind_addr.clone_from(addr); opts.grpc.addr.clone_from(addr);
} else if let Some(addr) = &opts.rpc_addr { } else if let Some(addr) = &opts.rpc_addr {
warn!("Use the deprecated attribute `DatanodeOptions.rpc_addr`, please use `grpc.addr` instead."); warn!("Use the deprecated attribute `DatanodeOptions.rpc_addr`, please use `grpc.addr` instead.");
opts.grpc.bind_addr.clone_from(addr); opts.grpc.addr.clone_from(addr);
} }
if let Some(server_addr) = &self.rpc_server_addr { if let Some(hostname) = &self.rpc_hostname {
opts.grpc.server_addr.clone_from(server_addr); opts.grpc.hostname.clone_from(hostname);
} else if let Some(server_addr) = &opts.rpc_hostname { } else if let Some(hostname) = &opts.rpc_hostname {
warn!("Use the deprecated attribute `DatanodeOptions.rpc_hostname`, please use `grpc.hostname` instead."); warn!("Use the deprecated attribute `DatanodeOptions.rpc_hostname`, please use `grpc.hostname` instead.");
opts.grpc.server_addr.clone_from(server_addr); opts.grpc.hostname.clone_from(hostname);
} }
if let Some(runtime_size) = opts.rpc_runtime_size { if let Some(runtime_size) = opts.rpc_runtime_size {
@@ -280,8 +278,7 @@ impl StartCommand {
info!("Datanode options: {:#?}", opts); info!("Datanode options: {:#?}", opts);
let plugin_opts = opts.plugins; let plugin_opts = opts.plugins;
let mut opts = opts.component; let opts = opts.component;
opts.grpc.detect_server_addr();
let mut plugins = Plugins::new(); let mut plugins = Plugins::new();
plugins::setup_datanode_plugins(&mut plugins, &plugin_opts, &opts) plugins::setup_datanode_plugins(&mut plugins, &plugin_opts, &opts)
.await .await
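Note (not part of the diff above): in the variant that declares rpc_server_addr, leaving the advertised address unset makes the command fall back to opts.grpc.detect_server_addr(), documented above as using the IP of the first network interface plus the bind port. That implementation is not shown in this diff; purely as an illustration of how such detection could be approximated, one can ask the OS which local IP it would route outbound traffic through:

    use std::net::UdpSocket;

    /// Illustrative only: derive an advertise address from the bind address by letting
    /// the OS pick the outbound interface. Not the actual detect_server_addr() code.
    fn guess_server_addr(bind_addr: &str) -> std::io::Result<String> {
        // Reuse the port from the bind address, e.g. "0.0.0.0:3001" -> "3001".
        let port = bind_addr.rsplit(':').next().unwrap_or_default();
        // Connecting a UDP socket sends no packets; it only selects a local address/route.
        let socket = UdpSocket::bind("0.0.0.0:0")?;
        socket.connect("8.8.8.8:80")?;
        Ok(format!("{}:{}", socket.local_addr()?.ip(), port))
    }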
@@ -361,8 +358,8 @@ mod tests {
rpc_addr = "127.0.0.1:4001" rpc_addr = "127.0.0.1:4001"
rpc_hostname = "192.168.0.1" rpc_hostname = "192.168.0.1"
[grpc] [grpc]
bind_addr = "127.0.0.1:3001" addr = "127.0.0.1:3001"
server_addr = "127.0.0.1" hostname = "127.0.0.1"
runtime_size = 8 runtime_size = 8
"#; "#;
write!(file, "{}", toml_str).unwrap(); write!(file, "{}", toml_str).unwrap();
@@ -373,8 +370,8 @@ mod tests {
}; };
let options = cmd.load_options(&Default::default()).unwrap().component; let options = cmd.load_options(&Default::default()).unwrap().component;
assert_eq!("127.0.0.1:4001".to_string(), options.grpc.bind_addr); assert_eq!("127.0.0.1:4001".to_string(), options.grpc.addr);
assert_eq!("192.168.0.1".to_string(), options.grpc.server_addr); assert_eq!("192.168.0.1".to_string(), options.grpc.hostname);
} }
#[test] #[test]
@@ -435,7 +432,7 @@ mod tests {
let options = cmd.load_options(&Default::default()).unwrap().component; let options = cmd.load_options(&Default::default()).unwrap().component;
assert_eq!("127.0.0.1:3001".to_string(), options.grpc.bind_addr); assert_eq!("127.0.0.1:3001".to_string(), options.grpc.addr);
assert_eq!(Some(42), options.node_id); assert_eq!(Some(42), options.node_id);
let DatanodeWalConfig::RaftEngine(raft_engine_config) = options.wal else { let DatanodeWalConfig::RaftEngine(raft_engine_config) = options.wal else {
@@ -649,7 +646,7 @@ mod tests {
opts.http.addr, opts.http.addr,
DatanodeOptions::default().component.http.addr DatanodeOptions::default().component.http.addr
); );
assert_eq!(opts.grpc.server_addr, "10.103.174.219"); assert_eq!(opts.grpc.hostname, "10.103.174.219");
}, },
); );
} }

View File

@@ -345,13 +345,6 @@ pub enum Error {
#[snafu(implicit)] #[snafu(implicit)]
location: Location, location: Location,
}, },
#[snafu(display("Failed to build wal options allocator"))]
BuildWalOptionsAllocator {
#[snafu(implicit)]
location: Location,
source: common_meta::error::Error,
},
} }
pub type Result<T> = std::result::Result<T, Error>; pub type Result<T> = std::result::Result<T, Error>;
@@ -385,8 +378,7 @@ impl ErrorExt for Error {
Error::StartProcedureManager { source, .. } Error::StartProcedureManager { source, .. }
| Error::StopProcedureManager { source, .. } => source.status_code(), | Error::StopProcedureManager { source, .. } => source.status_code(),
Error::BuildWalOptionsAllocator { source, .. } Error::StartWalOptionsAllocator { source, .. } => source.status_code(),
| Error::StartWalOptionsAllocator { source, .. } => source.status_code(),
Error::ReplCreation { .. } | Error::Readline { .. } | Error::HttpQuerySql { .. } => { Error::ReplCreation { .. } | Error::Readline { .. } | Error::HttpQuerySql { .. } => {
StatusCode::Internal StatusCode::Internal
} }

View File

@@ -13,7 +13,6 @@
// limitations under the License. // limitations under the License.
use std::sync::Arc; use std::sync::Arc;
use std::time::Duration;
use cache::{build_fundamental_cache_registry, with_default_composite_cache_registry}; use cache::{build_fundamental_cache_registry, with_default_composite_cache_registry};
use catalog::information_extension::DistributedInformationExtension; use catalog::information_extension::DistributedInformationExtension;
@@ -67,11 +66,6 @@ impl Instance {
pub fn flownode(&self) -> &FlownodeInstance { pub fn flownode(&self) -> &FlownodeInstance {
&self.flownode &self.flownode
} }
/// allow customizing flownode for downstream projects
pub fn flownode_mut(&mut self) -> &mut FlownodeInstance {
&mut self.flownode
}
} }
#[async_trait::async_trait] #[async_trait::async_trait]
@@ -129,13 +123,11 @@ struct StartCommand {
#[clap(long)] #[clap(long)]
node_id: Option<u64>, node_id: Option<u64>,
/// Bind address for the gRPC server. /// Bind address for the gRPC server.
#[clap(long, alias = "rpc-addr")] #[clap(long)]
rpc_bind_addr: Option<String>, rpc_addr: Option<String>,
/// The address advertised to the metasrv, and used for connections from outside the host. /// Hostname for the gRPC server.
/// If left empty or unset, the server will automatically use the IP address of the first network interface #[clap(long)]
/// on the host, with the same port number as the one specified in `rpc_bind_addr`. rpc_hostname: Option<String>,
#[clap(long, alias = "rpc-hostname")]
rpc_server_addr: Option<String>,
/// Metasrv address list; /// Metasrv address list;
#[clap(long, value_delimiter = ',', num_args = 1..)] #[clap(long, value_delimiter = ',', num_args = 1..)]
metasrv_addrs: Option<Vec<String>>, metasrv_addrs: Option<Vec<String>>,
@@ -145,11 +137,6 @@ struct StartCommand {
/// The prefix of environment variables, default is `GREPTIMEDB_FLOWNODE`; /// The prefix of environment variables, default is `GREPTIMEDB_FLOWNODE`;
#[clap(long, default_value = "GREPTIMEDB_FLOWNODE")] #[clap(long, default_value = "GREPTIMEDB_FLOWNODE")]
env_prefix: String, env_prefix: String,
#[clap(long)]
http_addr: Option<String>,
/// HTTP request timeout in seconds.
#[clap(long)]
http_timeout: Option<u64>,
} }
impl StartCommand { impl StartCommand {
@@ -186,12 +173,12 @@ impl StartCommand {
tokio_console_addr: global_options.tokio_console_addr.clone(), tokio_console_addr: global_options.tokio_console_addr.clone(),
}; };
if let Some(addr) = &self.rpc_bind_addr { if let Some(addr) = &self.rpc_addr {
opts.grpc.bind_addr.clone_from(addr); opts.grpc.addr.clone_from(addr);
} }
if let Some(server_addr) = &self.rpc_server_addr { if let Some(hostname) = &self.rpc_hostname {
opts.grpc.server_addr.clone_from(server_addr); opts.grpc.hostname.clone_from(hostname);
} }
if let Some(node_id) = self.node_id { if let Some(node_id) = self.node_id {
@@ -206,14 +193,6 @@ impl StartCommand {
opts.mode = Mode::Distributed; opts.mode = Mode::Distributed;
} }
if let Some(http_addr) = &self.http_addr {
opts.http.addr.clone_from(http_addr);
}
if let Some(http_timeout) = self.http_timeout {
opts.http.timeout = Duration::from_secs(http_timeout);
}
if let (Mode::Distributed, None) = (&opts.mode, &opts.node_id) { if let (Mode::Distributed, None) = (&opts.mode, &opts.node_id) {
return MissingConfigSnafu { return MissingConfigSnafu {
msg: "Missing node id option", msg: "Missing node id option",
@@ -238,8 +217,7 @@ impl StartCommand {
info!("Flownode start command: {:#?}", self); info!("Flownode start command: {:#?}", self);
info!("Flownode options: {:#?}", opts); info!("Flownode options: {:#?}", opts);
let mut opts = opts.component; let opts = opts.component;
opts.grpc.detect_server_addr();
// TODO(discord9): make it not optionale after cluster id is required // TODO(discord9): make it not optionale after cluster id is required
let cluster_id = opts.cluster_id.unwrap_or(0); let cluster_id = opts.cluster_id.unwrap_or(0);

View File

@@ -136,19 +136,13 @@ impl SubCommand {
#[derive(Debug, Default, Parser)] #[derive(Debug, Default, Parser)]
pub struct StartCommand { pub struct StartCommand {
/// The address to bind the gRPC server.
#[clap(long, alias = "rpc-addr")]
rpc_bind_addr: Option<String>,
/// The address advertised to the metasrv, and used for connections from outside the host.
/// If left empty or unset, the server will automatically use the IP address of the first network interface
/// on the host, with the same port number as the one specified in `rpc_bind_addr`.
#[clap(long, alias = "rpc-hostname")]
rpc_server_addr: Option<String>,
#[clap(long)] #[clap(long)]
http_addr: Option<String>, http_addr: Option<String>,
#[clap(long)] #[clap(long)]
http_timeout: Option<u64>, http_timeout: Option<u64>,
#[clap(long)] #[clap(long)]
rpc_addr: Option<String>,
#[clap(long)]
mysql_addr: Option<String>, mysql_addr: Option<String>,
#[clap(long)] #[clap(long)]
postgres_addr: Option<String>, postgres_addr: Option<String>,
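Note (not part of the diff above): one side of this hunk names the gRPC flags --rpc-bind-addr / --rpc-server-addr while keeping the older spellings (--rpc-addr, --rpc-hostname) as clap aliases, so existing scripts keep working across the rename. A minimal sketch of that pattern, with a made-up Opts struct rather than the real StartCommand:

    use clap::Parser;

    #[derive(Debug, Parser)]
    struct Opts {
        /// Canonical flag name; the `--rpc-addr` spelling is still accepted as an alias.
        #[clap(long, alias = "rpc-addr")]
        rpc_bind_addr: Option<String>,
    }

    fn main() {
        // Both `--rpc-bind-addr 0.0.0.0:4001` and `--rpc-addr 0.0.0.0:4001`
        // parse into the same field.
        let opts = Opts::parse();
        println!("{opts:?}");
    }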
@@ -224,15 +218,11 @@ impl StartCommand {
opts.http.disable_dashboard = disable_dashboard; opts.http.disable_dashboard = disable_dashboard;
} }
if let Some(addr) = &self.rpc_bind_addr { if let Some(addr) = &self.rpc_addr {
opts.grpc.bind_addr.clone_from(addr); opts.grpc.addr.clone_from(addr);
opts.grpc.tls = tls_opts.clone(); opts.grpc.tls = tls_opts.clone();
} }
if let Some(addr) = &self.rpc_server_addr {
opts.grpc.server_addr.clone_from(addr);
}
if let Some(addr) = &self.mysql_addr { if let Some(addr) = &self.mysql_addr {
opts.mysql.enable = true; opts.mysql.enable = true;
opts.mysql.addr.clone_from(addr); opts.mysql.addr.clone_from(addr);
@@ -278,8 +268,7 @@ impl StartCommand {
info!("Frontend options: {:#?}", opts); info!("Frontend options: {:#?}", opts);
let plugin_opts = opts.plugins; let plugin_opts = opts.plugins;
let mut opts = opts.component; let opts = opts.component;
opts.grpc.detect_server_addr();
let mut plugins = Plugins::new(); let mut plugins = Plugins::new();
plugins::setup_frontend_plugins(&mut plugins, &plugin_opts, &opts) plugins::setup_frontend_plugins(&mut plugins, &plugin_opts, &opts)
.await .await
@@ -423,7 +412,7 @@ mod tests {
let default_opts = FrontendOptions::default().component; let default_opts = FrontendOptions::default().component;
assert_eq!(opts.grpc.bind_addr, default_opts.grpc.bind_addr); assert_eq!(opts.grpc.addr, default_opts.grpc.addr);
assert!(opts.mysql.enable); assert!(opts.mysql.enable);
assert_eq!(opts.mysql.runtime_size, default_opts.mysql.runtime_size); assert_eq!(opts.mysql.runtime_size, default_opts.mysql.runtime_size);
assert!(opts.postgres.enable); assert!(opts.postgres.enable);
@@ -614,7 +603,7 @@ mod tests {
assert_eq!(fe_opts.http.addr, "127.0.0.1:14000"); assert_eq!(fe_opts.http.addr, "127.0.0.1:14000");
// Should be default value. // Should be default value.
assert_eq!(fe_opts.grpc.bind_addr, GrpcOptions::default().bind_addr); assert_eq!(fe_opts.grpc.addr, GrpcOptions::default().addr);
}, },
); );
} }

View File

@@ -42,7 +42,7 @@ pub struct Instance {
} }
impl Instance { impl Instance {
pub fn new(instance: MetasrvInstance, guard: Vec<WorkerGuard>) -> Self { fn new(instance: MetasrvInstance, guard: Vec<WorkerGuard>) -> Self {
Self { Self {
instance, instance,
_guard: guard, _guard: guard,
@@ -133,15 +133,11 @@ impl SubCommand {
#[derive(Debug, Default, Parser)] #[derive(Debug, Default, Parser)]
struct StartCommand { struct StartCommand {
/// The address to bind the gRPC server. #[clap(long)]
#[clap(long, alias = "bind-addr")] bind_addr: Option<String>,
rpc_bind_addr: Option<String>, #[clap(long)]
/// The communication server address for the frontend and datanode to connect to metasrv. server_addr: Option<String>,
/// If left empty or unset, the server will automatically use the IP address of the first network interface #[clap(long, aliases = ["store-addr"], value_delimiter = ',', num_args = 1..)]
/// on the host, with the same port number as the one specified in `rpc_bind_addr`.
#[clap(long, alias = "server-addr")]
rpc_server_addr: Option<String>,
#[clap(long, alias = "store-addr", value_delimiter = ',', num_args = 1..)]
store_addrs: Option<Vec<String>>, store_addrs: Option<Vec<String>>,
#[clap(short, long)] #[clap(short, long)]
config_file: Option<String>, config_file: Option<String>,
@@ -205,11 +201,11 @@ impl StartCommand {
tokio_console_addr: global_options.tokio_console_addr.clone(), tokio_console_addr: global_options.tokio_console_addr.clone(),
}; };
if let Some(addr) = &self.rpc_bind_addr { if let Some(addr) = &self.bind_addr {
opts.bind_addr.clone_from(addr); opts.bind_addr.clone_from(addr);
} }
if let Some(addr) = &self.rpc_server_addr { if let Some(addr) = &self.server_addr {
opts.server_addr.clone_from(addr); opts.server_addr.clone_from(addr);
} }
@@ -253,6 +249,8 @@ impl StartCommand {
if let Some(backend) = &self.backend { if let Some(backend) = &self.backend {
opts.backend.clone_from(backend); opts.backend.clone_from(backend);
} else {
opts.backend = BackendImpl::default()
} }
// Disable dashboard in metasrv. // Disable dashboard in metasrv.
@@ -273,13 +271,10 @@ impl StartCommand {
log_versions(version(), short_version(), APP_NAME); log_versions(version(), short_version(), APP_NAME);
info!("Metasrv start command: {:#?}", self); info!("Metasrv start command: {:#?}", self);
let plugin_opts = opts.plugins;
let mut opts = opts.component;
opts.detect_server_addr();
info!("Metasrv options: {:#?}", opts); info!("Metasrv options: {:#?}", opts);
let plugin_opts = opts.plugins;
let opts = opts.component;
let mut plugins = Plugins::new(); let mut plugins = Plugins::new();
plugins::setup_metasrv_plugins(&mut plugins, &plugin_opts, &opts) plugins::setup_metasrv_plugins(&mut plugins, &plugin_opts, &opts)
.await .await
@@ -312,8 +307,8 @@ mod tests {
#[test] #[test]
fn test_read_from_cmd() { fn test_read_from_cmd() {
let cmd = StartCommand { let cmd = StartCommand {
rpc_bind_addr: Some("127.0.0.1:3002".to_string()), bind_addr: Some("127.0.0.1:3002".to_string()),
rpc_server_addr: Some("127.0.0.1:3002".to_string()), server_addr: Some("127.0.0.1:3002".to_string()),
store_addrs: Some(vec!["127.0.0.1:2380".to_string()]), store_addrs: Some(vec!["127.0.0.1:2380".to_string()]),
selector: Some("LoadBased".to_string()), selector: Some("LoadBased".to_string()),
..Default::default() ..Default::default()
@@ -387,8 +382,8 @@ mod tests {
#[test] #[test]
fn test_load_log_options_from_cli() { fn test_load_log_options_from_cli() {
let cmd = StartCommand { let cmd = StartCommand {
rpc_bind_addr: Some("127.0.0.1:3002".to_string()), bind_addr: Some("127.0.0.1:3002".to_string()),
rpc_server_addr: Some("127.0.0.1:3002".to_string()), server_addr: Some("127.0.0.1:3002".to_string()),
store_addrs: Some(vec!["127.0.0.1:2380".to_string()]), store_addrs: Some(vec!["127.0.0.1:2380".to_string()]),
selector: Some("LoadBased".to_string()), selector: Some("LoadBased".to_string()),
..Default::default() ..Default::default()

View File

@@ -43,7 +43,7 @@ use common_meta::node_manager::NodeManagerRef;
use common_meta::peer::Peer; use common_meta::peer::Peer;
use common_meta::region_keeper::MemoryRegionKeeper; use common_meta::region_keeper::MemoryRegionKeeper;
use common_meta::sequence::SequenceBuilder; use common_meta::sequence::SequenceBuilder;
use common_meta::wal_options_allocator::{build_wal_options_allocator, WalOptionsAllocatorRef}; use common_meta::wal_options_allocator::{WalOptionsAllocator, WalOptionsAllocatorRef};
use common_procedure::{ProcedureInfo, ProcedureManagerRef}; use common_procedure::{ProcedureInfo, ProcedureManagerRef};
use common_telemetry::info; use common_telemetry::info;
use common_telemetry::logging::{LoggingOptions, TracingOptions}; use common_telemetry::logging::{LoggingOptions, TracingOptions};
@@ -54,14 +54,13 @@ use datanode::config::{DatanodeOptions, ProcedureConfig, RegionEngineConfig, Sto
use datanode::datanode::{Datanode, DatanodeBuilder}; use datanode::datanode::{Datanode, DatanodeBuilder};
use datanode::region_server::RegionServer; use datanode::region_server::RegionServer;
use file_engine::config::EngineConfig as FileEngineConfig; use file_engine::config::EngineConfig as FileEngineConfig;
use flow::{FlowConfig, FlowWorkerManager, FlownodeBuilder, FlownodeOptions, FrontendInvoker}; use flow::{FlowWorkerManager, FlownodeBuilder, FrontendInvoker};
use frontend::frontend::FrontendOptions; use frontend::frontend::FrontendOptions;
use frontend::instance::builder::FrontendBuilder; use frontend::instance::builder::FrontendBuilder;
use frontend::instance::{FrontendInstance, Instance as FeInstance, StandaloneDatanodeManager}; use frontend::instance::{FrontendInstance, Instance as FeInstance, StandaloneDatanodeManager};
use frontend::server::Services; use frontend::server::Services;
use frontend::service_config::{ use frontend::service_config::{
InfluxdbOptions, JaegerOptions, MysqlOptions, OpentsdbOptions, PostgresOptions, InfluxdbOptions, MysqlOptions, OpentsdbOptions, PostgresOptions, PromStoreOptions,
PromStoreOptions,
}; };
use meta_srv::metasrv::{FLOW_ID_SEQ, TABLE_ID_SEQ}; use meta_srv::metasrv::{FLOW_ID_SEQ, TABLE_ID_SEQ};
use mito2::config::MitoConfig; use mito2::config::MitoConfig;
@@ -77,10 +76,10 @@ use tokio::sync::{broadcast, RwLock};
use tracing_appender::non_blocking::WorkerGuard; use tracing_appender::non_blocking::WorkerGuard;
use crate::error::{ use crate::error::{
BuildCacheRegistrySnafu, BuildWalOptionsAllocatorSnafu, CreateDirSnafu, IllegalConfigSnafu, BuildCacheRegistrySnafu, CreateDirSnafu, IllegalConfigSnafu, InitDdlManagerSnafu,
InitDdlManagerSnafu, InitMetadataSnafu, InitTimezoneSnafu, LoadLayeredConfigSnafu, OtherSnafu, InitMetadataSnafu, InitTimezoneSnafu, LoadLayeredConfigSnafu, OtherSnafu, Result,
Result, ShutdownDatanodeSnafu, ShutdownFlownodeSnafu, ShutdownFrontendSnafu, ShutdownDatanodeSnafu, ShutdownFlownodeSnafu, ShutdownFrontendSnafu, StartDatanodeSnafu,
StartDatanodeSnafu, StartFlownodeSnafu, StartFrontendSnafu, StartProcedureManagerSnafu, StartFlownodeSnafu, StartFrontendSnafu, StartProcedureManagerSnafu,
StartWalOptionsAllocatorSnafu, StopProcedureManagerSnafu, StartWalOptionsAllocatorSnafu, StopProcedureManagerSnafu,
}; };
use crate::options::{GlobalOptions, GreptimeOptions}; use crate::options::{GlobalOptions, GreptimeOptions};
@@ -141,13 +140,11 @@ pub struct StandaloneOptions {
pub postgres: PostgresOptions, pub postgres: PostgresOptions,
pub opentsdb: OpentsdbOptions, pub opentsdb: OpentsdbOptions,
pub influxdb: InfluxdbOptions, pub influxdb: InfluxdbOptions,
pub jaeger: JaegerOptions,
pub prom_store: PromStoreOptions, pub prom_store: PromStoreOptions,
pub wal: DatanodeWalConfig, pub wal: DatanodeWalConfig,
pub storage: StorageConfig, pub storage: StorageConfig,
pub metadata_store: KvBackendConfig, pub metadata_store: KvBackendConfig,
pub procedure: ProcedureConfig, pub procedure: ProcedureConfig,
pub flow: FlowConfig,
pub logging: LoggingOptions, pub logging: LoggingOptions,
pub user_provider: Option<String>, pub user_provider: Option<String>,
/// Options for different store engines. /// Options for different store engines.
@@ -171,13 +168,11 @@ impl Default for StandaloneOptions {
postgres: PostgresOptions::default(), postgres: PostgresOptions::default(),
opentsdb: OpentsdbOptions::default(), opentsdb: OpentsdbOptions::default(),
influxdb: InfluxdbOptions::default(), influxdb: InfluxdbOptions::default(),
jaeger: JaegerOptions::default(),
prom_store: PromStoreOptions::default(), prom_store: PromStoreOptions::default(),
wal: DatanodeWalConfig::default(), wal: DatanodeWalConfig::default(),
storage: StorageConfig::default(), storage: StorageConfig::default(),
metadata_store: KvBackendConfig::default(), metadata_store: KvBackendConfig::default(),
procedure: ProcedureConfig::default(), procedure: ProcedureConfig::default(),
flow: FlowConfig::default(),
logging: LoggingOptions::default(), logging: LoggingOptions::default(),
export_metrics: ExportMetricsOption::default(), export_metrics: ExportMetricsOption::default(),
user_provider: None, user_provider: None,
@@ -220,7 +215,6 @@ impl StandaloneOptions {
postgres: cloned_opts.postgres, postgres: cloned_opts.postgres,
opentsdb: cloned_opts.opentsdb, opentsdb: cloned_opts.opentsdb,
influxdb: cloned_opts.influxdb, influxdb: cloned_opts.influxdb,
jaeger: cloned_opts.jaeger,
prom_store: cloned_opts.prom_store, prom_store: cloned_opts.prom_store,
meta_client: None, meta_client: None,
logging: cloned_opts.logging, logging: cloned_opts.logging,
@@ -333,8 +327,8 @@ impl App for Instance {
pub struct StartCommand { pub struct StartCommand {
#[clap(long)] #[clap(long)]
http_addr: Option<String>, http_addr: Option<String>,
#[clap(long, alias = "rpc-addr")] #[clap(long)]
rpc_bind_addr: Option<String>, rpc_addr: Option<String>,
#[clap(long)] #[clap(long)]
mysql_addr: Option<String>, mysql_addr: Option<String>,
#[clap(long)] #[clap(long)]
@@ -411,9 +405,9 @@ impl StartCommand {
opts.storage.data_home.clone_from(data_home); opts.storage.data_home.clone_from(data_home);
} }
if let Some(addr) = &self.rpc_bind_addr { if let Some(addr) = &self.rpc_addr {
// frontend grpc addr conflict with datanode default grpc addr // frontend grpc addr conflict with datanode default grpc addr
let datanode_grpc_addr = DatanodeOptions::default().grpc.bind_addr; let datanode_grpc_addr = DatanodeOptions::default().grpc.addr;
if addr.eq(&datanode_grpc_addr) { if addr.eq(&datanode_grpc_addr) {
return IllegalConfigSnafu { return IllegalConfigSnafu {
msg: format!( msg: format!(
@@ -421,7 +415,7 @@ impl StartCommand {
), ),
}.fail(); }.fail();
} }
opts.grpc.bind_addr.clone_from(addr) opts.grpc.addr.clone_from(addr)
} }
if let Some(addr) = &self.mysql_addr { if let Some(addr) = &self.mysql_addr {
@@ -467,8 +461,7 @@ impl StartCommand {
let mut plugins = Plugins::new(); let mut plugins = Plugins::new();
let plugin_opts = opts.plugins; let plugin_opts = opts.plugins;
let mut opts = opts.component; let opts = opts.component;
opts.grpc.detect_server_addr();
let fe_opts = opts.frontend_options(); let fe_opts = opts.frontend_options();
let dn_opts = opts.datanode_options(); let dn_opts = opts.datanode_options();
@@ -490,8 +483,8 @@ impl StartCommand {
let metadata_dir = metadata_store_dir(data_home); let metadata_dir = metadata_store_dir(data_home);
let (kv_backend, procedure_manager) = FeInstance::try_build_standalone_components( let (kv_backend, procedure_manager) = FeInstance::try_build_standalone_components(
metadata_dir, metadata_dir,
opts.metadata_store, opts.metadata_store.clone(),
opts.procedure, opts.procedure.clone(),
) )
.await .await
.context(StartFrontendSnafu)?; .context(StartFrontendSnafu)?;
@@ -529,12 +522,8 @@ impl StartCommand {
Self::create_table_metadata_manager(kv_backend.clone()).await?; Self::create_table_metadata_manager(kv_backend.clone()).await?;
let flow_metadata_manager = Arc::new(FlowMetadataManager::new(kv_backend.clone())); let flow_metadata_manager = Arc::new(FlowMetadataManager::new(kv_backend.clone()));
let flownode_options = FlownodeOptions {
flow: opts.flow.clone(),
..Default::default()
};
let flow_builder = FlownodeBuilder::new( let flow_builder = FlownodeBuilder::new(
flownode_options, Default::default(),
plugins.clone(), plugins.clone(),
table_metadata_manager.clone(), table_metadata_manager.clone(),
catalog_manager.clone(), catalog_manager.clone(),
@@ -573,11 +562,10 @@ impl StartCommand {
.step(10) .step(10)
.build(), .build(),
); );
let kafka_options = opts.wal.clone().into(); let wal_options_allocator = Arc::new(WalOptionsAllocator::new(
let wal_options_allocator = build_wal_options_allocator(&kafka_options, kv_backend.clone()) opts.wal.clone().into(),
.await kv_backend.clone(),
.context(BuildWalOptionsAllocatorSnafu)?; ));
let wal_options_allocator = Arc::new(wal_options_allocator);
let table_meta_allocator = Arc::new(TableMetadataAllocator::new( let table_meta_allocator = Arc::new(TableMetadataAllocator::new(
table_id_sequence, table_id_sequence,
wal_options_allocator.clone(), wal_options_allocator.clone(),
@@ -911,7 +899,7 @@ mod tests {
assert_eq!("127.0.0.1:4000".to_string(), fe_opts.http.addr); assert_eq!("127.0.0.1:4000".to_string(), fe_opts.http.addr);
assert_eq!(Duration::from_secs(33), fe_opts.http.timeout); assert_eq!(Duration::from_secs(33), fe_opts.http.timeout);
assert_eq!(ReadableSize::mb(128), fe_opts.http.body_limit); assert_eq!(ReadableSize::mb(128), fe_opts.http.body_limit);
assert_eq!("127.0.0.1:4001".to_string(), fe_opts.grpc.bind_addr); assert_eq!("127.0.0.1:4001".to_string(), fe_opts.grpc.addr);
assert!(fe_opts.mysql.enable); assert!(fe_opts.mysql.enable);
assert_eq!("127.0.0.1:4002", fe_opts.mysql.addr); assert_eq!("127.0.0.1:4002", fe_opts.mysql.addr);
assert_eq!(2, fe_opts.mysql.runtime_size); assert_eq!(2, fe_opts.mysql.runtime_size);
@@ -1041,7 +1029,7 @@ mod tests {
assert_eq!(ReadableSize::mb(64), fe_opts.http.body_limit); assert_eq!(ReadableSize::mb(64), fe_opts.http.body_limit);
// Should be default value. // Should be default value.
assert_eq!(fe_opts.grpc.bind_addr, GrpcOptions::default().bind_addr); assert_eq!(fe_opts.grpc.addr, GrpcOptions::default().addr);
}, },
); );
} }

Some files were not shown because too many files have changed in this diff.