Compare commits

..

55 Commits

Author / SHA1 / Message / Date
evenyag
6247de2d50 chore: Revert "feat: prune in each partition"
This reverts commit 3f9bf48161.
2024-11-08 23:57:18 +08:00
evenyag
a2eb46132f feat: tokio dump 2024-11-08 23:08:47 +08:00
evenyag
3f9bf48161 feat: prune in each partition 2024-11-08 21:31:03 +08:00
evenyag
9bd2e006b5 feat: file output thread id 2024-11-08 20:35:40 +08:00
evenyag
031421ca91 feat: add thread id to log 2024-11-08 19:09:08 +08:00
evenyag
999f3a40c2 chore: Revert "chore: Revert "feat: yield after get ranges""
This reverts commit 770a850437.
2024-11-08 17:12:54 +08:00
evenyag
50d28e0a00 feat: add timeout to build ranges 2024-11-08 16:23:03 +08:00
evenyag
770a850437 chore: Revert "feat: yield after get ranges"
This reverts commit 65e53b5bc4.
2024-11-08 15:35:23 +08:00
evenyag
65e53b5bc4 feat: yield after get ranges 2024-11-08 01:28:39 +08:00
evenyag
9a6c7aa4d6 chore: log label 2024-11-08 01:08:02 +08:00
evenyag
4f446b95d8 chore: logs 2024-11-08 01:01:27 +08:00
evenyag
9ad4200f55 feat: only log for unordered scan 2024-11-08 00:58:29 +08:00
evenyag
53d456651f chore: range builder logs 2024-11-08 00:11:54 +08:00
evenyag
f11c5acb0f feat: logs for debug prune cost 2024-11-07 22:10:46 +08:00
evenyag
8536a1ec6e chore: logs to debug hang 2024-11-07 20:36:12 +08:00
evenyag
fce8c968da feat: gauge for scan partition 2024-11-07 16:55:40 +08:00
evenyag
98a6ac973c feat: log on merge scan region start/end 2024-11-07 16:48:03 +08:00
evenyag
8f79e421c3 chore: Revert "feat: remove too large files"
This reverts commit a22667bf3c.
2024-11-07 16:20:39 +08:00
evenyag
e8b326382f chore: fix compile 2024-11-07 00:28:19 +08:00
evenyag
56781e7fbc fix: skip expired files 2024-11-07 00:25:52 +08:00
evenyag
7d342b3d95 feat: small max file size 2024-11-06 23:31:16 +08:00
evenyag
a22667bf3c feat: remove too large files 2024-11-06 22:08:43 +08:00
evenyag
29b9b7db0c feat: support compression method 2024-11-06 18:58:20 +08:00
evenyag
a66909a562 chore: fix compile 2024-11-06 16:29:18 +08:00
evenyag
8137b8ff3d chore: more logs 2024-11-06 15:25:49 +08:00
Ruihang Xia
7c5cd2922a fix split logic
Signed-off-by: Ruihang Xia <waynestxia@gmail.com>
2024-11-05 23:43:35 +08:00
evenyag
a1d0dcf2c3 chore: more logs 2024-11-05 20:25:05 +08:00
evenyag
c391171f99 feat: more logs 2024-11-05 20:18:35 +08:00
evenyag
f44862aaac feat: update log 2024-11-05 17:47:32 +08:00
evenyag
8bf795d88c chore: more logs 2024-11-05 16:22:54 +08:00
evenyag
3bbf4e0232 feat: log range meta 2024-11-05 16:01:55 +08:00
evenyag
83da3950da chore: debug 2024-11-05 15:33:42 +08:00
evenyag
957b5effd5 chore: fix compile 2024-11-05 15:32:35 +08:00
evenyag
f59e28006a feat: assert precision 2024-11-05 15:24:40 +08:00
evenyag
3e5bbdf71e feat: enable batch checker 2024-11-05 15:24:40 +08:00
Ruihang Xia
b8ac19c480 log on wrong range index
Signed-off-by: Ruihang Xia <waynestxia@gmail.com>
2024-11-05 14:57:44 +08:00
evenyag
92b274a856 chore: log compute cost 2024-11-05 14:57:54 +08:00
evenyag
6bdac25f0a chore: more logs 2024-11-05 13:02:16 +08:00
evenyag
a9f3c4b17c chore: page reader metrics 2024-11-04 20:08:56 +08:00
evenyag
e003eaab36 chore: more log 2024-11-04 20:06:20 +08:00
evenyag
6e590da412 chore: remove compaction skip log 2024-11-04 19:40:42 +08:00
evenyag
ff5fa40b85 feat: skip wal 2024-11-04 19:40:41 +08:00
Ruihang Xia
d4aa4159d4 feat: support windowed sort with where condition
Signed-off-by: Ruihang Xia <waynestxia@gmail.com>
2024-11-04 19:34:03 +08:00
evenyag
960f6d821b feat: spawn block write wal 2024-11-04 17:35:12 +08:00
Ruihang Xia
9c5d044238 Merge branch 'main' into transform-count-min-max
Signed-off-by: Ruihang Xia <waynestxia@gmail.com>
2024-11-01 17:45:28 +08:00
Ruihang Xia
70c354eed6 fix: the way to retrieve time index column
Signed-off-by: Ruihang Xia <waynestxia@gmail.com>
2024-11-01 12:10:12 +08:00
Ruihang Xia
23bf663d58 feat: handle sort that wont preserving partition
Signed-off-by: Ruihang Xia <waynestxia@gmail.com>
2024-10-31 22:13:36 +08:00
Ruihang Xia
817648eac5 Merge branch 'main' into transform-count-min-max
Signed-off-by: Ruihang Xia <waynestxia@gmail.com>
2024-10-31 15:38:12 +08:00
Ruihang Xia
03b29439e2 Merge branch 'main' into transform-count-min-max
Signed-off-by: Ruihang Xia <waynestxia@gmail.com>
2024-09-11 11:09:07 +08:00
Ruihang Xia
712f4ca0ef try sort partial commutative
Signed-off-by: Ruihang Xia <waynestxia@gmail.com>
2024-09-09 21:08:59 +08:00
Ruihang Xia
60bacff57e ignore unmatched left and right greater
Signed-off-by: Ruihang Xia <waynestxia@gmail.com>
2024-09-08 11:12:21 +08:00
Ruihang Xia
6208772ba4 Merge branch 'main' into transform-count-min-max
Signed-off-by: Ruihang Xia <waynestxia@gmail.com>
2024-09-08 11:02:04 +08:00
Ruihang Xia
67184c0498 Merge branch 'main' into transform-count-min-max
Signed-off-by: Ruihang Xia <waynestxia@gmail.com>
2024-09-05 14:30:47 +08:00
Ruihang Xia
1dd908fdf7 handle group by
Signed-off-by: Ruihang Xia <waynestxia@gmail.com>
2024-09-05 12:50:13 +08:00
Ruihang Xia
8179b4798e feat: support transforming min/max/count aggr fn
Signed-off-by: Ruihang Xia <waynestxia@gmail.com>
2024-09-04 22:17:31 +08:00
1325 changed files with 44152 additions and 96317 deletions

.github/CODEOWNERS (vendored): 2 changed lines
View File

@@ -4,7 +4,7 @@
* @GreptimeTeam/db-approver * @GreptimeTeam/db-approver
## [Module] Database Engine ## [Module] Databse Engine
/src/index @zhongzc /src/index @zhongzc
/src/mito2 @evenyag @v0y4g3r @waynexia /src/mito2 @evenyag @v0y4g3r @waynexia
/src/query @evenyag /src/query @evenyag

View File

@@ -41,14 +41,7 @@ runs:
username: ${{ inputs.dockerhub-image-registry-username }} username: ${{ inputs.dockerhub-image-registry-username }}
password: ${{ inputs.dockerhub-image-registry-token }} password: ${{ inputs.dockerhub-image-registry-token }}
- name: Set up qemu for multi-platform builds - name: Build and push dev-builder-ubuntu image
uses: docker/setup-qemu-action@v3
with:
platforms: linux/amd64,linux/arm64
# The latest version will lead to segmentation fault.
image: tonistiigi/binfmt:qemu-v7.0.0-28
- name: Build and push dev-builder-ubuntu image # Build image for amd64 and arm64 platform.
shell: bash shell: bash
if: ${{ inputs.build-dev-builder-ubuntu == 'true' }} if: ${{ inputs.build-dev-builder-ubuntu == 'true' }}
run: | run: |
@@ -59,7 +52,7 @@ runs:
IMAGE_NAMESPACE=${{ inputs.dockerhub-image-namespace }} \ IMAGE_NAMESPACE=${{ inputs.dockerhub-image-namespace }} \
DEV_BUILDER_IMAGE_TAG=${{ inputs.version }} DEV_BUILDER_IMAGE_TAG=${{ inputs.version }}
- name: Build and push dev-builder-centos image # Only build image for amd64 platform. - name: Build and push dev-builder-centos image
shell: bash shell: bash
if: ${{ inputs.build-dev-builder-centos == 'true' }} if: ${{ inputs.build-dev-builder-centos == 'true' }}
run: | run: |
@@ -76,7 +69,8 @@ runs:
run: | run: |
make dev-builder \ make dev-builder \
BASE_IMAGE=android \ BASE_IMAGE=android \
BUILDX_MULTI_PLATFORM_BUILD=amd64 \
IMAGE_REGISTRY=${{ inputs.dockerhub-image-registry }} \ IMAGE_REGISTRY=${{ inputs.dockerhub-image-registry }} \
IMAGE_NAMESPACE=${{ inputs.dockerhub-image-namespace }} \ IMAGE_NAMESPACE=${{ inputs.dockerhub-image-namespace }} \
DEV_BUILDER_IMAGE_TAG=${{ inputs.version }} DEV_BUILDER_IMAGE_TAG=${{ inputs.version }} && \
docker push ${{ inputs.dockerhub-image-registry }}/${{ inputs.dockerhub-image-namespace }}/dev-builder-android:${{ inputs.version }}

View File

@@ -54,7 +54,7 @@ runs:
PROFILE_TARGET: ${{ inputs.cargo-profile == 'dev' && 'debug' || inputs.cargo-profile }} PROFILE_TARGET: ${{ inputs.cargo-profile == 'dev' && 'debug' || inputs.cargo-profile }}
with: with:
artifacts-dir: ${{ inputs.artifacts-dir }} artifacts-dir: ${{ inputs.artifacts-dir }}
target-files: ./target/$PROFILE_TARGET/greptime target-file: ./target/$PROFILE_TARGET/greptime
version: ${{ inputs.version }} version: ${{ inputs.version }}
working-dir: ${{ inputs.working-dir }} working-dir: ${{ inputs.working-dir }}
@@ -72,6 +72,6 @@ runs:
if: ${{ inputs.build-android-artifacts == 'true' }} if: ${{ inputs.build-android-artifacts == 'true' }}
with: with:
artifacts-dir: ${{ inputs.artifacts-dir }} artifacts-dir: ${{ inputs.artifacts-dir }}
target-files: ./target/aarch64-linux-android/release/greptime target-file: ./target/aarch64-linux-android/release/greptime
version: ${{ inputs.version }} version: ${{ inputs.version }}
working-dir: ${{ inputs.working-dir }} working-dir: ${{ inputs.working-dir }}

View File

@@ -34,8 +34,8 @@ inputs:
required: true required: true
push-latest-tag: push-latest-tag:
description: Whether to push the latest tag description: Whether to push the latest tag
required: true required: false
default: 'false' default: 'true'
runs: runs:
using: composite using: composite
steps: steps:
@@ -47,11 +47,7 @@ runs:
password: ${{ inputs.image-registry-password }} password: ${{ inputs.image-registry-password }}
- name: Set up qemu for multi-platform builds - name: Set up qemu for multi-platform builds
uses: docker/setup-qemu-action@v3 uses: docker/setup-qemu-action@v2
with:
platforms: linux/amd64,linux/arm64
# The latest version will lead to segmentation fault.
image: tonistiigi/binfmt:qemu-v7.0.0-28
- name: Set up buildx - name: Set up buildx
uses: docker/setup-buildx-action@v2 uses: docker/setup-buildx-action@v2

View File

@@ -22,8 +22,8 @@ inputs:
required: true required: true
push-latest-tag: push-latest-tag:
description: Whether to push the latest tag description: Whether to push the latest tag
required: true required: false
default: 'false' default: 'true'
dev-mode: dev-mode:
description: Enable dev mode, only build standard greptime description: Enable dev mode, only build standard greptime
required: false required: false
@@ -41,8 +41,8 @@ runs:
image-name: ${{ inputs.image-name }} image-name: ${{ inputs.image-name }}
image-tag: ${{ inputs.version }} image-tag: ${{ inputs.version }}
docker-file: docker/ci/ubuntu/Dockerfile docker-file: docker/ci/ubuntu/Dockerfile
amd64-artifact-name: greptime-linux-amd64-${{ inputs.version }} amd64-artifact-name: greptime-linux-amd64-pyo3-${{ inputs.version }}
arm64-artifact-name: greptime-linux-arm64-${{ inputs.version }} arm64-artifact-name: greptime-linux-arm64-pyo3-${{ inputs.version }}
platforms: linux/amd64,linux/arm64 platforms: linux/amd64,linux/arm64
push-latest-tag: ${{ inputs.push-latest-tag }} push-latest-tag: ${{ inputs.push-latest-tag }}

View File

@@ -48,7 +48,20 @@ runs:
path: /tmp/greptime-*.log path: /tmp/greptime-*.log
retention-days: 3 retention-days: 3
- name: Build greptime # Builds standard greptime binary - name: Build standard greptime
uses: ./.github/actions/build-greptime-binary
with:
base-image: ubuntu
features: pyo3_backend,servers/dashboard
cargo-profile: ${{ inputs.cargo-profile }}
artifacts-dir: greptime-linux-${{ inputs.arch }}-pyo3-${{ inputs.version }}
version: ${{ inputs.version }}
working-dir: ${{ inputs.working-dir }}
image-registry: ${{ inputs.image-registry }}
image-namespace: ${{ inputs.image-namespace }}
- name: Build greptime without pyo3
if: ${{ inputs.dev-mode == 'false' }}
uses: ./.github/actions/build-greptime-binary uses: ./.github/actions/build-greptime-binary
with: with:
base-image: ubuntu base-image: ubuntu

View File

@@ -90,5 +90,5 @@ runs:
uses: ./.github/actions/upload-artifacts uses: ./.github/actions/upload-artifacts
with: with:
artifacts-dir: ${{ inputs.artifacts-dir }} artifacts-dir: ${{ inputs.artifacts-dir }}
target-files: target/${{ inputs.arch }}/${{ inputs.cargo-profile }}/greptime target-file: target/${{ inputs.arch }}/${{ inputs.cargo-profile }}/greptime
version: ${{ inputs.version }} version: ${{ inputs.version }}

View File

@@ -33,6 +33,15 @@ runs:
- name: Rust Cache - name: Rust Cache
uses: Swatinem/rust-cache@v2 uses: Swatinem/rust-cache@v2
- name: Install Python
uses: actions/setup-python@v5
with:
python-version: "3.10"
- name: Install PyArrow Package
shell: pwsh
run: pip install pyarrow numpy
- name: Install WSL distribution - name: Install WSL distribution
uses: Vampire/setup-wsl@v2 uses: Vampire/setup-wsl@v2
with: with:
@@ -47,6 +56,7 @@ runs:
shell: pwsh shell: pwsh
run: make test sqlness-test run: make test sqlness-test
env: env:
RUSTUP_WINDOWS_PATH_ADD_BIN: 1 # Workaround for https://github.com/nextest-rs/nextest/issues/1493
RUST_BACKTRACE: 1 RUST_BACKTRACE: 1
SQLNESS_OPTS: "--preserve-state" SQLNESS_OPTS: "--preserve-state"
@@ -66,5 +76,5 @@ runs:
uses: ./.github/actions/upload-artifacts uses: ./.github/actions/upload-artifacts
with: with:
artifacts-dir: ${{ inputs.artifacts-dir }} artifacts-dir: ${{ inputs.artifacts-dir }}
target-files: target/${{ inputs.arch }}/${{ inputs.cargo-profile }}/greptime,target/${{ inputs.arch }}/${{ inputs.cargo-profile }}/greptime.pdb target-file: target/${{ inputs.arch }}/${{ inputs.cargo-profile }}/greptime
version: ${{ inputs.version }} version: ${{ inputs.version }}

View File

@@ -9,8 +9,8 @@ runs:
steps: steps:
# Download artifacts from previous jobs, the artifacts will be downloaded to: # Download artifacts from previous jobs, the artifacts will be downloaded to:
# ${WORKING_DIR} # ${WORKING_DIR}
# |- greptime-darwin-amd64-v0.5.0/greptime-darwin-amd64-v0.5.0.tar.gz # |- greptime-darwin-amd64-pyo3-v0.5.0/greptime-darwin-amd64-pyo3-v0.5.0.tar.gz
# |- greptime-darwin-amd64-v0.5.0.sha256sum/greptime-darwin-amd64-v0.5.0.sha256sum # |- greptime-darwin-amd64-pyo3-v0.5.0.sha256sum/greptime-darwin-amd64-pyo3-v0.5.0.sha256sum
# |- greptime-darwin-amd64-v0.5.0/greptime-darwin-amd64-v0.5.0.tar.gz # |- greptime-darwin-amd64-v0.5.0/greptime-darwin-amd64-v0.5.0.tar.gz
# |- greptime-darwin-amd64-v0.5.0.sha256sum/greptime-darwin-amd64-v0.5.0.sha256sum # |- greptime-darwin-amd64-v0.5.0.sha256sum/greptime-darwin-amd64-v0.5.0.sha256sum
# ... # ...

View File

@@ -51,8 +51,8 @@ inputs:
required: true required: true
upload-to-s3: upload-to-s3:
description: Upload to S3 description: Upload to S3
required: true required: false
default: 'false' default: 'true'
artifacts-dir: artifacts-dir:
description: Directory to store artifacts description: Directory to store artifacts
required: false required: false
@@ -64,11 +64,11 @@ inputs:
upload-max-retry-times: upload-max-retry-times:
description: Max retry times for uploading artifacts to S3 description: Max retry times for uploading artifacts to S3
required: false required: false
default: "30" default: "20"
upload-retry-timeout: upload-retry-timeout:
description: Timeout for uploading artifacts to S3 description: Timeout for uploading artifacts to S3
required: false required: false
default: "120" # minutes default: "30" # minutes
runs: runs:
using: composite using: composite
steps: steps:
@@ -77,21 +77,13 @@ runs:
with: with:
path: ${{ inputs.artifacts-dir }} path: ${{ inputs.artifacts-dir }}
- name: Install s5cmd
shell: bash
run: |
wget https://github.com/peak/s5cmd/releases/download/v2.3.0/s5cmd_2.3.0_Linux-64bit.tar.gz
tar -xzf s5cmd_2.3.0_Linux-64bit.tar.gz
sudo mv s5cmd /usr/local/bin/
sudo chmod +x /usr/local/bin/s5cmd
- name: Release artifacts to cn region - name: Release artifacts to cn region
uses: nick-invision/retry@v2 uses: nick-invision/retry@v2
if: ${{ inputs.upload-to-s3 == 'true' }} if: ${{ inputs.upload-to-s3 == 'true' }}
env: env:
AWS_ACCESS_KEY_ID: ${{ inputs.aws-cn-access-key-id }} AWS_ACCESS_KEY_ID: ${{ inputs.aws-cn-access-key-id }}
AWS_SECRET_ACCESS_KEY: ${{ inputs.aws-cn-secret-access-key }} AWS_SECRET_ACCESS_KEY: ${{ inputs.aws-cn-secret-access-key }}
AWS_REGION: ${{ inputs.aws-cn-region }} AWS_DEFAULT_REGION: ${{ inputs.aws-cn-region }}
UPDATE_VERSION_INFO: ${{ inputs.update-version-info }} UPDATE_VERSION_INFO: ${{ inputs.update-version-info }}
with: with:
max_attempts: ${{ inputs.upload-max-retry-times }} max_attempts: ${{ inputs.upload-max-retry-times }}

View File

@@ -8,15 +8,15 @@ inputs:
default: 2 default: 2
description: "Number of Datanode replicas" description: "Number of Datanode replicas"
meta-replicas: meta-replicas:
default: 2 default: 3
description: "Number of Metasrv replicas" description: "Number of Metasrv replicas"
image-registry: image-registry:
default: "docker.io" default: "docker.io"
description: "Image registry" description: "Image registry"
image-repository: image-repository:
default: "greptime/greptimedb" default: "greptime/greptimedb"
description: "Image repository" description: "Image repository"
image-tag: image-tag:
default: "latest" default: "latest"
description: 'Image tag' description: 'Image tag'
etcd-endpoints: etcd-endpoints:
@@ -32,12 +32,12 @@ runs:
steps: steps:
- name: Install GreptimeDB operator - name: Install GreptimeDB operator
uses: nick-fields/retry@v3 uses: nick-fields/retry@v3
with: with:
timeout_minutes: 3 timeout_minutes: 3
max_attempts: 3 max_attempts: 3
shell: bash shell: bash
command: | command: |
helm repo add greptime https://greptimeteam.github.io/helm-charts/ helm repo add greptime https://greptimeteam.github.io/helm-charts/
helm repo update helm repo update
helm upgrade \ helm upgrade \
--install \ --install \
@@ -48,18 +48,18 @@ runs:
--wait-for-jobs --wait-for-jobs
- name: Install GreptimeDB cluster - name: Install GreptimeDB cluster
shell: bash shell: bash
run: | run: |
helm upgrade \ helm upgrade \
--install my-greptimedb \ --install my-greptimedb \
--set meta.backendStorage.etcd.endpoints=${{ inputs.etcd-endpoints }} \ --set meta.etcdEndpoints=${{ inputs.etcd-endpoints }} \
--set meta.enableRegionFailover=${{ inputs.enable-region-failover }} \ --set meta.enableRegionFailover=${{ inputs.enable-region-failover }} \
--set image.registry=${{ inputs.image-registry }} \ --set image.registry=${{ inputs.image-registry }} \
--set image.repository=${{ inputs.image-repository }} \ --set image.repository=${{ inputs.image-repository }} \
--set image.tag=${{ inputs.image-tag }} \ --set image.tag=${{ inputs.image-tag }} \
--set base.podTemplate.main.resources.requests.cpu=50m \ --set base.podTemplate.main.resources.requests.cpu=50m \
--set base.podTemplate.main.resources.requests.memory=256Mi \ --set base.podTemplate.main.resources.requests.memory=256Mi \
--set base.podTemplate.main.resources.limits.cpu=2000m \ --set base.podTemplate.main.resources.limits.cpu=1000m \
--set base.podTemplate.main.resources.limits.memory=3Gi \ --set base.podTemplate.main.resources.limits.memory=2Gi \
--set frontend.replicas=${{ inputs.frontend-replicas }} \ --set frontend.replicas=${{ inputs.frontend-replicas }} \
--set datanode.replicas=${{ inputs.datanode-replicas }} \ --set datanode.replicas=${{ inputs.datanode-replicas }} \
--set meta.replicas=${{ inputs.meta-replicas }} \ --set meta.replicas=${{ inputs.meta-replicas }} \
@@ -72,7 +72,7 @@ runs:
- name: Wait for GreptimeDB - name: Wait for GreptimeDB
shell: bash shell: bash
run: | run: |
while true; do while true; do
PHASE=$(kubectl -n my-greptimedb get gtc my-greptimedb -o jsonpath='{.status.clusterPhase}') PHASE=$(kubectl -n my-greptimedb get gtc my-greptimedb -o jsonpath='{.status.clusterPhase}')
if [ "$PHASE" == "Running" ]; then if [ "$PHASE" == "Running" ]; then
echo "Cluster is ready" echo "Cluster is ready"
@@ -86,10 +86,10 @@ runs:
- name: Print GreptimeDB info - name: Print GreptimeDB info
if: always() if: always()
shell: bash shell: bash
run: | run: |
kubectl get all --show-labels -n my-greptimedb kubectl get all --show-labels -n my-greptimedb
- name: Describe Nodes - name: Describe Nodes
if: always() if: always()
shell: bash shell: bash
run: | run: |
kubectl describe nodes kubectl describe nodes

View File

@@ -5,7 +5,7 @@ meta:
[datanode] [datanode]
[datanode.client] [datanode.client]
timeout = "120s" timeout = "60s"
datanode: datanode:
configData: |- configData: |-
[runtime] [runtime]
@@ -21,7 +21,7 @@ frontend:
global_rt_size = 4 global_rt_size = 4
[meta_client] [meta_client]
ddl_timeout = "120s" ddl_timeout = "60s"
objectStorage: objectStorage:
s3: s3:
bucket: default bucket: default

View File

@@ -5,7 +5,7 @@ meta:
[datanode] [datanode]
[datanode.client] [datanode.client]
timeout = "120s" timeout = "60s"
datanode: datanode:
configData: |- configData: |-
[runtime] [runtime]
@@ -17,7 +17,7 @@ frontend:
global_rt_size = 4 global_rt_size = 4
[meta_client] [meta_client]
ddl_timeout = "120s" ddl_timeout = "60s"
objectStorage: objectStorage:
s3: s3:
bucket: default bucket: default

View File

@@ -2,17 +2,16 @@ meta:
configData: |- configData: |-
[runtime] [runtime]
global_rt_size = 4 global_rt_size = 4
[wal] [wal]
provider = "kafka" provider = "kafka"
broker_endpoints = ["kafka.kafka-cluster.svc.cluster.local:9092"] broker_endpoints = ["kafka.kafka-cluster.svc.cluster.local:9092"]
num_topics = 3 num_topics = 3
auto_prune_interval = "30s"
trigger_flush_threshold = 100
[datanode] [datanode]
[datanode.client] [datanode.client]
timeout = "120s" timeout = "60s"
datanode: datanode:
configData: |- configData: |-
[runtime] [runtime]
@@ -22,14 +21,14 @@ datanode:
[wal] [wal]
provider = "kafka" provider = "kafka"
broker_endpoints = ["kafka.kafka-cluster.svc.cluster.local:9092"] broker_endpoints = ["kafka.kafka-cluster.svc.cluster.local:9092"]
overwrite_entry_start_id = true linger = "2ms"
frontend: frontend:
configData: |- configData: |-
[runtime] [runtime]
global_rt_size = 4 global_rt_size = 4
[meta_client] [meta_client]
ddl_timeout = "120s" ddl_timeout = "60s"
objectStorage: objectStorage:
s3: s3:
bucket: default bucket: default

View File

@@ -18,8 +18,6 @@ runs:
--set controller.replicaCount=${{ inputs.controller-replicas }} \ --set controller.replicaCount=${{ inputs.controller-replicas }} \
--set controller.resources.requests.cpu=50m \ --set controller.resources.requests.cpu=50m \
--set controller.resources.requests.memory=128Mi \ --set controller.resources.requests.memory=128Mi \
--set controller.resources.limits.cpu=2000m \
--set controller.resources.limits.memory=2Gi \
--set listeners.controller.protocol=PLAINTEXT \ --set listeners.controller.protocol=PLAINTEXT \
--set listeners.client.protocol=PLAINTEXT \ --set listeners.client.protocol=PLAINTEXT \
--create-namespace \ --create-namespace \

View File

@@ -56,7 +56,7 @@ runs:
- name: Start EC2 runner - name: Start EC2 runner
if: startsWith(inputs.runner, 'ec2') if: startsWith(inputs.runner, 'ec2')
uses: machulav/ec2-github-runner@v2.3.8 uses: machulav/ec2-github-runner@v2
id: start-linux-arm64-ec2-runner id: start-linux-arm64-ec2-runner
with: with:
mode: start mode: start

View File

@@ -33,7 +33,7 @@ runs:
- name: Stop EC2 runner - name: Stop EC2 runner
if: ${{ inputs.label && inputs.ec2-instance-id }} if: ${{ inputs.label && inputs.ec2-instance-id }}
uses: machulav/ec2-github-runner@v2.3.8 uses: machulav/ec2-github-runner@v2
with: with:
mode: stop mode: stop
label: ${{ inputs.label }} label: ${{ inputs.label }}

View File

@@ -4,8 +4,8 @@ inputs:
artifacts-dir: artifacts-dir:
description: Directory to store artifacts description: Directory to store artifacts
required: true required: true
target-files: target-file:
description: The multiple target files to upload, separated by comma description: The path of the target artifact
required: false required: false
version: version:
description: Version of the artifact description: Version of the artifact
@@ -18,21 +18,17 @@ runs:
using: composite using: composite
steps: steps:
- name: Create artifacts directory - name: Create artifacts directory
if: ${{ inputs.target-files != '' }} if: ${{ inputs.target-file != '' }}
working-directory: ${{ inputs.working-dir }} working-directory: ${{ inputs.working-dir }}
shell: bash shell: bash
run: | run: |
set -e mkdir -p ${{ inputs.artifacts-dir }} && \
mkdir -p ${{ inputs.artifacts-dir }} cp ${{ inputs.target-file }} ${{ inputs.artifacts-dir }}
IFS=',' read -ra FILES <<< "${{ inputs.target-files }}"
for file in "${FILES[@]}"; do
cp "$file" ${{ inputs.artifacts-dir }}/
done
# The compressed artifacts will use the following layout: # The compressed artifacts will use the following layout:
# greptime-linux-amd64-v0.3.0sha256sum # greptime-linux-amd64-pyo3-v0.3.0sha256sum
# greptime-linux-amd64-v0.3.0.tar.gz # greptime-linux-amd64-pyo3-v0.3.0.tar.gz
# greptime-linux-amd64-v0.3.0 # greptime-linux-amd64-pyo3-v0.3.0
# └── greptime # └── greptime
- name: Compress artifacts and calculate checksum - name: Compress artifacts and calculate checksum
working-directory: ${{ inputs.working-dir }} working-directory: ${{ inputs.working-dir }}

View File

@@ -1,3 +0,0 @@
native-tls
openssl
aws-lc-sys

.github/labeler.yaml (vendored): 15 changed lines
View File

@@ -1,15 +0,0 @@
ci:
- changed-files:
- any-glob-to-any-file: .github/**
docker:
- changed-files:
- any-glob-to-any-file: docker/**
documentation:
- changed-files:
- any-glob-to-any-file: docs/**
dashboard:
- changed-files:
- any-glob-to-any-file: grafana/**

View File

@@ -4,8 +4,7 @@ I hereby agree to the terms of the [GreptimeDB CLA](https://github.com/GreptimeT
## What's changed and what's your intention? ## What's changed and what's your intention?
<!-- __!!! DO NOT LEAVE THIS BLOCK EMPTY !!!__
__!!! DO NOT LEAVE THIS BLOCK EMPTY !!!__
Please explain IN DETAIL what the changes are in this PR and why they are needed: Please explain IN DETAIL what the changes are in this PR and why they are needed:
@@ -13,14 +12,9 @@ Please explain IN DETAIL what the changes are in this PR and why they are needed
- How does this PR work? Need a brief introduction for the changed logic (optional) - How does this PR work? Need a brief introduction for the changed logic (optional)
- Describe clearly one logical change and avoid lazy messages (optional) - Describe clearly one logical change and avoid lazy messages (optional)
- Describe any limitations of the current code (optional) - Describe any limitations of the current code (optional)
- Describe if this PR will break **API or data compatibility** (optional)
-->
## PR Checklist ## Checklist
Please convert it to a draft if some of the following conditions are not met.
- [ ] I have written the necessary rustdoc comments. - [ ] I have written the necessary rustdoc comments.
- [ ] I have added the necessary unit tests and integration tests. - [ ] I have added the necessary unit tests and integration tests.
- [ ] This PR requires documentation updates. - [ ] This PR requires documentation updates.
- [ ] API changes are backward compatible.
- [ ] Schema or data changes are backward compatible.

View File

@@ -1,14 +0,0 @@
#!/bin/sh
set -e
# Get the latest version of github.com/GreptimeTeam/greptimedb
VERSION=$(curl -s https://api.github.com/repos/GreptimeTeam/greptimedb/releases/latest | jq -r '.tag_name')
echo "Downloading the latest version: $VERSION"
# Download the install script
curl -fsSL https://raw.githubusercontent.com/greptimeteam/greptimedb/main/scripts/install.sh | sh -s $VERSION
# Execute the `greptime` command
./greptime --version

View File

@@ -1,42 +0,0 @@
#!/bin/bash
# Get current version
CURRENT_VERSION=$1
if [ -z "$CURRENT_VERSION" ]; then
echo "Error: Failed to get current version"
exit 1
fi
# Get the latest version from GitHub Releases
API_RESPONSE=$(curl -s "https://api.github.com/repos/GreptimeTeam/greptimedb/releases/latest")
if [ -z "$API_RESPONSE" ] || [ "$(echo "$API_RESPONSE" | jq -r '.message')" = "Not Found" ]; then
echo "Error: Failed to fetch latest version from GitHub"
exit 1
fi
# Get the latest version
LATEST_VERSION=$(echo "$API_RESPONSE" | jq -r '.tag_name')
if [ -z "$LATEST_VERSION" ] || [ "$LATEST_VERSION" = "null" ]; then
echo "Error: No valid version found in GitHub releases"
exit 1
fi
# Cleaned up version number format (removed possible 'v' prefix and -nightly suffix)
CLEAN_CURRENT=$(echo "$CURRENT_VERSION" | sed 's/^v//' | sed 's/-nightly-.*//')
CLEAN_LATEST=$(echo "$LATEST_VERSION" | sed 's/^v//' | sed 's/-nightly-.*//')
echo "Current version: $CLEAN_CURRENT"
echo "Latest release version: $CLEAN_LATEST"
# Use sort -V to compare versions
HIGHER_VERSION=$(printf "%s\n%s" "$CLEAN_CURRENT" "$CLEAN_LATEST" | sort -V | tail -n1)
if [ "$HIGHER_VERSION" = "$CLEAN_CURRENT" ]; then
echo "Current version ($CLEAN_CURRENT) is NEWER than or EQUAL to latest ($CLEAN_LATEST)"
echo "should-push-latest-tag=true" >> $GITHUB_OUTPUT
else
echo "Current version ($CLEAN_CURRENT) is OLDER than latest ($CLEAN_LATEST)"
echo "should-push-latest-tag=false" >> $GITHUB_OUTPUT
fi

View File

@@ -8,25 +8,24 @@ set -e
# - If it's a nightly build, the version is 'nightly-YYYYMMDD-$(git rev-parse --short HEAD)', like 'nightly-20230712-e5b243c'. # - If it's a nightly build, the version is 'nightly-YYYYMMDD-$(git rev-parse --short HEAD)', like 'nightly-20230712-e5b243c'.
# create_version ${GIHUB_EVENT_NAME} ${NEXT_RELEASE_VERSION} ${NIGHTLY_RELEASE_PREFIX} # create_version ${GIHUB_EVENT_NAME} ${NEXT_RELEASE_VERSION} ${NIGHTLY_RELEASE_PREFIX}
function create_version() { function create_version() {
# Read from environment variables. # Read from envrionment variables.
if [ -z "$GITHUB_EVENT_NAME" ]; then if [ -z "$GITHUB_EVENT_NAME" ]; then
echo "GITHUB_EVENT_NAME is empty" >&2 echo "GITHUB_EVENT_NAME is empty"
exit 1 exit 1
fi fi
if [ -z "$NEXT_RELEASE_VERSION" ]; then if [ -z "$NEXT_RELEASE_VERSION" ]; then
echo "NEXT_RELEASE_VERSION is empty, use version from Cargo.toml" >&2 echo "NEXT_RELEASE_VERSION is empty"
# NOTE: Need a `v` prefix for the version string. exit 1
export NEXT_RELEASE_VERSION=v$(grep '^version = ' Cargo.toml | cut -d '"' -f 2 | head -n 1)
fi fi
if [ -z "$NIGHTLY_RELEASE_PREFIX" ]; then if [ -z "$NIGHTLY_RELEASE_PREFIX" ]; then
echo "NIGHTLY_RELEASE_PREFIX is empty" >&2 echo "NIGHTLY_RELEASE_PREFIX is empty"
exit 1 exit 1
fi fi
# Reuse $NEXT_RELEASE_VERSION to identify whether it's a nightly build. # Reuse $NEXT_RELEASE_VERSION to identify whether it's a nightly build.
# It will be like 'nightly-20230808-7d0d8dc6'. # It will be like 'nigtly-20230808-7d0d8dc6'.
if [ "$NEXT_RELEASE_VERSION" = nightly ]; then if [ "$NEXT_RELEASE_VERSION" = nightly ]; then
echo "$NIGHTLY_RELEASE_PREFIX-$(date "+%Y%m%d")-$(git rev-parse --short HEAD)" echo "$NIGHTLY_RELEASE_PREFIX-$(date "+%Y%m%d")-$(git rev-parse --short HEAD)"
exit 0 exit 0
@@ -36,7 +35,7 @@ function create_version() {
# It will be like 'dev-2023080819-f0e7216c'. # It will be like 'dev-2023080819-f0e7216c'.
if [ "$NEXT_RELEASE_VERSION" = dev ]; then if [ "$NEXT_RELEASE_VERSION" = dev ]; then
if [ -z "$COMMIT_SHA" ]; then if [ -z "$COMMIT_SHA" ]; then
echo "COMMIT_SHA is empty in dev build" >&2 echo "COMMIT_SHA is empty in dev build"
exit 1 exit 1
fi fi
echo "dev-$(date "+%Y%m%d-%s")-$(echo "$COMMIT_SHA" | cut -c1-8)" echo "dev-$(date "+%Y%m%d-%s")-$(echo "$COMMIT_SHA" | cut -c1-8)"
@@ -46,7 +45,7 @@ function create_version() {
# Note: Only output 'version=xxx' to stdout when everything is ok, so that it can be used in GitHub Actions Outputs. # Note: Only output 'version=xxx' to stdout when everything is ok, so that it can be used in GitHub Actions Outputs.
if [ "$GITHUB_EVENT_NAME" = push ]; then if [ "$GITHUB_EVENT_NAME" = push ]; then
if [ -z "$GITHUB_REF_NAME" ]; then if [ -z "$GITHUB_REF_NAME" ]; then
echo "GITHUB_REF_NAME is empty in push event" >&2 echo "GITHUB_REF_NAME is empty in push event"
exit 1 exit 1
fi fi
echo "$GITHUB_REF_NAME" echo "$GITHUB_REF_NAME"
@@ -55,15 +54,15 @@ function create_version() {
elif [ "$GITHUB_EVENT_NAME" = schedule ]; then elif [ "$GITHUB_EVENT_NAME" = schedule ]; then
echo "$NEXT_RELEASE_VERSION-$NIGHTLY_RELEASE_PREFIX-$(date "+%Y%m%d")" echo "$NEXT_RELEASE_VERSION-$NIGHTLY_RELEASE_PREFIX-$(date "+%Y%m%d")"
else else
echo "Unsupported GITHUB_EVENT_NAME: $GITHUB_EVENT_NAME" >&2 echo "Unsupported GITHUB_EVENT_NAME: $GITHUB_EVENT_NAME"
exit 1 exit 1
fi fi
} }
# You can run as following examples: # You can run as following examples:
# GITHUB_EVENT_NAME=push NEXT_RELEASE_VERSION=v0.4.0 NIGHTLY_RELEASE_PREFIX=nightly GITHUB_REF_NAME=v0.3.0 ./create-version.sh # GITHUB_EVENT_NAME=push NEXT_RELEASE_VERSION=v0.4.0 NIGHTLY_RELEASE_PREFIX=nigtly GITHUB_REF_NAME=v0.3.0 ./create-version.sh
# GITHUB_EVENT_NAME=workflow_dispatch NEXT_RELEASE_VERSION=v0.4.0 NIGHTLY_RELEASE_PREFIX=nightly ./create-version.sh # GITHUB_EVENT_NAME=workflow_dispatch NEXT_RELEASE_VERSION=v0.4.0 NIGHTLY_RELEASE_PREFIX=nigtly ./create-version.sh
# GITHUB_EVENT_NAME=schedule NEXT_RELEASE_VERSION=v0.4.0 NIGHTLY_RELEASE_PREFIX=nightly ./create-version.sh # GITHUB_EVENT_NAME=schedule NEXT_RELEASE_VERSION=v0.4.0 NIGHTLY_RELEASE_PREFIX=nigtly ./create-version.sh
# GITHUB_EVENT_NAME=schedule NEXT_RELEASE_VERSION=nightly NIGHTLY_RELEASE_PREFIX=nightly ./create-version.sh # GITHUB_EVENT_NAME=schedule NEXT_RELEASE_VERSION=nightly NIGHTLY_RELEASE_PREFIX=nigtly ./create-version.sh
# GITHUB_EVENT_NAME=workflow_dispatch COMMIT_SHA=f0e7216c4bb6acce9b29a21ec2d683be2e3f984a NEXT_RELEASE_VERSION=dev NIGHTLY_RELEASE_PREFIX=nightly ./create-version.sh # GITHUB_EVENT_NAME=workflow_dispatch COMMIT_SHA=f0e7216c4bb6acce9b29a21ec2d683be2e3f984a NEXT_RELEASE_VERSION=dev NIGHTLY_RELEASE_PREFIX=nigtly ./create-version.sh
create_version create_version

View File

@@ -10,7 +10,7 @@ GREPTIMEDB_IMAGE_TAG=${GREPTIMEDB_IMAGE_TAG:-latest}
ETCD_CHART="oci://registry-1.docker.io/bitnamicharts/etcd" ETCD_CHART="oci://registry-1.docker.io/bitnamicharts/etcd"
GREPTIME_CHART="https://greptimeteam.github.io/helm-charts/" GREPTIME_CHART="https://greptimeteam.github.io/helm-charts/"
# Create a cluster with 1 control-plane node and 5 workers. # Ceate a cluster with 1 control-plane node and 5 workers.
function create_kind_cluster() { function create_kind_cluster() {
cat <<EOF | kind create cluster --name "${CLUSTER}" --image kindest/node:"$KUBERNETES_VERSION" --config=- cat <<EOF | kind create cluster --name "${CLUSTER}" --image kindest/node:"$KUBERNETES_VERSION" --config=-
kind: Cluster kind: Cluster
@@ -68,7 +68,7 @@ function deploy_greptimedb_cluster() {
helm install "$cluster_name" greptime/greptimedb-cluster \ helm install "$cluster_name" greptime/greptimedb-cluster \
--set image.tag="$GREPTIMEDB_IMAGE_TAG" \ --set image.tag="$GREPTIMEDB_IMAGE_TAG" \
--set meta.backendStorage.etcd.endpoints="etcd.$install_namespace:2379" \ --set meta.etcdEndpoints="etcd.$install_namespace:2379" \
-n "$install_namespace" -n "$install_namespace"
# Wait for greptimedb cluster to be ready. # Wait for greptimedb cluster to be ready.
@@ -103,7 +103,7 @@ function deploy_greptimedb_cluster_with_s3_storage() {
helm install "$cluster_name" greptime/greptimedb-cluster -n "$install_namespace" \ helm install "$cluster_name" greptime/greptimedb-cluster -n "$install_namespace" \
--set image.tag="$GREPTIMEDB_IMAGE_TAG" \ --set image.tag="$GREPTIMEDB_IMAGE_TAG" \
--set meta.backendStorage.etcd.endpoints="etcd.$install_namespace:2379" \ --set meta.etcdEndpoints="etcd.$install_namespace:2379" \
--set storage.s3.bucket="$AWS_CI_TEST_BUCKET" \ --set storage.s3.bucket="$AWS_CI_TEST_BUCKET" \
--set storage.s3.region="$AWS_REGION" \ --set storage.s3.region="$AWS_REGION" \
--set storage.s3.root="$DATA_ROOT" \ --set storage.s3.root="$DATA_ROOT" \

View File

@@ -1,37 +0,0 @@
#!/bin/bash
DEV_BUILDER_IMAGE_TAG=$1
update_dev_builder_version() {
if [ -z "$DEV_BUILDER_IMAGE_TAG" ]; then
echo "Error: Should specify the dev-builder image tag"
exit 1
fi
# Configure Git configs.
git config --global user.email greptimedb-ci@greptime.com
git config --global user.name greptimedb-ci
# Checkout a new branch.
BRANCH_NAME="ci/update-dev-builder-$(date +%Y%m%d%H%M%S)"
git checkout -b $BRANCH_NAME
# Update the dev-builder image tag in the Makefile.
sed -i "s/DEV_BUILDER_IMAGE_TAG ?=.*/DEV_BUILDER_IMAGE_TAG ?= ${DEV_BUILDER_IMAGE_TAG}/g" Makefile
# Commit the changes.
git add Makefile
git commit -m "ci: update dev-builder image tag"
git push origin $BRANCH_NAME
# Create a Pull Request.
gh pr create \
--title "ci: update dev-builder image tag" \
--body "This PR updates the dev-builder image tag" \
--base main \
--head $BRANCH_NAME \
--reviewer zyy17 \
--reviewer daviderli614
}
update_dev_builder_version

View File

@@ -1,46 +0,0 @@
#!/bin/bash
set -e
VERSION=${VERSION}
GITHUB_TOKEN=${GITHUB_TOKEN}
update_helm_charts_version() {
# Configure Git configs.
git config --global user.email update-helm-charts-version@greptime.com
git config --global user.name update-helm-charts-version
# Clone helm-charts repository.
git clone "https://x-access-token:${GITHUB_TOKEN}@github.com/GreptimeTeam/helm-charts.git"
cd helm-charts
# Set default remote for gh CLI
gh repo set-default GreptimeTeam/helm-charts
# Checkout a new branch.
BRANCH_NAME="chore/greptimedb-${VERSION}"
git checkout -b $BRANCH_NAME
# Update version.
make update-version CHART=greptimedb-cluster VERSION=${VERSION}
make update-version CHART=greptimedb-standalone VERSION=${VERSION}
# Update docs.
make docs
# Commit the changes.
git add .
git commit -s -m "chore: Update GreptimeDB version to ${VERSION}"
git push origin $BRANCH_NAME
# Create a Pull Request.
gh pr create \
--title "chore: Update GreptimeDB version to ${VERSION}" \
--body "This PR updates the GreptimeDB version." \
--base main \
--head $BRANCH_NAME \
--reviewer zyy17 \
--reviewer daviderli614
}
update_helm_charts_version

View File

@@ -1,42 +0,0 @@
#!/bin/bash
set -e
VERSION=${VERSION}
GITHUB_TOKEN=${GITHUB_TOKEN}
update_homebrew_greptime_version() {
# Configure Git configs.
git config --global user.email update-greptime-version@greptime.com
git config --global user.name update-greptime-version
# Clone helm-charts repository.
git clone "https://x-access-token:${GITHUB_TOKEN}@github.com/GreptimeTeam/homebrew-greptime.git"
cd homebrew-greptime
# Set default remote for gh CLI
gh repo set-default GreptimeTeam/homebrew-greptime
# Checkout a new branch.
BRANCH_NAME="chore/greptimedb-${VERSION}"
git checkout -b $BRANCH_NAME
# Update version.
make update-greptime-version VERSION=${VERSION}
# Commit the changes.
git add .
git commit -s -m "chore: Update GreptimeDB version to ${VERSION}"
git push origin $BRANCH_NAME
# Create a Pull Request.
gh pr create \
--title "chore: Update GreptimeDB version to ${VERSION}" \
--body "This PR updates the GreptimeDB version." \
--base main \
--head $BRANCH_NAME \
--reviewer zyy17 \
--reviewer daviderli614
}
update_homebrew_greptime_version

View File

@@ -27,13 +27,13 @@ function upload_artifacts() {
# ├── latest-version.txt # ├── latest-version.txt
# ├── latest-nightly-version.txt # ├── latest-nightly-version.txt
# ├── v0.1.0 # ├── v0.1.0
# │ ├── greptime-darwin-amd64-v0.1.0.sha256sum # │ ├── greptime-darwin-amd64-pyo3-v0.1.0.sha256sum
# │ └── greptime-darwin-amd64-v0.1.0.tar.gz # │ └── greptime-darwin-amd64-pyo3-v0.1.0.tar.gz
# └── v0.2.0 # └── v0.2.0
# ├── greptime-darwin-amd64-v0.2.0.sha256sum # ├── greptime-darwin-amd64-pyo3-v0.2.0.sha256sum
# └── greptime-darwin-amd64-v0.2.0.tar.gz # └── greptime-darwin-amd64-pyo3-v0.2.0.tar.gz
find "$ARTIFACTS_DIR" -type f \( -name "*.tar.gz" -o -name "*.sha256sum" \) | while IFS= read -r file; do find "$ARTIFACTS_DIR" -type f \( -name "*.tar.gz" -o -name "*.sha256sum" \) | while IFS= read -r file; do
s5cmd cp \ aws s3 cp \
"$file" "s3://$AWS_S3_BUCKET/$RELEASE_DIRS/$VERSION/$(basename "$file")" "$file" "s3://$AWS_S3_BUCKET/$RELEASE_DIRS/$VERSION/$(basename "$file")"
done done
} }
@@ -41,11 +41,11 @@ function upload_artifacts() {
# Updates the latest version information in AWS S3 if UPDATE_VERSION_INFO is true. # Updates the latest version information in AWS S3 if UPDATE_VERSION_INFO is true.
function update_version_info() { function update_version_info() {
if [ "$UPDATE_VERSION_INFO" == "true" ]; then if [ "$UPDATE_VERSION_INFO" == "true" ]; then
# If it's the official release(like v1.0.0, v1.0.1, v1.0.2, etc.), update latest-version.txt. # If it's the officail release(like v1.0.0, v1.0.1, v1.0.2, etc.), update latest-version.txt.
if [[ "$VERSION" =~ ^v[0-9]+\.[0-9]+\.[0-9]+$ ]]; then if [[ "$VERSION" =~ ^v[0-9]+\.[0-9]+\.[0-9]+$ ]]; then
echo "Updating latest-version.txt" echo "Updating latest-version.txt"
echo "$VERSION" > latest-version.txt echo "$VERSION" > latest-version.txt
s5cmd cp \ aws s3 cp \
latest-version.txt "s3://$AWS_S3_BUCKET/$RELEASE_DIRS/latest-version.txt" latest-version.txt "s3://$AWS_S3_BUCKET/$RELEASE_DIRS/latest-version.txt"
fi fi
@@ -53,7 +53,7 @@ function update_version_info() {
if [[ "$VERSION" == *"nightly"* ]]; then if [[ "$VERSION" == *"nightly"* ]]; then
echo "Updating latest-nightly-version.txt" echo "Updating latest-nightly-version.txt"
echo "$VERSION" > latest-nightly-version.txt echo "$VERSION" > latest-nightly-version.txt
s5cmd cp \ aws s3 cp \
latest-nightly-version.txt "s3://$AWS_S3_BUCKET/$RELEASE_DIRS/latest-nightly-version.txt" latest-nightly-version.txt "s3://$AWS_S3_BUCKET/$RELEASE_DIRS/latest-nightly-version.txt"
fi fi
fi fi

View File

@@ -14,11 +14,9 @@ name: Build API docs
jobs: jobs:
apidoc: apidoc:
runs-on: ubuntu-latest runs-on: ubuntu-20.04
steps: steps:
- uses: actions/checkout@v4 - uses: actions/checkout@v4
with:
persist-credentials: false
- uses: arduino/setup-protoc@v3 - uses: arduino/setup-protoc@v3
with: with:
repo-token: ${{ secrets.GITHUB_TOKEN }} repo-token: ${{ secrets.GITHUB_TOKEN }}

View File

@@ -1,35 +0,0 @@
name: Check Dependencies
on:
pull_request:
branches:
- main
jobs:
check-dependencies:
runs-on: ubuntu-latest
steps:
- name: Checkout code
uses: actions/checkout@v4
with:
persist-credentials: false
- name: Set up Rust
uses: actions-rust-lang/setup-rust-toolchain@v1
- name: Run cargo tree
run: cargo tree --prefix none > dependencies.txt
- name: Extract dependency names
run: awk '{print $1}' dependencies.txt > dependency_names.txt
- name: Check for blacklisted crates
run: |
while read -r dep; do
if grep -qFx "$dep" dependency_names.txt; then
echo "Blacklisted crate '$dep' found in dependencies."
exit 1
fi
done < .github/cargo-blacklist.txt
echo "No blacklisted crates found."

View File

@@ -16,11 +16,11 @@ on:
description: The runner uses to build linux-amd64 artifacts description: The runner uses to build linux-amd64 artifacts
default: ec2-c6i.4xlarge-amd64 default: ec2-c6i.4xlarge-amd64
options: options:
- ubuntu-22.04 - ubuntu-20.04
- ubuntu-22.04-8-cores - ubuntu-20.04-8-cores
- ubuntu-22.04-16-cores - ubuntu-20.04-16-cores
- ubuntu-22.04-32-cores - ubuntu-20.04-32-cores
- ubuntu-22.04-64-cores - ubuntu-20.04-64-cores
- ec2-c6i.xlarge-amd64 # 4C8G - ec2-c6i.xlarge-amd64 # 4C8G
- ec2-c6i.2xlarge-amd64 # 8C16G - ec2-c6i.2xlarge-amd64 # 8C16G
- ec2-c6i.4xlarge-amd64 # 16C32G - ec2-c6i.4xlarge-amd64 # 16C32G
@@ -55,11 +55,6 @@ on:
description: Build and push images to DockerHub and ACR description: Build and push images to DockerHub and ACR
required: false required: false
default: true default: true
upload_artifacts_to_s3:
type: boolean
description: Whether upload artifacts to s3
required: false
default: false
cargo_profile: cargo_profile:
type: choice type: choice
description: The cargo profile to use in building GreptimeDB. description: The cargo profile to use in building GreptimeDB.
@@ -81,14 +76,20 @@ env:
NIGHTLY_RELEASE_PREFIX: nightly NIGHTLY_RELEASE_PREFIX: nightly
# Use the different image name to avoid conflict with the release images.
IMAGE_NAME: greptimedb-dev
# The source code will check out in the following path: '${WORKING_DIR}/dev/greptime'. # The source code will check out in the following path: '${WORKING_DIR}/dev/greptime'.
CHECKOUT_GREPTIMEDB_PATH: dev/greptimedb CHECKOUT_GREPTIMEDB_PATH: dev/greptimedb
permissions:
issues: write
jobs: jobs:
allocate-runners: allocate-runners:
name: Allocate runners name: Allocate runners
if: ${{ github.repository == 'GreptimeTeam/greptimedb' }} if: ${{ github.repository == 'GreptimeTeam/greptimedb' }}
runs-on: ubuntu-latest runs-on: ubuntu-20.04
outputs: outputs:
linux-amd64-runner: ${{ steps.start-linux-amd64-runner.outputs.label }} linux-amd64-runner: ${{ steps.start-linux-amd64-runner.outputs.label }}
linux-arm64-runner: ${{ steps.start-linux-arm64-runner.outputs.label }} linux-arm64-runner: ${{ steps.start-linux-arm64-runner.outputs.label }}
@@ -106,7 +107,6 @@ jobs:
uses: actions/checkout@v4 uses: actions/checkout@v4
with: with:
fetch-depth: 0 fetch-depth: 0
persist-credentials: false
- name: Create version - name: Create version
id: create-version id: create-version
@@ -161,7 +161,6 @@ jobs:
uses: actions/checkout@v4 uses: actions/checkout@v4
with: with:
fetch-depth: 0 fetch-depth: 0
persist-credentials: false
- name: Checkout greptimedb - name: Checkout greptimedb
uses: actions/checkout@v4 uses: actions/checkout@v4
@@ -169,7 +168,6 @@ jobs:
repository: ${{ inputs.repository }} repository: ${{ inputs.repository }}
ref: ${{ inputs.commit }} ref: ${{ inputs.commit }}
path: ${{ env.CHECKOUT_GREPTIMEDB_PATH }} path: ${{ env.CHECKOUT_GREPTIMEDB_PATH }}
persist-credentials: true
- uses: ./.github/actions/build-linux-artifacts - uses: ./.github/actions/build-linux-artifacts
with: with:
@@ -194,7 +192,6 @@ jobs:
uses: actions/checkout@v4 uses: actions/checkout@v4
with: with:
fetch-depth: 0 fetch-depth: 0
persist-credentials: false
- name: Checkout greptimedb - name: Checkout greptimedb
uses: actions/checkout@v4 uses: actions/checkout@v4
@@ -202,7 +199,6 @@ jobs:
repository: ${{ inputs.repository }} repository: ${{ inputs.repository }}
ref: ${{ inputs.commit }} ref: ${{ inputs.commit }}
path: ${{ env.CHECKOUT_GREPTIMEDB_PATH }} path: ${{ env.CHECKOUT_GREPTIMEDB_PATH }}
persist-credentials: true
- uses: ./.github/actions/build-linux-artifacts - uses: ./.github/actions/build-linux-artifacts
with: with:
@@ -223,34 +219,26 @@ jobs:
build-linux-amd64-artifacts, build-linux-amd64-artifacts,
build-linux-arm64-artifacts, build-linux-arm64-artifacts,
] ]
runs-on: ubuntu-latest runs-on: ubuntu-20.04
outputs: outputs:
build-result: ${{ steps.set-build-result.outputs.build-result }} build-result: ${{ steps.set-build-result.outputs.build-result }}
steps: steps:
- uses: actions/checkout@v4 - uses: actions/checkout@v4
with: with:
fetch-depth: 0 fetch-depth: 0
persist-credentials: false
- name: Build and push images to dockerhub - name: Build and push images to dockerhub
uses: ./.github/actions/build-images uses: ./.github/actions/build-images
with: with:
image-registry: docker.io image-registry: docker.io
image-namespace: ${{ vars.IMAGE_NAMESPACE }} image-namespace: ${{ vars.IMAGE_NAMESPACE }}
image-name: ${{ vars.DEV_BUILD_IMAGE_NAME }} image-name: ${{ env.IMAGE_NAME }}
image-registry-username: ${{ secrets.DOCKERHUB_USERNAME }} image-registry-username: ${{ secrets.DOCKERHUB_USERNAME }}
image-registry-password: ${{ secrets.DOCKERHUB_TOKEN }} image-registry-password: ${{ secrets.DOCKERHUB_TOKEN }}
version: ${{ needs.allocate-runners.outputs.version }} version: ${{ needs.allocate-runners.outputs.version }}
push-latest-tag: false # Don't push the latest tag to registry. push-latest-tag: false # Don't push the latest tag to registry.
dev-mode: true # Only build the standard images. dev-mode: true # Only build the standard images.
- name: Echo Docker image tag to step summary
run: |
echo "## Docker Image Tag" >> $GITHUB_STEP_SUMMARY
echo "Image Tag: \`${{ needs.allocate-runners.outputs.version }}\`" >> $GITHUB_STEP_SUMMARY
echo "Full Image Name: \`docker.io/${{ vars.IMAGE_NAMESPACE }}/${{ vars.DEV_BUILD_IMAGE_NAME }}:${{ needs.allocate-runners.outputs.version }}\`" >> $GITHUB_STEP_SUMMARY
echo "Pull Command: \`docker pull docker.io/${{ vars.IMAGE_NAMESPACE }}/${{ vars.DEV_BUILD_IMAGE_NAME }}:${{ needs.allocate-runners.outputs.version }}\`" >> $GITHUB_STEP_SUMMARY
- name: Set build result - name: Set build result
id: set-build-result id: set-build-result
run: | run: |
@@ -263,20 +251,19 @@ jobs:
allocate-runners, allocate-runners,
release-images-to-dockerhub, release-images-to-dockerhub,
] ]
runs-on: ubuntu-latest runs-on: ubuntu-20.04
continue-on-error: true continue-on-error: true
steps: steps:
- uses: actions/checkout@v4 - uses: actions/checkout@v4
with: with:
fetch-depth: 0 fetch-depth: 0
persist-credentials: false
- name: Release artifacts to CN region - name: Release artifacts to CN region
uses: ./.github/actions/release-cn-artifacts uses: ./.github/actions/release-cn-artifacts
with: with:
src-image-registry: docker.io src-image-registry: docker.io
src-image-namespace: ${{ vars.IMAGE_NAMESPACE }} src-image-namespace: ${{ vars.IMAGE_NAMESPACE }}
src-image-name: ${{ vars.DEV_BUILD_IMAGE_NAME }} src-image-name: ${{ env.IMAGE_NAME }}
dst-image-registry-username: ${{ secrets.ALICLOUD_USERNAME }} dst-image-registry-username: ${{ secrets.ALICLOUD_USERNAME }}
dst-image-registry-password: ${{ secrets.ALICLOUD_PASSWORD }} dst-image-registry-password: ${{ secrets.ALICLOUD_PASSWORD }}
dst-image-registry: ${{ vars.ACR_IMAGE_REGISTRY }} dst-image-registry: ${{ vars.ACR_IMAGE_REGISTRY }}
@@ -286,7 +273,6 @@ jobs:
aws-cn-access-key-id: ${{ secrets.AWS_CN_ACCESS_KEY_ID }} aws-cn-access-key-id: ${{ secrets.AWS_CN_ACCESS_KEY_ID }}
aws-cn-secret-access-key: ${{ secrets.AWS_CN_SECRET_ACCESS_KEY }} aws-cn-secret-access-key: ${{ secrets.AWS_CN_SECRET_ACCESS_KEY }}
aws-cn-region: ${{ vars.AWS_RELEASE_BUCKET_REGION }} aws-cn-region: ${{ vars.AWS_RELEASE_BUCKET_REGION }}
upload-to-s3: ${{ inputs.upload_artifacts_to_s3 }}
dev-mode: true # Only build the standard images(exclude centos images). dev-mode: true # Only build the standard images(exclude centos images).
push-latest-tag: false # Don't push the latest tag to registry. push-latest-tag: false # Don't push the latest tag to registry.
update-version-info: false # Don't update the version info in S3. update-version-info: false # Don't update the version info in S3.
@@ -295,7 +281,7 @@ jobs:
name: Stop linux-amd64 runner name: Stop linux-amd64 runner
# Only run this job when the runner is allocated. # Only run this job when the runner is allocated.
if: ${{ always() }} if: ${{ always() }}
runs-on: ubuntu-latest runs-on: ubuntu-20.04
needs: [ needs: [
allocate-runners, allocate-runners,
build-linux-amd64-artifacts, build-linux-amd64-artifacts,
@@ -305,7 +291,6 @@ jobs:
uses: actions/checkout@v4 uses: actions/checkout@v4
with: with:
fetch-depth: 0 fetch-depth: 0
persist-credentials: false
- name: Stop EC2 runner - name: Stop EC2 runner
uses: ./.github/actions/stop-runner uses: ./.github/actions/stop-runner
@@ -321,7 +306,7 @@ jobs:
name: Stop linux-arm64 runner name: Stop linux-arm64 runner
# Only run this job when the runner is allocated. # Only run this job when the runner is allocated.
if: ${{ always() }} if: ${{ always() }}
runs-on: ubuntu-latest runs-on: ubuntu-20.04
needs: [ needs: [
allocate-runners, allocate-runners,
build-linux-arm64-artifacts, build-linux-arm64-artifacts,
@@ -331,7 +316,6 @@ jobs:
uses: actions/checkout@v4 uses: actions/checkout@v4
with: with:
fetch-depth: 0 fetch-depth: 0
persist-credentials: false
- name: Stop EC2 runner - name: Stop EC2 runner
uses: ./.github/actions/stop-runner uses: ./.github/actions/stop-runner
@@ -349,17 +333,11 @@ jobs:
needs: [ needs: [
release-images-to-dockerhub release-images-to-dockerhub
] ]
runs-on: ubuntu-latest runs-on: ubuntu-20.04
permissions:
issues: write
env: env:
SLACK_WEBHOOK_URL: ${{ secrets.SLACK_WEBHOOK_URL_DEVELOP_CHANNEL }} SLACK_WEBHOOK_URL: ${{ secrets.SLACK_WEBHOOK_URL_DEVELOP_CHANNEL }}
steps: steps:
- uses: actions/checkout@v4 - uses: actions/checkout@v4
with:
fetch-depth: 0
persist-credentials: false
- uses: ./.github/actions/setup-cyborg - uses: ./.github/actions/setup-cyborg
- name: Report CI status - name: Report CI status
id: report-ci-status id: report-ci-status

View File

@@ -1,6 +1,4 @@
on: on:
schedule:
- cron: "0 15 * * 1-5"
merge_group: merge_group:
pull_request: pull_request:
types: [ opened, synchronize, reopened, ready_for_review ] types: [ opened, synchronize, reopened, ready_for_review ]
@@ -12,6 +10,17 @@ on:
- 'docker/**' - 'docker/**'
- '.gitignore' - '.gitignore'
- 'grafana/**' - 'grafana/**'
push:
branches:
- main
paths-ignore:
- 'docs/**'
- 'config/**'
- '**.md'
- '.dockerignore'
- 'docker/**'
- '.gitignore'
- 'grafana/**'
workflow_dispatch: workflow_dispatch:
name: CI name: CI
@@ -22,13 +31,10 @@ concurrency:
jobs: jobs:
check-typos-and-docs: check-typos-and-docs:
if: ${{ github.repository == 'GreptimeTeam/greptimedb' }}
name: Check typos and docs name: Check typos and docs
runs-on: ubuntu-latest runs-on: ubuntu-20.04
steps: steps:
- uses: actions/checkout@v4 - uses: actions/checkout@v4
with:
persist-credentials: false
- uses: crate-ci/typos@master - uses: crate-ci/typos@master
- name: Check the config docs - name: Check the config docs
run: | run: |
@@ -37,27 +43,21 @@ jobs:
|| (echo "'config/config.md' is not up-to-date, please run 'make config-docs'." && exit 1) || (echo "'config/config.md' is not up-to-date, please run 'make config-docs'." && exit 1)
license-header-check: license-header-check:
if: ${{ github.repository == 'GreptimeTeam/greptimedb' }} runs-on: ubuntu-20.04
runs-on: ubuntu-latest
name: Check License Header name: Check License Header
steps: steps:
- uses: actions/checkout@v4 - uses: actions/checkout@v4
with:
persist-credentials: false
- uses: korandoru/hawkeye@v5 - uses: korandoru/hawkeye@v5
check: check:
if: ${{ github.repository == 'GreptimeTeam/greptimedb' }}
name: Check name: Check
runs-on: ${{ matrix.os }} runs-on: ${{ matrix.os }}
strategy: strategy:
matrix: matrix:
os: [ ubuntu-latest ] os: [ windows-2022, ubuntu-20.04 ]
timeout-minutes: 60 timeout-minutes: 60
steps: steps:
- uses: actions/checkout@v4 - uses: actions/checkout@v4
with:
persist-credentials: false
- uses: arduino/setup-protoc@v3 - uses: arduino/setup-protoc@v3
with: with:
repo-token: ${{ secrets.GITHUB_TOKEN }} repo-token: ${{ secrets.GITHUB_TOKEN }}
@@ -68,38 +68,35 @@ jobs:
# Shares across multiple jobs # Shares across multiple jobs
# Shares with `Clippy` job # Shares with `Clippy` job
shared-key: "check-lint" shared-key: "check-lint"
cache-all-crates: "true"
save-if: ${{ github.ref == 'refs/heads/main' }}
- name: Run cargo check - name: Run cargo check
run: cargo check --locked --workspace --all-targets run: cargo check --locked --workspace --all-targets
toml: toml:
if: ${{ github.repository == 'GreptimeTeam/greptimedb' }}
name: Toml Check name: Toml Check
runs-on: ubuntu-latest runs-on: ubuntu-20.04
timeout-minutes: 60 timeout-minutes: 60
steps: steps:
- uses: actions/checkout@v4 - uses: actions/checkout@v4
with:
persist-credentials: false
- uses: actions-rust-lang/setup-rust-toolchain@v1 - uses: actions-rust-lang/setup-rust-toolchain@v1
- name: Rust Cache
uses: Swatinem/rust-cache@v2
with:
# Shares across multiple jobs
shared-key: "check-toml"
- name: Install taplo - name: Install taplo
run: cargo +stable install taplo-cli --version ^0.9 --locked --force run: cargo +stable install taplo-cli --version ^0.9 --locked
- name: Run taplo - name: Run taplo
run: taplo format --check run: taplo format --check
build: build:
if: ${{ github.repository == 'GreptimeTeam/greptimedb' }}
name: Build GreptimeDB binaries name: Build GreptimeDB binaries
runs-on: ${{ matrix.os }} runs-on: ${{ matrix.os }}
strategy: strategy:
matrix: matrix:
os: [ ubuntu-latest ] os: [ ubuntu-20.04 ]
timeout-minutes: 60 timeout-minutes: 60
steps: steps:
- uses: actions/checkout@v4 - uses: actions/checkout@v4
with:
persist-credentials: false
- uses: arduino/setup-protoc@v3 - uses: arduino/setup-protoc@v3
with: with:
repo-token: ${{ secrets.GITHUB_TOKEN }} repo-token: ${{ secrets.GITHUB_TOKEN }}
@@ -108,15 +105,13 @@ jobs:
with: with:
# Shares across multiple jobs # Shares across multiple jobs
shared-key: "build-binaries" shared-key: "build-binaries"
cache-all-crates: "true"
save-if: ${{ github.ref == 'refs/heads/main' }}
- name: Install cargo-gc-bin - name: Install cargo-gc-bin
shell: bash shell: bash
run: cargo install cargo-gc-bin --force run: cargo install cargo-gc-bin
- name: Build greptime binaries - name: Build greptime binaries
shell: bash shell: bash
# `cargo gc` will invoke `cargo build` with specified args # `cargo gc` will invoke `cargo build` with specified args
run: cargo gc -- --bin greptime --bin sqlness-runner --features pg_kvbackend run: cargo gc -- --bin greptime --bin sqlness-runner
- name: Pack greptime binaries - name: Pack greptime binaries
shell: bash shell: bash
run: | run: |
@@ -132,7 +127,6 @@ jobs:
version: current version: current
fuzztest: fuzztest:
if: ${{ github.repository == 'GreptimeTeam/greptimedb' }}
name: Fuzz Test name: Fuzz Test
needs: build needs: build
runs-on: ubuntu-latest runs-on: ubuntu-latest
@@ -155,18 +149,21 @@ jobs:
echo "Disk space after:" echo "Disk space after:"
df -h df -h
- uses: actions/checkout@v4 - uses: actions/checkout@v4
with:
persist-credentials: false
- uses: arduino/setup-protoc@v3 - uses: arduino/setup-protoc@v3
with: with:
repo-token: ${{ secrets.GITHUB_TOKEN }} repo-token: ${{ secrets.GITHUB_TOKEN }}
- uses: actions-rust-lang/setup-rust-toolchain@v1 - uses: actions-rust-lang/setup-rust-toolchain@v1
- name: Rust Cache
uses: Swatinem/rust-cache@v2
with:
# Shares across multiple jobs
shared-key: "fuzz-test-targets"
- name: Set Rust Fuzz - name: Set Rust Fuzz
shell: bash shell: bash
run: | run: |
sudo apt-get install -y libfuzzer-14-dev sudo apt-get install -y libfuzzer-14-dev
rustup install nightly rustup install nightly
cargo +nightly install cargo-fuzz cargo-gc-bin --force cargo +nightly install cargo-fuzz cargo-gc-bin
- name: Download pre-built binaries - name: Download pre-built binaries
uses: actions/download-artifact@v4 uses: actions/download-artifact@v4
with: with:
@@ -189,13 +186,11 @@ jobs:
max-total-time: 120 max-total-time: 120
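The fuzz jobs above cap each target at 120 seconds through the repository's fuzz action. Assuming the targets listed in the matrix are ordinary cargo-fuzz targets, a rough local equivalent looks like the sketch below (run from the crate that defines the fuzz targets; this is not the step the workflow actually uses):

  - name: Run one fuzz target locally (sketch)
    shell: bash
    run: |
      cargo +nightly install cargo-fuzz --force
      # -max_total_time mirrors the workflow's max-total-time of 120 seconds
      cargo +nightly fuzz run fuzz_create_table -- -max_total_time=120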
unstable-fuzztest: unstable-fuzztest:
if: ${{ github.repository == 'GreptimeTeam/greptimedb' }}
name: Unstable Fuzz Test name: Unstable Fuzz Test
needs: build-greptime-ci needs: build-greptime-ci
runs-on: ubuntu-latest runs-on: ubuntu-latest
timeout-minutes: 60 timeout-minutes: 60
strategy: strategy:
fail-fast: false
matrix: matrix:
target: [ "unstable_fuzz_create_table_standalone" ] target: [ "unstable_fuzz_create_table_standalone" ]
steps: steps:
@@ -212,23 +207,26 @@ jobs:
echo "Disk space after:" echo "Disk space after:"
df -h df -h
- uses: actions/checkout@v4 - uses: actions/checkout@v4
with:
persist-credentials: false
- uses: arduino/setup-protoc@v3 - uses: arduino/setup-protoc@v3
with: with:
repo-token: ${{ secrets.GITHUB_TOKEN }} repo-token: ${{ secrets.GITHUB_TOKEN }}
- uses: actions-rust-lang/setup-rust-toolchain@v1 - uses: actions-rust-lang/setup-rust-toolchain@v1
- name: Rust Cache
uses: Swatinem/rust-cache@v2
with:
# Shares across multiple jobs
shared-key: "fuzz-test-targets"
- name: Set Rust Fuzz - name: Set Rust Fuzz
shell: bash shell: bash
run: | run: |
sudo apt update && sudo apt install -y libfuzzer-14-dev sudo apt update && sudo apt install -y libfuzzer-14-dev
cargo install cargo-fuzz cargo-gc-bin --force cargo install cargo-fuzz cargo-gc-bin
- name: Download pre-built binary - name: Download pre-built binariy
uses: actions/download-artifact@v4 uses: actions/download-artifact@v4
with: with:
name: bin name: bin
path: . path: .
- name: Unzip binary - name: Unzip bianry
run: | run: |
tar -xvf ./bin.tar.gz tar -xvf ./bin.tar.gz
rm ./bin.tar.gz rm ./bin.tar.gz
@@ -250,24 +248,16 @@ jobs:
name: unstable-fuzz-logs name: unstable-fuzz-logs
path: /tmp/unstable-greptime/ path: /tmp/unstable-greptime/
retention-days: 3 retention-days: 3
- name: Describe pods
if: failure()
shell: bash
run: |
kubectl describe pod -n my-greptimedb

build-greptime-ci: build-greptime-ci:
if: ${{ github.repository == 'GreptimeTeam/greptimedb' }}
name: Build GreptimeDB binary (profile-CI) name: Build GreptimeDB binary (profile-CI)
runs-on: ${{ matrix.os }} runs-on: ${{ matrix.os }}
strategy: strategy:
matrix: matrix:
os: [ ubuntu-latest ] os: [ ubuntu-20.04 ]
timeout-minutes: 60 timeout-minutes: 60
steps: steps:
- uses: actions/checkout@v4 - uses: actions/checkout@v4
with:
persist-credentials: false
- uses: arduino/setup-protoc@v3 - uses: arduino/setup-protoc@v3
with: with:
repo-token: ${{ secrets.GITHUB_TOKEN }} repo-token: ${{ secrets.GITHUB_TOKEN }}
@@ -276,15 +266,20 @@ jobs:
with: with:
# Shares across multiple jobs # Shares across multiple jobs
shared-key: "build-greptime-ci" shared-key: "build-greptime-ci"
cache-all-crates: "true"
save-if: ${{ github.ref == 'refs/heads/main' }}
- name: Install cargo-gc-bin - name: Install cargo-gc-bin
shell: bash shell: bash
run: cargo install cargo-gc-bin --force run: cargo install cargo-gc-bin
- name: Build greptime binary - name: Check aws-lc-sys will not build
shell: bash
run: |
if cargo tree -i aws-lc-sys -e features | grep -q aws-lc-sys; then
echo "Found aws-lc-sys, which has compilation problems on older gcc versions. Please replace it with ring until its building experience improves."
exit 1
fi
- name: Build greptime bianry
shell: bash shell: bash
# `cargo gc` will invoke `cargo build` with specified args # `cargo gc` will invoke `cargo build` with specified args
run: cargo gc --profile ci -- --bin greptime --features pg_kvbackend run: cargo gc --profile ci -- --bin greptime
- name: Pack greptime binary - name: Pack greptime binary
shell: bash shell: bash
run: | run: |
@@ -299,13 +294,11 @@ jobs:
version: current version: current
distributed-fuzztest: distributed-fuzztest:
if: ${{ github.repository == 'GreptimeTeam/greptimedb' }}
name: Fuzz Test (Distributed, ${{ matrix.mode.name }}, ${{ matrix.target }}) name: Fuzz Test (Distributed, ${{ matrix.mode.name }}, ${{ matrix.target }})
runs-on: ubuntu-latest runs-on: ubuntu-latest
needs: build-greptime-ci needs: build-greptime-ci
timeout-minutes: 60 timeout-minutes: 60
strategy: strategy:
fail-fast: false
matrix: matrix:
target: [ "fuzz_create_table", "fuzz_alter_table", "fuzz_create_database", "fuzz_create_logical_table", "fuzz_alter_logical_table", "fuzz_insert", "fuzz_insert_logical_table" ] target: [ "fuzz_create_table", "fuzz_alter_table", "fuzz_create_database", "fuzz_create_logical_table", "fuzz_alter_logical_table", "fuzz_insert", "fuzz_insert_logical_table" ]
mode: mode:
@@ -327,29 +320,34 @@ jobs:
echo "Disk space after:" echo "Disk space after:"
df -h df -h
- uses: actions/checkout@v4 - uses: actions/checkout@v4
with:
persist-credentials: false
- name: Setup Kind - name: Setup Kind
uses: ./.github/actions/setup-kind uses: ./.github/actions/setup-kind
- if: matrix.mode.minio - if: matrix.mode.minio
name: Setup Minio name: Setup Minio
uses: ./.github/actions/setup-minio uses: ./.github/actions/setup-minio
- if: matrix.mode.kafka - if: matrix.mode.kafka
name: Setup Kafka cluster name: Setup Kafka cluser
uses: ./.github/actions/setup-kafka-cluster uses: ./.github/actions/setup-kafka-cluster
- name: Setup Etcd cluster - name: Setup Etcd cluser
uses: ./.github/actions/setup-etcd-cluster uses: ./.github/actions/setup-etcd-cluster
- name: Setup Postgres cluster
uses: ./.github/actions/setup-postgres-cluster
# Prepares for fuzz tests # Prepares for fuzz tests
- uses: arduino/setup-protoc@v3 - uses: arduino/setup-protoc@v3
with: with:
repo-token: ${{ secrets.GITHUB_TOKEN }} repo-token: ${{ secrets.GITHUB_TOKEN }}
- uses: actions-rust-lang/setup-rust-toolchain@v1 - uses: actions-rust-lang/setup-rust-toolchain@v1
- name: Rust Cache
uses: Swatinem/rust-cache@v2
with:
# Shares across multiple jobs
shared-key: "fuzz-test-targets"
- name: Set Rust Fuzz - name: Set Rust Fuzz
shell: bash shell: bash
run: | run: |
sudo apt-get install -y libfuzzer-14-dev sudo apt-get install -y libfuzzer-14-dev
rustup install nightly rustup install nightly
cargo +nightly install cargo-fuzz cargo-gc-bin --force cargo +nightly install cargo-fuzz cargo-gc-bin
# Downloads ci image # Downloads ci image
- name: Download pre-built binary - name: Download pre-built binary
uses: actions/download-artifact@v4 uses: actions/download-artifact@v4
@@ -410,11 +408,6 @@ jobs:
shell: bash shell: bash
run: | run: |
kubectl describe nodes kubectl describe nodes
- name: Describe pod
if: failure()
shell: bash
run: |
kubectl describe pod -n my-greptimedb
- name: Export kind logs - name: Export kind logs
if: failure() if: failure()
shell: bash shell: bash
@@ -437,13 +430,11 @@ jobs:
docker system prune -f docker system prune -f
distributed-fuzztest-with-chaos: distributed-fuzztest-with-chaos:
if: ${{ github.repository == 'GreptimeTeam/greptimedb' }}
name: Fuzz Test with Chaos (Distributed, ${{ matrix.mode.name }}, ${{ matrix.target }}) name: Fuzz Test with Chaos (Distributed, ${{ matrix.mode.name }}, ${{ matrix.target }})
runs-on: ubuntu-latest runs-on: ubuntu-latest
needs: build-greptime-ci needs: build-greptime-ci
timeout-minutes: 60 timeout-minutes: 60
strategy: strategy:
fail-fast: false
matrix: matrix:
target: ["fuzz_migrate_mito_regions", "fuzz_migrate_metric_regions", "fuzz_failover_mito_regions", "fuzz_failover_metric_regions"] target: ["fuzz_migrate_mito_regions", "fuzz_migrate_metric_regions", "fuzz_failover_mito_regions", "fuzz_failover_metric_regions"]
mode: mode:
@@ -478,8 +469,6 @@ jobs:
echo "Disk space after:" echo "Disk space after:"
df -h df -h
- uses: actions/checkout@v4 - uses: actions/checkout@v4
with:
persist-credentials: false
- name: Setup Kind - name: Setup Kind
uses: ./.github/actions/setup-kind uses: ./.github/actions/setup-kind
- name: Setup Chaos Mesh - name: Setup Chaos Mesh
@@ -488,21 +477,28 @@ jobs:
name: Setup Minio name: Setup Minio
uses: ./.github/actions/setup-minio uses: ./.github/actions/setup-minio
- if: matrix.mode.kafka - if: matrix.mode.kafka
name: Setup Kafka cluster name: Setup Kafka cluser
uses: ./.github/actions/setup-kafka-cluster uses: ./.github/actions/setup-kafka-cluster
- name: Setup Etcd cluster - name: Setup Etcd cluser
uses: ./.github/actions/setup-etcd-cluster uses: ./.github/actions/setup-etcd-cluster
- name: Setup Postgres cluster
uses: ./.github/actions/setup-postgres-cluster
# Prepares for fuzz tests # Prepares for fuzz tests
- uses: arduino/setup-protoc@v3 - uses: arduino/setup-protoc@v3
with: with:
repo-token: ${{ secrets.GITHUB_TOKEN }} repo-token: ${{ secrets.GITHUB_TOKEN }}
- uses: actions-rust-lang/setup-rust-toolchain@v1 - uses: actions-rust-lang/setup-rust-toolchain@v1
- name: Rust Cache
uses: Swatinem/rust-cache@v2
with:
# Shares across multiple jobs
shared-key: "fuzz-test-targets"
- name: Set Rust Fuzz - name: Set Rust Fuzz
shell: bash shell: bash
run: | run: |
sudo apt-get install -y libfuzzer-14-dev sudo apt-get install -y libfuzzer-14-dev
rustup install nightly rustup install nightly
cargo +nightly install cargo-fuzz cargo-gc-bin --force cargo +nightly install cargo-fuzz cargo-gc-bin
# Downloads ci image # Downloads ci image
- name: Download pre-built binary - name: Download pre-built binary
uses: actions/download-artifact@v4 uses: actions/download-artifact@v4
@@ -564,11 +560,6 @@ jobs:
shell: bash shell: bash
run: | run: |
kubectl describe nodes kubectl describe nodes
- name: Describe pods
if: failure()
shell: bash
run: |
kubectl describe pod -n my-greptimedb
- name: Export kind logs - name: Export kind logs
if: failure() if: failure()
shell: bash shell: bash
@@ -591,14 +582,12 @@ jobs:
docker system prune -f docker system prune -f
sqlness: sqlness:
if: ${{ github.repository == 'GreptimeTeam/greptimedb' }}
name: Sqlness Test (${{ matrix.mode.name }}) name: Sqlness Test (${{ matrix.mode.name }})
needs: build needs: build
runs-on: ${{ matrix.os }} runs-on: ${{ matrix.os }}
strategy: strategy:
fail-fast: false
matrix: matrix:
os: [ ubuntu-latest ] os: [ ubuntu-20.04 ]
mode: mode:
- name: "Basic" - name: "Basic"
opts: "" opts: ""
@@ -606,18 +595,13 @@ jobs:
- name: "Remote WAL" - name: "Remote WAL"
opts: "-w kafka -k 127.0.0.1:9092" opts: "-w kafka -k 127.0.0.1:9092"
kafka: true kafka: true
- name: "PostgreSQL KvBackend"
opts: "--setup-pg"
kafka: false
timeout-minutes: 60 timeout-minutes: 60
steps: steps:
- uses: actions/checkout@v4 - uses: actions/checkout@v4
with:
persist-credentials: false
- if: matrix.mode.kafka - if: matrix.mode.kafka
name: Setup kafka server name: Setup kafka server
working-directory: tests-integration/fixtures working-directory: tests-integration/fixtures/kafka
run: docker compose up -d --wait kafka run: docker compose -f docker-compose-standalone.yml up -d --wait
- name: Download pre-built binaries - name: Download pre-built binaries
uses: actions/download-artifact@v4 uses: actions/download-artifact@v4
with: with:
@@ -636,32 +620,31 @@ jobs:
retention-days: 3 retention-days: 3
fmt: fmt:
if: ${{ github.repository == 'GreptimeTeam/greptimedb' }}
name: Rustfmt name: Rustfmt
runs-on: ubuntu-latest runs-on: ubuntu-20.04
timeout-minutes: 60 timeout-minutes: 60
steps: steps:
- uses: actions/checkout@v4 - uses: actions/checkout@v4
with:
persist-credentials: false
- uses: arduino/setup-protoc@v3 - uses: arduino/setup-protoc@v3
with: with:
repo-token: ${{ secrets.GITHUB_TOKEN }} repo-token: ${{ secrets.GITHUB_TOKEN }}
- uses: actions-rust-lang/setup-rust-toolchain@v1 - uses: actions-rust-lang/setup-rust-toolchain@v1
with: with:
components: rustfmt components: rustfmt
- name: Rust Cache
uses: Swatinem/rust-cache@v2
with:
# Shares across multiple jobs
shared-key: "check-rust-fmt"
- name: Check format - name: Check format
run: make fmt-check run: make fmt-check
clippy: clippy:
if: ${{ github.repository == 'GreptimeTeam/greptimedb' }}
name: Clippy name: Clippy
runs-on: ubuntu-latest runs-on: ubuntu-20.04
timeout-minutes: 60 timeout-minutes: 60
steps: steps:
- uses: actions/checkout@v4 - uses: actions/checkout@v4
with:
persist-credentials: false
- uses: arduino/setup-protoc@v3 - uses: arduino/setup-protoc@v3
with: with:
repo-token: ${{ secrets.GITHUB_TOKEN }} repo-token: ${{ secrets.GITHUB_TOKEN }}
@@ -674,108 +657,60 @@ jobs:
# Shares across multiple jobs # Shares across multiple jobs
# Shares with `Check` job # Shares with `Check` job
shared-key: "check-lint" shared-key: "check-lint"
cache-all-crates: "true"
save-if: ${{ github.ref == 'refs/heads/main' }}
- name: Run cargo clippy - name: Run cargo clippy
run: make clippy run: make clippy
conflict-check:
if: ${{ github.repository == 'GreptimeTeam/greptimedb' }}
name: Check for conflict
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v4
with:
persist-credentials: false
- name: Merge Conflict Finder
uses: olivernybroe/action-conflict-finder@v4.0
test:
if: ${{ github.repository == 'GreptimeTeam/greptimedb' && github.event_name != 'merge_group' }}
runs-on: ubuntu-22.04-arm
timeout-minutes: 60
needs: [conflict-check, clippy, fmt]
steps:
- uses: actions/checkout@v4
with:
persist-credentials: false
- uses: arduino/setup-protoc@v3
with:
repo-token: ${{ secrets.GITHUB_TOKEN }}
- uses: rui314/setup-mold@v1
- name: Install toolchain
uses: actions-rust-lang/setup-rust-toolchain@v1
with:
cache: false
- name: Rust Cache
uses: Swatinem/rust-cache@v2
with:
# Shares across multiple jobs
shared-key: "coverage-test"
cache-all-crates: "true"
save-if: ${{ github.ref == 'refs/heads/main' }}
- name: Install latest nextest release
uses: taiki-e/install-action@nextest
- name: Setup external services
working-directory: tests-integration/fixtures
run: docker compose up -d --wait
- name: Run nextest cases
run: cargo nextest run --workspace -F dashboard -F pg_kvbackend
env:
CARGO_BUILD_RUSTFLAGS: "-C link-arg=-fuse-ld=mold"
RUST_BACKTRACE: 1
RUST_MIN_STACK: 8388608 # 8MB
CARGO_INCREMENTAL: 0
GT_S3_BUCKET: ${{ vars.AWS_CI_TEST_BUCKET }}
GT_S3_ACCESS_KEY_ID: ${{ secrets.AWS_CI_TEST_ACCESS_KEY_ID }}
GT_S3_ACCESS_KEY: ${{ secrets.AWS_CI_TEST_SECRET_ACCESS_KEY }}
GT_S3_REGION: ${{ vars.AWS_CI_TEST_BUCKET_REGION }}
GT_MINIO_BUCKET: greptime
GT_MINIO_ACCESS_KEY_ID: superpower_ci_user
GT_MINIO_ACCESS_KEY: superpower_password
GT_MINIO_REGION: us-west-2
GT_MINIO_ENDPOINT_URL: http://127.0.0.1:9000
GT_ETCD_ENDPOINTS: http://127.0.0.1:2379
GT_POSTGRES_ENDPOINTS: postgres://greptimedb:admin@127.0.0.1:5432/postgres
GT_MYSQL_ENDPOINTS: mysql://greptimedb:admin@127.0.0.1:3306/mysql
GT_KAFKA_ENDPOINTS: 127.0.0.1:9092
GT_KAFKA_SASL_ENDPOINTS: 127.0.0.1:9093
UNITTEST_LOG_DIR: "__unittest_logs"
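The new test job above can be approximated locally with the same two commands it runs: start the fixture services from the compose project in tests-integration/fixtures, then invoke nextest with the same feature flags. A sketch (some tests may additionally need the GT_* endpoint variables listed above):

  - name: Run tests locally (sketch of the job above)
    shell: bash
    run: |
      # Start the fixture services that the GT_* endpoints above point at
      (cd tests-integration/fixtures && docker compose up -d --wait)
      cargo nextest run --workspace -F dashboard -F pg_kvbackend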
coverage: coverage:
if: ${{ github.repository == 'GreptimeTeam/greptimedb' && github.event_name == 'merge_group' }} if: github.event.pull_request.draft == false
runs-on: ubuntu-22.04-8-cores runs-on: ubuntu-20.04-8-cores
timeout-minutes: 60 timeout-minutes: 60
steps: steps:
- uses: actions/checkout@v4 - uses: actions/checkout@v4
with:
persist-credentials: false
- uses: arduino/setup-protoc@v3 - uses: arduino/setup-protoc@v3
with: with:
repo-token: ${{ secrets.GITHUB_TOKEN }} repo-token: ${{ secrets.GITHUB_TOKEN }}
- uses: rui314/setup-mold@v1 - uses: KyleMayes/install-llvm-action@v1
with:
version: "14.0"
- name: Install toolchain - name: Install toolchain
uses: actions-rust-lang/setup-rust-toolchain@v1 uses: actions-rust-lang/setup-rust-toolchain@v1
with: with:
components: llvm-tools components: llvm-tools-preview
cache: false
- name: Rust Cache - name: Rust Cache
uses: Swatinem/rust-cache@v2 uses: Swatinem/rust-cache@v2
with: with:
# Shares across multiple jobs # Shares across multiple jobs
shared-key: "coverage-test" shared-key: "coverage-test"
save-if: ${{ github.ref == 'refs/heads/main' }} - name: Docker Cache
uses: ScribeMD/docker-cache@0.3.7
with:
key: docker-${{ runner.os }}-coverage
- name: Install latest nextest release - name: Install latest nextest release
uses: taiki-e/install-action@nextest uses: taiki-e/install-action@nextest
- name: Install cargo-llvm-cov - name: Install cargo-llvm-cov
uses: taiki-e/install-action@cargo-llvm-cov uses: taiki-e/install-action@cargo-llvm-cov
- name: Setup external services - name: Install Python
working-directory: tests-integration/fixtures uses: actions/setup-python@v5
run: docker compose up -d --wait with:
python-version: '3.10'
- name: Install PyArrow Package
run: pip install pyarrow numpy
- name: Setup etcd server
working-directory: tests-integration/fixtures/etcd
run: docker compose -f docker-compose-standalone.yml up -d --wait
- name: Setup kafka server
working-directory: tests-integration/fixtures/kafka
run: docker compose -f docker-compose-standalone.yml up -d --wait
- name: Setup minio
working-directory: tests-integration/fixtures/minio
run: docker compose -f docker-compose-standalone.yml up -d --wait
- name: Setup postgres server
working-directory: tests-integration/fixtures/postgres
run: docker compose -f docker-compose-standalone.yml up -d --wait
- name: Run nextest cases - name: Run nextest cases
run: cargo llvm-cov nextest --workspace --lcov --output-path lcov.info -F dashboard -F pg_kvbackend run: cargo llvm-cov nextest --workspace --lcov --output-path lcov.info -F pyo3_backend -F dashboard
env: env:
CARGO_BUILD_RUSTFLAGS: "-C link-arg=-fuse-ld=mold" CARGO_BUILD_RUSTFLAGS: "-C link-arg=-fuse-ld=lld"
RUST_BACKTRACE: 1 RUST_BACKTRACE: 1
CARGO_INCREMENTAL: 0 CARGO_INCREMENTAL: 0
GT_S3_BUCKET: ${{ vars.AWS_CI_TEST_BUCKET }} GT_S3_BUCKET: ${{ vars.AWS_CI_TEST_BUCKET }}
@@ -789,7 +724,6 @@ jobs:
GT_MINIO_ENDPOINT_URL: http://127.0.0.1:9000 GT_MINIO_ENDPOINT_URL: http://127.0.0.1:9000
GT_ETCD_ENDPOINTS: http://127.0.0.1:2379 GT_ETCD_ENDPOINTS: http://127.0.0.1:2379
GT_POSTGRES_ENDPOINTS: postgres://greptimedb:admin@127.0.0.1:5432/postgres GT_POSTGRES_ENDPOINTS: postgres://greptimedb:admin@127.0.0.1:5432/postgres
GT_MYSQL_ENDPOINTS: mysql://greptimedb:admin@127.0.0.1:3306/mysql
GT_KAFKA_ENDPOINTS: 127.0.0.1:9092 GT_KAFKA_ENDPOINTS: 127.0.0.1:9092
GT_KAFKA_SASL_ENDPOINTS: 127.0.0.1:9093 GT_KAFKA_SASL_ENDPOINTS: 127.0.0.1:9093
UNITTEST_LOG_DIR: "__unittest_logs" UNITTEST_LOG_DIR: "__unittest_logs"
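The test and coverage jobs also switch the linker from lld to mold: KyleMayes/install-llvm-action is dropped in favor of rui314/setup-mold@v1 and the rustflag becomes -fuse-ld=mold. Reduced to its two moving parts (values copied from the diff, the build command is a generic placeholder):

  - uses: rui314/setup-mold@v1
  - name: Build with mold as the linker
    run: cargo build --workspace
    env:
      CARGO_BUILD_RUSTFLAGS: "-C link-arg=-fuse-ld=mold"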
@@ -803,10 +737,9 @@ jobs:
verbose: true verbose: true
# compat: # compat:
# if: ${{ github.repository == 'GreptimeTeam/greptimedb' }}
# name: Compatibility Test # name: Compatibility Test
# needs: build # needs: build
# runs-on: ubuntu-22.04 # runs-on: ubuntu-20.04
# timeout-minutes: 60 # timeout-minutes: 60
# steps: # steps:
# - uses: actions/checkout@v4 # - uses: actions/checkout@v4


@@ -3,21 +3,16 @@ on:
pull_request_target: pull_request_target:
types: [opened, edited] types: [opened, edited]
concurrency: permissions:
group: ${{ github.workflow }}-${{ github.head_ref || github.run_id }} pull-requests: write
cancel-in-progress: true contents: read
jobs: jobs:
docbot: docbot:
runs-on: ubuntu-latest runs-on: ubuntu-20.04
permissions:
pull-requests: write
contents: read
timeout-minutes: 10 timeout-minutes: 10
steps: steps:
- uses: actions/checkout@v4 - uses: actions/checkout@v4
with:
persist-credentials: false
- uses: ./.github/actions/setup-cyborg - uses: ./.github/actions/setup-cyborg
- name: Maybe Follow Up Docs Issue - name: Maybe Follow Up Docs Issue
working-directory: cyborg working-directory: cyborg
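For the docbot workflow, the diff moves the write permissions from the workflow level down to the job and adds a concurrency group keyed on the branch, so a newer run for the same pull request cancels the older one. The resulting shape, in isolation:

  concurrency:
    group: ${{ github.workflow }}-${{ github.head_ref || github.run_id }}
    cancel-in-progress: true
  jobs:
    docbot:
      runs-on: ubuntu-latest
      # Job-level permissions keep the pull-request write scope off any other job added later
      permissions:
        pull-requests: write
        contents: read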


@@ -31,47 +31,38 @@ name: CI
jobs: jobs:
typos: typos:
name: Spell Check with Typos name: Spell Check with Typos
runs-on: ubuntu-latest runs-on: ubuntu-20.04
steps: steps:
- uses: actions/checkout@v4 - uses: actions/checkout@v4
with:
persist-credentials: false
- uses: crate-ci/typos@master - uses: crate-ci/typos@master
license-header-check: license-header-check:
runs-on: ubuntu-latest runs-on: ubuntu-20.04
name: Check License Header name: Check License Header
steps: steps:
- uses: actions/checkout@v4 - uses: actions/checkout@v4
with:
persist-credentials: false
- uses: korandoru/hawkeye@v5 - uses: korandoru/hawkeye@v5
check: check:
name: Check name: Check
runs-on: ubuntu-latest runs-on: ubuntu-20.04
steps: steps:
- run: 'echo "No action required"' - run: 'echo "No action required"'
fmt: fmt:
name: Rustfmt name: Rustfmt
runs-on: ubuntu-latest runs-on: ubuntu-20.04
steps: steps:
- run: 'echo "No action required"' - run: 'echo "No action required"'
clippy: clippy:
name: Clippy name: Clippy
runs-on: ubuntu-latest runs-on: ubuntu-20.04
steps: steps:
- run: 'echo "No action required"' - run: 'echo "No action required"'
coverage: coverage:
runs-on: ubuntu-latest runs-on: ubuntu-20.04
steps:
- run: 'echo "No action required"'
test:
runs-on: ubuntu-latest
steps: steps:
- run: 'echo "No action required"' - run: 'echo "No action required"'
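These placeholder jobs appear to exist so that branch-protection required checks with the same names (Check, Rustfmt, Clippy, coverage, test) still report success when the full CI workflow does not run; that reading is an interpretation of the file, not something stated in the diff. Each placeholder is just a status-only job:

  test:
    runs-on: ubuntu-latest
    steps:
      # Succeed immediately; the real `test` job runs in the main CI workflow
      - run: 'echo "No action required"'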
@@ -80,7 +71,7 @@ jobs:
runs-on: ${{ matrix.os }} runs-on: ${{ matrix.os }}
strategy: strategy:
matrix: matrix:
os: [ ubuntu-latest ] os: [ ubuntu-20.04 ]
mode: mode:
- name: "Basic" - name: "Basic"
- name: "Remote WAL" - name: "Remote WAL"


@@ -12,13 +12,13 @@ on:
linux_amd64_runner: linux_amd64_runner:
type: choice type: choice
description: The runner used to build linux-amd64 artifacts description: The runner used to build linux-amd64 artifacts
default: ec2-c6i.4xlarge-amd64 default: ec2-c6i.2xlarge-amd64
options: options:
- ubuntu-22.04 - ubuntu-20.04
- ubuntu-22.04-8-cores - ubuntu-20.04-8-cores
- ubuntu-22.04-16-cores - ubuntu-20.04-16-cores
- ubuntu-22.04-32-cores - ubuntu-20.04-32-cores
- ubuntu-22.04-64-cores - ubuntu-20.04-64-cores
- ec2-c6i.xlarge-amd64 # 4C8G - ec2-c6i.xlarge-amd64 # 4C8G
- ec2-c6i.2xlarge-amd64 # 8C16G - ec2-c6i.2xlarge-amd64 # 8C16G
- ec2-c6i.4xlarge-amd64 # 16C32G - ec2-c6i.4xlarge-amd64 # 16C32G
@@ -27,7 +27,7 @@ on:
linux_arm64_runner: linux_arm64_runner:
type: choice type: choice
description: The runner used to build linux-arm64 artifacts description: The runner used to build linux-arm64 artifacts
default: ec2-c6g.4xlarge-arm64 default: ec2-c6g.2xlarge-arm64
options: options:
- ec2-c6g.xlarge-arm64 # 4C8G - ec2-c6g.xlarge-arm64 # 4C8G
- ec2-c6g.2xlarge-arm64 # 8C16G - ec2-c6g.2xlarge-arm64 # 8C16G
@@ -66,11 +66,18 @@ env:
NIGHTLY_RELEASE_PREFIX: nightly NIGHTLY_RELEASE_PREFIX: nightly
# Use a different image name to avoid conflicts with the release images.
# The DockerHub image will be greptime/greptimedb-nightly.
IMAGE_NAME: greptimedb-nightly
permissions:
issues: write
jobs: jobs:
allocate-runners: allocate-runners:
name: Allocate runners name: Allocate runners
if: ${{ github.repository == 'GreptimeTeam/greptimedb' }} if: ${{ github.repository == 'GreptimeTeam/greptimedb' }}
runs-on: ubuntu-latest runs-on: ubuntu-20.04
outputs: outputs:
linux-amd64-runner: ${{ steps.start-linux-amd64-runner.outputs.label }} linux-amd64-runner: ${{ steps.start-linux-amd64-runner.outputs.label }}
linux-arm64-runner: ${{ steps.start-linux-arm64-runner.outputs.label }} linux-arm64-runner: ${{ steps.start-linux-arm64-runner.outputs.label }}
@@ -88,7 +95,6 @@ jobs:
uses: actions/checkout@v4 uses: actions/checkout@v4
with: with:
fetch-depth: 0 fetch-depth: 0
persist-credentials: false
- name: Create version - name: Create version
id: create-version id: create-version
@@ -141,7 +147,6 @@ jobs:
- uses: actions/checkout@v4 - uses: actions/checkout@v4
with: with:
fetch-depth: 0 fetch-depth: 0
persist-credentials: false
- uses: ./.github/actions/build-linux-artifacts - uses: ./.github/actions/build-linux-artifacts
with: with:
@@ -163,7 +168,6 @@ jobs:
- uses: actions/checkout@v4 - uses: actions/checkout@v4
with: with:
fetch-depth: 0 fetch-depth: 0
persist-credentials: false
- uses: ./.github/actions/build-linux-artifacts - uses: ./.github/actions/build-linux-artifacts
with: with:
@@ -182,25 +186,24 @@ jobs:
build-linux-amd64-artifacts, build-linux-amd64-artifacts,
build-linux-arm64-artifacts, build-linux-arm64-artifacts,
] ]
runs-on: ubuntu-latest runs-on: ubuntu-20.04
outputs: outputs:
nightly-build-result: ${{ steps.set-nightly-build-result.outputs.nightly-build-result }} nightly-build-result: ${{ steps.set-nightly-build-result.outputs.nightly-build-result }}
steps: steps:
- uses: actions/checkout@v4 - uses: actions/checkout@v4
with: with:
fetch-depth: 0 fetch-depth: 0
persist-credentials: false
- name: Build and push images to dockerhub - name: Build and push images to dockerhub
uses: ./.github/actions/build-images uses: ./.github/actions/build-images
with: with:
image-registry: docker.io image-registry: docker.io
image-namespace: ${{ vars.IMAGE_NAMESPACE }} image-namespace: ${{ vars.IMAGE_NAMESPACE }}
image-name: ${{ vars.NIGHTLY_BUILD_IMAGE_NAME }} image-name: ${{ env.IMAGE_NAME }}
image-registry-username: ${{ secrets.DOCKERHUB_USERNAME }} image-registry-username: ${{ secrets.DOCKERHUB_USERNAME }}
image-registry-password: ${{ secrets.DOCKERHUB_TOKEN }} image-registry-password: ${{ secrets.DOCKERHUB_TOKEN }}
version: ${{ needs.allocate-runners.outputs.version }} version: ${{ needs.allocate-runners.outputs.version }}
push-latest-tag: false push-latest-tag: true
- name: Set nightly build result - name: Set nightly build result
id: set-nightly-build-result id: set-nightly-build-result
@@ -214,7 +217,7 @@ jobs:
allocate-runners, allocate-runners,
release-images-to-dockerhub, release-images-to-dockerhub,
] ]
runs-on: ubuntu-latest runs-on: ubuntu-20.04
# When we push to ACR, it's easy to fail due to some unknown network issues. # When we push to ACR, it's easy to fail due to some unknown network issues.
# However, we don't want to fail the whole workflow because of this. # However, we don't want to fail the whole workflow because of this.
# The ACR has a daily sync with DockerHub, so don't worry about the image not being updated. # The ACR has a daily sync with DockerHub, so don't worry about the image not being updated.
@@ -223,14 +226,13 @@ jobs:
- uses: actions/checkout@v4 - uses: actions/checkout@v4
with: with:
fetch-depth: 0 fetch-depth: 0
persist-credentials: false
- name: Release artifacts to CN region - name: Release artifacts to CN region
uses: ./.github/actions/release-cn-artifacts uses: ./.github/actions/release-cn-artifacts
with: with:
src-image-registry: docker.io src-image-registry: docker.io
src-image-namespace: ${{ vars.IMAGE_NAMESPACE }} src-image-namespace: ${{ vars.IMAGE_NAMESPACE }}
src-image-name: ${{ vars.NIGHTLY_BUILD_IMAGE_NAME }} src-image-name: ${{ env.IMAGE_NAME }}
dst-image-registry-username: ${{ secrets.ALICLOUD_USERNAME }} dst-image-registry-username: ${{ secrets.ALICLOUD_USERNAME }}
dst-image-registry-password: ${{ secrets.ALICLOUD_PASSWORD }} dst-image-registry-password: ${{ secrets.ALICLOUD_PASSWORD }}
dst-image-registry: ${{ vars.ACR_IMAGE_REGISTRY }} dst-image-registry: ${{ vars.ACR_IMAGE_REGISTRY }}
@@ -240,16 +242,15 @@ jobs:
aws-cn-access-key-id: ${{ secrets.AWS_CN_ACCESS_KEY_ID }} aws-cn-access-key-id: ${{ secrets.AWS_CN_ACCESS_KEY_ID }}
aws-cn-secret-access-key: ${{ secrets.AWS_CN_SECRET_ACCESS_KEY }} aws-cn-secret-access-key: ${{ secrets.AWS_CN_SECRET_ACCESS_KEY }}
aws-cn-region: ${{ vars.AWS_RELEASE_BUCKET_REGION }} aws-cn-region: ${{ vars.AWS_RELEASE_BUCKET_REGION }}
upload-to-s3: false
dev-mode: false dev-mode: false
update-version-info: false # Don't update version info in S3. update-version-info: false # Don't update version info in S3.
push-latest-tag: false push-latest-tag: true
stop-linux-amd64-runner: # It's always run as the last job in the workflow to make sure that the runner is released. stop-linux-amd64-runner: # It's always run as the last job in the workflow to make sure that the runner is released.
name: Stop linux-amd64 runner name: Stop linux-amd64 runner
# Only run this job when the runner is allocated. # Only run this job when the runner is allocated.
if: ${{ always() }} if: ${{ always() }}
runs-on: ubuntu-latest runs-on: ubuntu-20.04
needs: [ needs: [
allocate-runners, allocate-runners,
build-linux-amd64-artifacts, build-linux-amd64-artifacts,
@@ -259,7 +260,6 @@ jobs:
uses: actions/checkout@v4 uses: actions/checkout@v4
with: with:
fetch-depth: 0 fetch-depth: 0
persist-credentials: false
- name: Stop EC2 runner - name: Stop EC2 runner
uses: ./.github/actions/stop-runner uses: ./.github/actions/stop-runner
@@ -275,7 +275,7 @@ jobs:
name: Stop linux-arm64 runner name: Stop linux-arm64 runner
# Only run this job when the runner is allocated. # Only run this job when the runner is allocated.
if: ${{ always() }} if: ${{ always() }}
runs-on: ubuntu-latest runs-on: ubuntu-20.04
needs: [ needs: [
allocate-runners, allocate-runners,
build-linux-arm64-artifacts, build-linux-arm64-artifacts,
@@ -285,7 +285,6 @@ jobs:
uses: actions/checkout@v4 uses: actions/checkout@v4
with: with:
fetch-depth: 0 fetch-depth: 0
persist-credentials: false
- name: Stop EC2 runner - name: Stop EC2 runner
uses: ./.github/actions/stop-runner uses: ./.github/actions/stop-runner
@@ -303,15 +302,11 @@ jobs:
needs: [ needs: [
release-images-to-dockerhub release-images-to-dockerhub
] ]
runs-on: ubuntu-latest runs-on: ubuntu-20.04
permissions:
issues: write
env: env:
SLACK_WEBHOOK_URL: ${{ secrets.SLACK_WEBHOOK_URL_DEVELOP_CHANNEL }} SLACK_WEBHOOK_URL: ${{ secrets.SLACK_WEBHOOK_URL_DEVELOP_CHANNEL }}
steps: steps:
- uses: actions/checkout@v4 - uses: actions/checkout@v4
with:
persist-credentials: false
- uses: ./.github/actions/setup-cyborg - uses: ./.github/actions/setup-cyborg
- name: Report CI status - name: Report CI status
id: report-ci-status id: report-ci-status


@@ -1,6 +1,6 @@
on: on:
schedule: schedule:
- cron: "0 23 * * 1-4" - cron: "0 23 * * 1-5"
workflow_dispatch: workflow_dispatch:
name: Nightly CI name: Nightly CI
@@ -9,21 +9,19 @@ concurrency:
group: ${{ github.workflow }}-${{ github.head_ref || github.run_id }} group: ${{ github.workflow }}-${{ github.head_ref || github.run_id }}
cancel-in-progress: true cancel-in-progress: true
permissions:
issues: write
jobs: jobs:
sqlness-test: sqlness-test:
name: Run sqlness test name: Run sqlness test
if: ${{ github.repository == 'GreptimeTeam/greptimedb' }} if: ${{ github.repository == 'GreptimeTeam/greptimedb' }}
runs-on: ubuntu-latest runs-on: ubuntu-22.04
steps: steps:
- name: Checkout - name: Checkout
uses: actions/checkout@v4 uses: actions/checkout@v4
with: with:
fetch-depth: 0 fetch-depth: 0
persist-credentials: false
- name: Check install.sh
run: ./.github/scripts/check-install-script.sh
- name: Run sqlness test - name: Run sqlness test
uses: ./.github/actions/sqlness-test uses: ./.github/actions/sqlness-test
with: with:
@@ -44,14 +42,9 @@ jobs:
name: Sqlness tests on Windows name: Sqlness tests on Windows
if: ${{ github.repository == 'GreptimeTeam/greptimedb' }} if: ${{ github.repository == 'GreptimeTeam/greptimedb' }}
runs-on: windows-2022-8-cores runs-on: windows-2022-8-cores
permissions:
issues: write
timeout-minutes: 60 timeout-minutes: 60
steps: steps:
- uses: actions/checkout@v4 - uses: actions/checkout@v4
with:
fetch-depth: 0
persist-credentials: false
- uses: ./.github/actions/setup-cyborg - uses: ./.github/actions/setup-cyborg
- uses: arduino/setup-protoc@v3 - uses: arduino/setup-protoc@v3
with: with:
@@ -79,9 +72,6 @@ jobs:
steps: steps:
- run: git config --global core.autocrlf false - run: git config --global core.autocrlf false
- uses: actions/checkout@v4 - uses: actions/checkout@v4
with:
fetch-depth: 0
persist-credentials: false
- uses: ./.github/actions/setup-cyborg - uses: ./.github/actions/setup-cyborg
- uses: arduino/setup-protoc@v3 - uses: arduino/setup-protoc@v3
with: with:
@@ -97,42 +87,34 @@ jobs:
uses: Swatinem/rust-cache@v2 uses: Swatinem/rust-cache@v2
- name: Install Cargo Nextest - name: Install Cargo Nextest
uses: taiki-e/install-action@nextest uses: taiki-e/install-action@nextest
- name: Install Python
uses: actions/setup-python@v5
with:
python-version: "3.10"
- name: Install PyArrow Package
run: pip install pyarrow numpy
- name: Install WSL distribution - name: Install WSL distribution
uses: Vampire/setup-wsl@v2 uses: Vampire/setup-wsl@v2
with: with:
distribution: Ubuntu-22.04 distribution: Ubuntu-22.04
- name: Running tests - name: Running tests
run: cargo nextest run -F dashboard run: cargo nextest run -F pyo3_backend,dashboard
env: env:
CARGO_BUILD_RUSTFLAGS: "-C linker=lld-link" CARGO_BUILD_RUSTFLAGS: "-C linker=lld-link"
RUST_BACKTRACE: 1 RUST_BACKTRACE: 1
CARGO_INCREMENTAL: 0 CARGO_INCREMENTAL: 0
RUSTUP_WINDOWS_PATH_ADD_BIN: 1 # Workaround for https://github.com/nextest-rs/nextest/issues/1493
GT_S3_BUCKET: ${{ vars.AWS_CI_TEST_BUCKET }} GT_S3_BUCKET: ${{ vars.AWS_CI_TEST_BUCKET }}
GT_S3_ACCESS_KEY_ID: ${{ secrets.AWS_CI_TEST_ACCESS_KEY_ID }} GT_S3_ACCESS_KEY_ID: ${{ secrets.AWS_CI_TEST_ACCESS_KEY_ID }}
GT_S3_ACCESS_KEY: ${{ secrets.AWS_CI_TEST_SECRET_ACCESS_KEY }} GT_S3_ACCESS_KEY: ${{ secrets.AWS_CI_TEST_SECRET_ACCESS_KEY }}
GT_S3_REGION: ${{ vars.AWS_CI_TEST_BUCKET_REGION }} GT_S3_REGION: ${{ vars.AWS_CI_TEST_BUCKET_REGION }}
UNITTEST_LOG_DIR: "__unittest_logs" UNITTEST_LOG_DIR: "__unittest_logs"
cleanbuild-linux-nix:
name: Run clean build on Linux
runs-on: ubuntu-latest
if: ${{ github.repository == 'GreptimeTeam/greptimedb' }}
timeout-minutes: 45
steps:
- uses: actions/checkout@v4
with:
fetch-depth: 0
persist-credentials: false
- uses: cachix/install-nix-action@v31
- run: nix develop --command cargo check --bin greptime
env:
CARGO_BUILD_RUSTFLAGS: "-C link-arg=-fuse-ld=mold"
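The new cleanbuild-linux-nix job verifies that the project builds in a clean Nix dev shell, with no toolchain preinstalled on the runner. Assuming the repository ships the flake devShell that nix develop implies, the same check can be reproduced locally:

  - name: Clean build inside the Nix dev shell (local sketch)
    shell: bash
    run: nix develop --command cargo check --bin greptime
    env:
      CARGO_BUILD_RUSTFLAGS: "-C link-arg=-fuse-ld=mold"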
check-status: check-status:
name: Check status name: Check status
needs: [sqlness-test, sqlness-windows, test-on-windows] needs: [sqlness-test, sqlness-windows, test-on-windows]
if: ${{ github.repository == 'GreptimeTeam/greptimedb' }} if: ${{ github.repository == 'GreptimeTeam/greptimedb' }}
runs-on: ubuntu-latest runs-on: ubuntu-20.04
outputs: outputs:
check-result: ${{ steps.set-check-result.outputs.check-result }} check-result: ${{ steps.set-check-result.outputs.check-result }}
steps: steps:
@@ -145,14 +127,11 @@ jobs:
if: ${{ github.repository == 'GreptimeTeam/greptimedb' && always() }} # Not requiring successful dependent jobs, always run. if: ${{ github.repository == 'GreptimeTeam/greptimedb' && always() }} # Not requiring successful dependent jobs, always run.
name: Send notification to Greptime team name: Send notification to Greptime team
needs: [check-status] needs: [check-status]
runs-on: ubuntu-latest runs-on: ubuntu-20.04
env: env:
SLACK_WEBHOOK_URL: ${{ secrets.SLACK_WEBHOOK_URL_DEVELOP_CHANNEL }} SLACK_WEBHOOK_URL: ${{ secrets.SLACK_WEBHOOK_URL_DEVELOP_CHANNEL }}
steps: steps:
- uses: actions/checkout@v4 - uses: actions/checkout@v4
with:
fetch-depth: 0
persist-credentials: false
- uses: ./.github/actions/setup-cyborg - uses: ./.github/actions/setup-cyborg
- name: Report CI status - name: Report CI status
id: report-ci-status id: report-ci-status


@@ -1,42 +0,0 @@
name: 'PR Labeling'
on:
pull_request_target:
types:
- opened
- synchronize
- reopened
permissions:
contents: read
pull-requests: write
issues: write
jobs:
labeler:
runs-on: ubuntu-latest
steps:
- name: Checkout sources
uses: actions/checkout@v4
- uses: actions/labeler@v5
with:
configuration-path: ".github/labeler.yaml"
repo-token: "${{ secrets.GITHUB_TOKEN }}"
size-label:
runs-on: ubuntu-latest
steps:
- uses: pascalgn/size-label-action@v0.5.5
env:
GITHUB_TOKEN: "${{ secrets.GITHUB_TOKEN }}"
with:
sizes: >
{
"0": "XS",
"100": "S",
"300": "M",
"1000": "L",
"1500": "XL",
"2000": "XXL"
}


@@ -24,20 +24,12 @@ on:
description: Release dev-builder-android image description: Release dev-builder-android image
required: false required: false
default: false default: false
update_dev_builder_image_tag:
type: boolean
description: Update the DEV_BUILDER_IMAGE_TAG in Makefile and create a PR
required: false
default: false
jobs: jobs:
release-dev-builder-images: release-dev-builder-images:
name: Release dev builder images name: Release dev builder images
# The jobs are triggered by the following events: if: ${{ inputs.release_dev_builder_ubuntu_image || inputs.release_dev_builder_centos_image || inputs.release_dev_builder_android_image }} # Only manually trigger this job.
# 1. Manually triggered workflow_dispatch event runs-on: ubuntu-20.04-16-cores
# 2. Push event when the PR that modifies the `rust-toolchain.toml` or `docker/dev-builder/**` is merged to main
if: ${{ github.event_name == 'push' || inputs.release_dev_builder_ubuntu_image || inputs.release_dev_builder_centos_image || inputs.release_dev_builder_android_image }}
runs-on: ubuntu-latest
outputs: outputs:
version: ${{ steps.set-version.outputs.version }} version: ${{ steps.set-version.outputs.version }}
steps: steps:
@@ -45,7 +37,6 @@ jobs:
uses: actions/checkout@v4 uses: actions/checkout@v4
with: with:
fetch-depth: 0 fetch-depth: 0
persist-credentials: false
- name: Configure build image version - name: Configure build image version
id: set-version id: set-version
@@ -65,13 +56,13 @@ jobs:
version: ${{ env.VERSION }} version: ${{ env.VERSION }}
dockerhub-image-registry-username: ${{ secrets.DOCKERHUB_USERNAME }} dockerhub-image-registry-username: ${{ secrets.DOCKERHUB_USERNAME }}
dockerhub-image-registry-token: ${{ secrets.DOCKERHUB_TOKEN }} dockerhub-image-registry-token: ${{ secrets.DOCKERHUB_TOKEN }}
build-dev-builder-ubuntu: ${{ inputs.release_dev_builder_ubuntu_image || github.event_name == 'push' }} build-dev-builder-ubuntu: ${{ inputs.release_dev_builder_ubuntu_image }}
build-dev-builder-centos: ${{ inputs.release_dev_builder_centos_image || github.event_name == 'push' }} build-dev-builder-centos: ${{ inputs.release_dev_builder_centos_image }}
build-dev-builder-android: ${{ inputs.release_dev_builder_android_image || github.event_name == 'push' }} build-dev-builder-android: ${{ inputs.release_dev_builder_android_image }}
release-dev-builder-images-ecr: release-dev-builder-images-ecr:
name: Release dev builder images to AWS ECR name: Release dev builder images to AWS ECR
runs-on: ubuntu-latest runs-on: ubuntu-20.04
needs: [ needs: [
release-dev-builder-images release-dev-builder-images
] ]
@@ -93,70 +84,52 @@ jobs:
- name: Push dev-builder-ubuntu image - name: Push dev-builder-ubuntu image
shell: bash shell: bash
if: ${{ inputs.release_dev_builder_ubuntu_image || github.event_name == 'push' }} if: ${{ inputs.release_dev_builder_ubuntu_image }}
env:
IMAGE_VERSION: ${{ needs.release-dev-builder-images.outputs.version }}
IMAGE_NAMESPACE: ${{ vars.IMAGE_NAMESPACE }}
ECR_IMAGE_REGISTRY: ${{ vars.ECR_IMAGE_REGISTRY }}
ECR_IMAGE_NAMESPACE: ${{ vars.ECR_IMAGE_NAMESPACE }}
run: | run: |
docker run -v "${DOCKER_CONFIG:-$HOME/.docker}:/root/.docker:ro" \ docker run -v "${DOCKER_CONFIG:-$HOME/.docker}:/root/.docker:ro" \
-e "REGISTRY_AUTH_FILE=/root/.docker/config.json" \ -e "REGISTRY_AUTH_FILE=/root/.docker/config.json" \
quay.io/skopeo/stable:latest \ quay.io/skopeo/stable:latest \
copy -a docker://docker.io/$IMAGE_NAMESPACE/dev-builder-ubuntu:$IMAGE_VERSION \ copy -a docker://docker.io/${{ vars.IMAGE_NAMESPACE }}/dev-builder-ubuntu:${{ needs.release-dev-builder-images.outputs.version }} \
docker://$ECR_IMAGE_REGISTRY/$ECR_IMAGE_NAMESPACE/dev-builder-ubuntu:$IMAGE_VERSION docker://${{ vars.ECR_IMAGE_REGISTRY }}/${{ vars.ECR_IMAGE_NAMESPACE }}/dev-builder-ubuntu:${{ needs.release-dev-builder-images.outputs.version }}
docker run -v "${DOCKER_CONFIG:-$HOME/.docker}:/root/.docker:ro" \ docker run -v "${DOCKER_CONFIG:-$HOME/.docker}:/root/.docker:ro" \
-e "REGISTRY_AUTH_FILE=/root/.docker/config.json" \ -e "REGISTRY_AUTH_FILE=/root/.docker/config.json" \
quay.io/skopeo/stable:latest \ quay.io/skopeo/stable:latest \
copy -a docker://docker.io/$IMAGE_NAMESPACE/dev-builder-ubuntu:latest \ copy -a docker://docker.io/${{ vars.IMAGE_NAMESPACE }}/dev-builder-ubuntu:latest \
docker://$ECR_IMAGE_REGISTRY/$ECR_IMAGE_NAMESPACE/dev-builder-ubuntu:latest docker://${{ vars.ECR_IMAGE_REGISTRY }}/${{ vars.ECR_IMAGE_NAMESPACE }}/dev-builder-ubuntu:latest
- name: Push dev-builder-centos image - name: Push dev-builder-centos image
shell: bash shell: bash
if: ${{ inputs.release_dev_builder_centos_image || github.event_name == 'push' }} if: ${{ inputs.release_dev_builder_centos_image }}
env:
IMAGE_VERSION: ${{ needs.release-dev-builder-images.outputs.version }}
IMAGE_NAMESPACE: ${{ vars.IMAGE_NAMESPACE }}
ECR_IMAGE_REGISTRY: ${{ vars.ECR_IMAGE_REGISTRY }}
ECR_IMAGE_NAMESPACE: ${{ vars.ECR_IMAGE_NAMESPACE }}
run: | run: |
docker run -v "${DOCKER_CONFIG:-$HOME/.docker}:/root/.docker:ro" \ docker run -v "${DOCKER_CONFIG:-$HOME/.docker}:/root/.docker:ro" \
-e "REGISTRY_AUTH_FILE=/root/.docker/config.json" \ -e "REGISTRY_AUTH_FILE=/root/.docker/config.json" \
quay.io/skopeo/stable:latest \ quay.io/skopeo/stable:latest \
copy -a docker://docker.io/$IMAGE_NAMESPACE/dev-builder-centos:$IMAGE_VERSION \ copy -a docker://docker.io/${{ vars.IMAGE_NAMESPACE }}/dev-builder-centos:${{ needs.release-dev-builder-images.outputs.version }} \
docker://$ECR_IMAGE_REGISTRY/$ECR_IMAGE_NAMESPACE/dev-builder-centos:$IMAGE_VERSION docker://${{ vars.ECR_IMAGE_REGISTRY }}/${{ vars.ECR_IMAGE_NAMESPACE }}/dev-builder-centos:${{ needs.release-dev-builder-images.outputs.version }}
docker run -v "${DOCKER_CONFIG:-$HOME/.docker}:/root/.docker:ro" \ docker run -v "${DOCKER_CONFIG:-$HOME/.docker}:/root/.docker:ro" \
-e "REGISTRY_AUTH_FILE=/root/.docker/config.json" \ -e "REGISTRY_AUTH_FILE=/root/.docker/config.json" \
quay.io/skopeo/stable:latest \ quay.io/skopeo/stable:latest \
copy -a docker://docker.io/$IMAGE_NAMESPACE/dev-builder-centos:latest \ copy -a docker://docker.io/${{ vars.IMAGE_NAMESPACE }}/dev-builder-centos:latest \
docker://$ECR_IMAGE_REGISTRY/$ECR_IMAGE_NAMESPACE/dev-builder-centos:latest docker://${{ vars.ECR_IMAGE_REGISTRY }}/${{ vars.ECR_IMAGE_NAMESPACE }}/dev-builder-centos:latest
- name: Push dev-builder-android image - name: Push dev-builder-android image
shell: bash shell: bash
if: ${{ inputs.release_dev_builder_android_image || github.event_name == 'push' }} if: ${{ inputs.release_dev_builder_android_image }}
env:
IMAGE_VERSION: ${{ needs.release-dev-builder-images.outputs.version }}
IMAGE_NAMESPACE: ${{ vars.IMAGE_NAMESPACE }}
ECR_IMAGE_REGISTRY: ${{ vars.ECR_IMAGE_REGISTRY }}
ECR_IMAGE_NAMESPACE: ${{ vars.ECR_IMAGE_NAMESPACE }}
run: | run: |
docker run -v "${DOCKER_CONFIG:-$HOME/.docker}:/root/.docker:ro" \ docker run -v "${DOCKER_CONFIG:-$HOME/.docker}:/root/.docker:ro" \
-e "REGISTRY_AUTH_FILE=/root/.docker/config.json" \ -e "REGISTRY_AUTH_FILE=/root/.docker/config.json" \
quay.io/skopeo/stable:latest \ quay.io/skopeo/stable:latest \
copy -a docker://docker.io/$IMAGE_NAMESPACE/dev-builder-android:$IMAGE_VERSION \ copy -a docker://docker.io/${{ vars.IMAGE_NAMESPACE }}/dev-builder-android:${{ needs.release-dev-builder-images.outputs.version }} \
docker://$ECR_IMAGE_REGISTRY/$ECR_IMAGE_NAMESPACE/dev-builder-android:$IMAGE_VERSION docker://${{ vars.ECR_IMAGE_REGISTRY }}/${{ vars.ECR_IMAGE_NAMESPACE }}/dev-builder-android:${{ needs.release-dev-builder-images.outputs.version }}
docker run -v "${DOCKER_CONFIG:-$HOME/.docker}:/root/.docker:ro" \ docker run -v "${DOCKER_CONFIG:-$HOME/.docker}:/root/.docker:ro" \
-e "REGISTRY_AUTH_FILE=/root/.docker/config.json" \ -e "REGISTRY_AUTH_FILE=/root/.docker/config.json" \
quay.io/skopeo/stable:latest \ quay.io/skopeo/stable:latest \
copy -a docker://docker.io/$IMAGE_NAMESPACE/dev-builder-android:latest \ copy -a docker://docker.io/${{ vars.IMAGE_NAMESPACE }}/dev-builder-android:latest \
docker://$ECR_IMAGE_REGISTRY/$ECR_IMAGE_NAMESPACE/dev-builder-android:latest docker://${{ vars.ECR_IMAGE_REGISTRY }}/${{ vars.ECR_IMAGE_NAMESPACE }}/dev-builder-android:latest
release-dev-builder-images-cn: # Note: Be careful of issue https://github.com/containers/skopeo/issues/1874; we decided to use the latest stable skopeo container. release-dev-builder-images-cn: # Note: Be careful of issue https://github.com/containers/skopeo/issues/1874; we decided to use the latest stable skopeo container.
name: Release dev builder images to CN region name: Release dev builder images to CN region
runs-on: ubuntu-latest runs-on: ubuntu-20.04
needs: [ needs: [
release-dev-builder-images release-dev-builder-images
] ]
@@ -170,63 +143,30 @@ jobs:
- name: Push dev-builder-ubuntu image - name: Push dev-builder-ubuntu image
shell: bash shell: bash
if: ${{ inputs.release_dev_builder_ubuntu_image || github.event_name == 'push' }} if: ${{ inputs.release_dev_builder_ubuntu_image }}
env:
IMAGE_VERSION: ${{ needs.release-dev-builder-images.outputs.version }}
IMAGE_NAMESPACE: ${{ vars.IMAGE_NAMESPACE }}
ACR_IMAGE_REGISTRY: ${{ vars.ACR_IMAGE_REGISTRY }}
run: | run: |
docker run -v "${DOCKER_CONFIG:-$HOME/.docker}:/root/.docker:ro" \ docker run -v "${DOCKER_CONFIG:-$HOME/.docker}:/root/.docker:ro" \
-e "REGISTRY_AUTH_FILE=/root/.docker/config.json" \ -e "REGISTRY_AUTH_FILE=/root/.docker/config.json" \
quay.io/skopeo/stable:latest \ quay.io/skopeo/stable:latest \
copy -a docker://docker.io/$IMAGE_NAMESPACE/dev-builder-ubuntu:$IMAGE_VERSION \ copy -a docker://docker.io/${{ vars.IMAGE_NAMESPACE }}/dev-builder-ubuntu:${{ needs.release-dev-builder-images.outputs.version }} \
docker://$ACR_IMAGE_REGISTRY/$IMAGE_NAMESPACE/dev-builder-ubuntu:$IMAGE_VERSION docker://${{ vars.ACR_IMAGE_REGISTRY }}/${{ vars.IMAGE_NAMESPACE }}/dev-builder-ubuntu:${{ needs.release-dev-builder-images.outputs.version }}
- name: Push dev-builder-centos image - name: Push dev-builder-centos image
shell: bash shell: bash
if: ${{ inputs.release_dev_builder_centos_image || github.event_name == 'push' }} if: ${{ inputs.release_dev_builder_centos_image }}
env:
IMAGE_VERSION: ${{ needs.release-dev-builder-images.outputs.version }}
IMAGE_NAMESPACE: ${{ vars.IMAGE_NAMESPACE }}
ACR_IMAGE_REGISTRY: ${{ vars.ACR_IMAGE_REGISTRY }}
run: | run: |
docker run -v "${DOCKER_CONFIG:-$HOME/.docker}:/root/.docker:ro" \ docker run -v "${DOCKER_CONFIG:-$HOME/.docker}:/root/.docker:ro" \
-e "REGISTRY_AUTH_FILE=/root/.docker/config.json" \ -e "REGISTRY_AUTH_FILE=/root/.docker/config.json" \
quay.io/skopeo/stable:latest \ quay.io/skopeo/stable:latest \
copy -a docker://docker.io/$IMAGE_NAMESPACE/dev-builder-centos:$IMAGE_VERSION \ copy -a docker://docker.io/${{ vars.IMAGE_NAMESPACE }}/dev-builder-centos:${{ needs.release-dev-builder-images.outputs.version }} \
docker://$ACR_IMAGE_REGISTRY/$IMAGE_NAMESPACE/dev-builder-centos:$IMAGE_VERSION docker://${{ vars.ACR_IMAGE_REGISTRY }}/${{ vars.IMAGE_NAMESPACE }}/dev-builder-centos:${{ needs.release-dev-builder-images.outputs.version }}
- name: Push dev-builder-android image - name: Push dev-builder-android image
shell: bash shell: bash
if: ${{ inputs.release_dev_builder_android_image || github.event_name == 'push' }} if: ${{ inputs.release_dev_builder_android_image }}
env:
IMAGE_VERSION: ${{ needs.release-dev-builder-images.outputs.version }}
IMAGE_NAMESPACE: ${{ vars.IMAGE_NAMESPACE }}
ACR_IMAGE_REGISTRY: ${{ vars.ACR_IMAGE_REGISTRY }}
run: | run: |
docker run -v "${DOCKER_CONFIG:-$HOME/.docker}:/root/.docker:ro" \ docker run -v "${DOCKER_CONFIG:-$HOME/.docker}:/root/.docker:ro" \
-e "REGISTRY_AUTH_FILE=/root/.docker/config.json" \ -e "REGISTRY_AUTH_FILE=/root/.docker/config.json" \
quay.io/skopeo/stable:latest \ quay.io/skopeo/stable:latest \
copy -a docker://docker.io/$IMAGE_NAMESPACE/dev-builder-android:$IMAGE_VERSION \ copy -a docker://docker.io/${{ vars.IMAGE_NAMESPACE }}/dev-builder-android:${{ needs.release-dev-builder-images.outputs.version }} \
docker://$ACR_IMAGE_REGISTRY/$IMAGE_NAMESPACE/dev-builder-android:$IMAGE_VERSION docker://${{ vars.ACR_IMAGE_REGISTRY }}/${{ vars.IMAGE_NAMESPACE }}/dev-builder-android:${{ needs.release-dev-builder-images.outputs.version }}
update-dev-builder-image-tag:
name: Update dev-builder image tag
runs-on: ubuntu-latest
permissions:
contents: write
pull-requests: write
if: ${{ github.event_name == 'push' || inputs.update_dev_builder_image_tag }}
needs: [
release-dev-builder-images
]
steps:
- name: Checkout repository
uses: actions/checkout@v4
- name: Update dev-builder image tag
shell: bash
env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
run: |
./.github/scripts/update-dev-builder-version.sh ${{ needs.release-dev-builder-images.outputs.version }}


@@ -18,11 +18,11 @@ on:
description: The runner used to build linux-amd64 artifacts description: The runner used to build linux-amd64 artifacts
default: ec2-c6i.4xlarge-amd64 default: ec2-c6i.4xlarge-amd64
options: options:
- ubuntu-22.04 - ubuntu-20.04
- ubuntu-22.04-8-cores - ubuntu-20.04-8-cores
- ubuntu-22.04-16-cores - ubuntu-20.04-16-cores
- ubuntu-22.04-32-cores - ubuntu-20.04-32-cores
- ubuntu-22.04-64-cores - ubuntu-20.04-64-cores
- ec2-c6i.xlarge-amd64 # 4C8G - ec2-c6i.xlarge-amd64 # 4C8G
- ec2-c6i.2xlarge-amd64 # 8C16G - ec2-c6i.2xlarge-amd64 # 8C16G
- ec2-c6i.4xlarge-amd64 # 16C32G - ec2-c6i.4xlarge-amd64 # 16C32G
@@ -31,7 +31,7 @@ on:
linux_arm64_runner: linux_arm64_runner:
type: choice type: choice
description: The runner used to build linux-arm64 artifacts description: The runner used to build linux-arm64 artifacts
default: ec2-c6g.8xlarge-arm64 default: ec2-c6g.4xlarge-arm64
options: options:
- ubuntu-2204-32-cores-arm - ubuntu-2204-32-cores-arm
- ec2-c6g.xlarge-arm64 # 4C8G - ec2-c6g.xlarge-arm64 # 4C8G
@@ -88,14 +88,21 @@ env:
# Controls whether to run tests, including unit-test, integration-test and sqlness. # Controls whether to run tests, including unit-test, integration-test and sqlness.
DISABLE_RUN_TESTS: ${{ inputs.skip_test || vars.DEFAULT_SKIP_TEST }} DISABLE_RUN_TESTS: ${{ inputs.skip_test || vars.DEFAULT_SKIP_TEST }}
# The scheduled version is '${{ env.NEXT_RELEASE_VERSION }}-nightly-YYYYMMDD', like v0.2.0-nightly-20230313; # The scheduled version is '${{ env.NEXT_RELEASE_VERSION }}-nightly-YYYYMMDD', like v0.2.0-nigthly-20230313;
NIGHTLY_RELEASE_PREFIX: nightly NIGHTLY_RELEASE_PREFIX: nightly
# Note: The NEXT_RELEASE_VERSION should be modified manually by every formal release.
NEXT_RELEASE_VERSION: v0.10.0
# Permission reference: https://docs.github.com/en/actions/using-jobs/assigning-permissions-to-jobs
permissions:
issues: write # Allows the action to create issues for cyborg.
contents: write # Allows the action to create a release.
jobs: jobs:
allocate-runners: allocate-runners:
name: Allocate runners name: Allocate runners
if: ${{ github.repository == 'GreptimeTeam/greptimedb' }} if: ${{ github.repository == 'GreptimeTeam/greptimedb' }}
runs-on: ubuntu-latest runs-on: ubuntu-20.04
outputs: outputs:
linux-amd64-runner: ${{ steps.start-linux-amd64-runner.outputs.label }} linux-amd64-runner: ${{ steps.start-linux-amd64-runner.outputs.label }}
linux-arm64-runner: ${{ steps.start-linux-arm64-runner.outputs.label }} linux-arm64-runner: ${{ steps.start-linux-arm64-runner.outputs.label }}
@@ -110,14 +117,11 @@ jobs:
# The 'version' is used as the global tag name of the release workflow. # The 'version' is used as the global tag name of the release workflow.
version: ${{ steps.create-version.outputs.version }} version: ${{ steps.create-version.outputs.version }}
should-push-latest-tag: ${{ steps.check-version.outputs.should-push-latest-tag }}
steps: steps:
- name: Checkout - name: Checkout
uses: actions/checkout@v4 uses: actions/checkout@v4
with: with:
fetch-depth: 0 fetch-depth: 0
persist-credentials: false
- name: Check Rust toolchain version - name: Check Rust toolchain version
shell: bash shell: bash
@@ -126,7 +130,7 @@ jobs:
# The create-version will create a global variable named 'version' in the global workflows. # The create-version will create a global variable named 'version' in the global workflows.
# - If it's a tag push release, the version is the tag name(${{ github.ref_name }}); # - If it's a tag push release, the version is the tag name(${{ github.ref_name }});
# - If it's a scheduled release, the version is '${{ env.NEXT_RELEASE_VERSION }}-nightly-$buildTime', like v0.2.0-nightly-20230313; # - If it's a scheduled release, the version is '${{ env.NEXT_RELEASE_VERSION }}-nightly-$buildTime', like v0.2.0-nigthly-20230313;
# - If it's a manual release, the version is '${{ env.NEXT_RELEASE_VERSION }}-<short-git-sha>-YYYYMMDDSS', like v0.2.0-e5b243c-2023071245; # - If it's a manual release, the version is '${{ env.NEXT_RELEASE_VERSION }}-<short-git-sha>-YYYYMMDDSS', like v0.2.0-e5b243c-2023071245;
- name: Create version - name: Create version
id: create-version id: create-version
@@ -135,13 +139,9 @@ jobs:
env: env:
GITHUB_EVENT_NAME: ${{ github.event_name }} GITHUB_EVENT_NAME: ${{ github.event_name }}
GITHUB_REF_NAME: ${{ github.ref_name }} GITHUB_REF_NAME: ${{ github.ref_name }}
NEXT_RELEASE_VERSION: ${{ env.NEXT_RELEASE_VERSION }}
NIGHTLY_RELEASE_PREFIX: ${{ env.NIGHTLY_RELEASE_PREFIX }} NIGHTLY_RELEASE_PREFIX: ${{ env.NIGHTLY_RELEASE_PREFIX }}
- name: Check version
id: check-version
run: |
./.github/scripts/check-version.sh "${{ steps.create-version.outputs.version }}"
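The new check-version step feeds the should-push-latest-tag job output declared earlier in this diff. The script's contents are not part of the diff; as a sketch of the usual mechanism, a step like the one below publishes a value by appending to $GITHUB_OUTPUT, and the job then re-exports it:

  - name: Check version
    id: check-version
    shell: bash
    run: |
      # Hypothetical body: the real logic lives in .github/scripts/check-version.sh
      echo "should-push-latest-tag=true" >> "$GITHUB_OUTPUT"
  # At the job level (as the diff already declares):
  # outputs:
  #   should-push-latest-tag: ${{ steps.check-version.outputs.should-push-latest-tag }}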
- name: Allocate linux-amd64 runner - name: Allocate linux-amd64 runner
if: ${{ inputs.build_linux_amd64_artifacts || github.event_name == 'push' || github.event_name == 'schedule' }} if: ${{ inputs.build_linux_amd64_artifacts || github.event_name == 'push' || github.event_name == 'schedule' }}
uses: ./.github/actions/start-runner uses: ./.github/actions/start-runner
@@ -181,7 +181,6 @@ jobs:
- uses: actions/checkout@v4 - uses: actions/checkout@v4
with: with:
fetch-depth: 0 fetch-depth: 0
persist-credentials: false
- uses: ./.github/actions/build-linux-artifacts - uses: ./.github/actions/build-linux-artifacts
with: with:
@@ -203,7 +202,6 @@ jobs:
- uses: actions/checkout@v4 - uses: actions/checkout@v4
with: with:
fetch-depth: 0 fetch-depth: 0
persist-credentials: false
- uses: ./.github/actions/build-linux-artifacts - uses: ./.github/actions/build-linux-artifacts
with: with:
@@ -224,10 +222,18 @@ jobs:
arch: aarch64-apple-darwin arch: aarch64-apple-darwin
features: servers/dashboard features: servers/dashboard
artifacts-dir-prefix: greptime-darwin-arm64 artifacts-dir-prefix: greptime-darwin-arm64
- os: ${{ needs.allocate-runners.outputs.macos-runner }}
arch: aarch64-apple-darwin
features: pyo3_backend,servers/dashboard
artifacts-dir-prefix: greptime-darwin-arm64-pyo3
- os: ${{ needs.allocate-runners.outputs.macos-runner }} - os: ${{ needs.allocate-runners.outputs.macos-runner }}
features: servers/dashboard features: servers/dashboard
arch: x86_64-apple-darwin arch: x86_64-apple-darwin
artifacts-dir-prefix: greptime-darwin-amd64 artifacts-dir-prefix: greptime-darwin-amd64
- os: ${{ needs.allocate-runners.outputs.macos-runner }}
features: pyo3_backend,servers/dashboard
arch: x86_64-apple-darwin
artifacts-dir-prefix: greptime-darwin-amd64-pyo3
runs-on: ${{ matrix.os }} runs-on: ${{ matrix.os }}
outputs: outputs:
build-macos-result: ${{ steps.set-build-macos-result.outputs.build-macos-result }} build-macos-result: ${{ steps.set-build-macos-result.outputs.build-macos-result }}
@@ -239,7 +245,6 @@ jobs:
- uses: actions/checkout@v4 - uses: actions/checkout@v4
with: with:
fetch-depth: 0 fetch-depth: 0
persist-credentials: false
- uses: ./.github/actions/build-macos-artifacts - uses: ./.github/actions/build-macos-artifacts
with: with:
@@ -266,6 +271,10 @@ jobs:
arch: x86_64-pc-windows-msvc arch: x86_64-pc-windows-msvc
features: servers/dashboard features: servers/dashboard
artifacts-dir-prefix: greptime-windows-amd64 artifacts-dir-prefix: greptime-windows-amd64
- os: ${{ needs.allocate-runners.outputs.windows-runner }}
arch: x86_64-pc-windows-msvc
features: pyo3_backend,servers/dashboard
artifacts-dir-prefix: greptime-windows-amd64-pyo3
runs-on: ${{ matrix.os }} runs-on: ${{ matrix.os }}
outputs: outputs:
build-windows-result: ${{ steps.set-build-windows-result.outputs.build-windows-result }} build-windows-result: ${{ steps.set-build-windows-result.outputs.build-windows-result }}
@@ -279,7 +288,6 @@ jobs:
- uses: actions/checkout@v4 - uses: actions/checkout@v4
with: with:
fetch-depth: 0 fetch-depth: 0
persist-credentials: false
- uses: ./.github/actions/build-windows-artifacts - uses: ./.github/actions/build-windows-artifacts
with: with:
@@ -303,25 +311,22 @@ jobs:
build-linux-amd64-artifacts, build-linux-amd64-artifacts,
build-linux-arm64-artifacts, build-linux-arm64-artifacts,
] ]
runs-on: ubuntu-latest runs-on: ubuntu-2004-16-cores
outputs: outputs:
build-image-result: ${{ steps.set-build-image-result.outputs.build-image-result }} build-image-result: ${{ steps.set-build-image-result.outputs.build-image-result }}
steps: steps:
- uses: actions/checkout@v4 - uses: actions/checkout@v4
with: with:
fetch-depth: 0 fetch-depth: 0
persist-credentials: false
- name: Build and push images to dockerhub - name: Build and push images to dockerhub
uses: ./.github/actions/build-images uses: ./.github/actions/build-images
with: with:
image-registry: docker.io image-registry: docker.io
image-namespace: ${{ vars.IMAGE_NAMESPACE }} image-namespace: ${{ vars.IMAGE_NAMESPACE }}
image-name: ${{ vars.GREPTIMEDB_IMAGE_NAME }}
image-registry-username: ${{ secrets.DOCKERHUB_USERNAME }} image-registry-username: ${{ secrets.DOCKERHUB_USERNAME }}
image-registry-password: ${{ secrets.DOCKERHUB_TOKEN }} image-registry-password: ${{ secrets.DOCKERHUB_TOKEN }}
version: ${{ needs.allocate-runners.outputs.version }} version: ${{ needs.allocate-runners.outputs.version }}
push-latest-tag: ${{ needs.allocate-runners.outputs.should-push-latest-tag == 'true' && github.ref_type == 'tag' && !contains(github.ref_name, 'nightly') && github.event_name != 'schedule' }}
- name: Set build image result - name: Set build image result
id: set-build-image-result id: set-build-image-result
@@ -339,7 +344,7 @@ jobs:
build-windows-artifacts, build-windows-artifacts,
release-images-to-dockerhub, release-images-to-dockerhub,
] ]
runs-on: ubuntu-latest-16-cores runs-on: ubuntu-20.04
# When we push to ACR, it's easy to fail due to some unknown network issues. # When we push to ACR, it's easy to fail due to some unknown network issues.
# However, we don't want to fail the whole workflow because of this. # However, we don't want to fail the whole workflow because of this.
# The ACR has a daily sync with DockerHub, so don't worry about the image not being updated. # The ACR has a daily sync with DockerHub, so don't worry about the image not being updated.
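The comments in this hunk note that pushes to ACR can fail for transient network reasons and that such failures should not fail the whole release, since ACR is synced daily from DockerHub. A hedged sketch of how a job is typically made non-blocking in GitHub Actions; the exact mechanism is not visible in this hunk, so `continue-on-error` is an assumption here:

```yaml
jobs:
  release-cn-artifacts:                # illustrative job name
    name: Release artifacts to CN region
    # Assumption: allow this job to fail without failing the workflow,
    # since the ACR mirror is synced daily from DockerHub anyway.
    continue-on-error: true
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@v4
        with:
          fetch-depth: 0
      - name: Release artifacts to CN region
        uses: ./.github/actions/release-cn-artifacts
```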
@@ -348,14 +353,13 @@ jobs:
- uses: actions/checkout@v4 - uses: actions/checkout@v4
with: with:
fetch-depth: 0 fetch-depth: 0
persist-credentials: false
- name: Release artifacts to CN region - name: Release artifacts to CN region
uses: ./.github/actions/release-cn-artifacts uses: ./.github/actions/release-cn-artifacts
with: with:
src-image-registry: docker.io src-image-registry: docker.io
src-image-namespace: ${{ vars.IMAGE_NAMESPACE }} src-image-namespace: ${{ vars.IMAGE_NAMESPACE }}
src-image-name: ${{ vars.GREPTIMEDB_IMAGE_NAME }} src-image-name: greptimedb
dst-image-registry-username: ${{ secrets.ALICLOUD_USERNAME }} dst-image-registry-username: ${{ secrets.ALICLOUD_USERNAME }}
dst-image-registry-password: ${{ secrets.ALICLOUD_PASSWORD }} dst-image-registry-password: ${{ secrets.ALICLOUD_PASSWORD }}
dst-image-registry: ${{ vars.ACR_IMAGE_REGISTRY }} dst-image-registry: ${{ vars.ACR_IMAGE_REGISTRY }}
@@ -366,9 +370,8 @@ jobs:
aws-cn-secret-access-key: ${{ secrets.AWS_CN_SECRET_ACCESS_KEY }} aws-cn-secret-access-key: ${{ secrets.AWS_CN_SECRET_ACCESS_KEY }}
aws-cn-region: ${{ vars.AWS_RELEASE_BUCKET_REGION }} aws-cn-region: ${{ vars.AWS_RELEASE_BUCKET_REGION }}
dev-mode: false dev-mode: false
upload-to-s3: true
update-version-info: true update-version-info: true
push-latest-tag: ${{ needs.allocate-runners.outputs.should-push-latest-tag == 'true' && github.ref_type == 'tag' && !contains(github.ref_name, 'nightly') && github.event_name != 'schedule' }} push-latest-tag: true
publish-github-release: publish-github-release:
name: Create GitHub release and upload artifacts name: Create GitHub release and upload artifacts
@@ -381,12 +384,11 @@ jobs:
build-windows-artifacts, build-windows-artifacts,
release-images-to-dockerhub, release-images-to-dockerhub,
] ]
runs-on: ubuntu-latest runs-on: ubuntu-20.04
steps: steps:
- uses: actions/checkout@v4 - uses: actions/checkout@v4
with: with:
fetch-depth: 0 fetch-depth: 0
persist-credentials: false
- name: Publish GitHub release - name: Publish GitHub release
uses: ./.github/actions/publish-github-release uses: ./.github/actions/publish-github-release
@@ -395,12 +397,12 @@ jobs:
### Stop runners ### ### Stop runners ###
# It's very necessary to split the job of releasing runners into 'stop-linux-amd64-runner' and 'stop-linux-arm64-runner'. # It's very necessary to split the job of releasing runners into 'stop-linux-amd64-runner' and 'stop-linux-arm64-runner'.
# Because we can terminate the specified EC2 instance immediately after the job is finished without unnecessary waiting. # Because we can terminate the specified EC2 instance immediately after the job is finished without uncessary waiting.
stop-linux-amd64-runner: # It's always run as the last job in the workflow to make sure that the runner is released. stop-linux-amd64-runner: # It's always run as the last job in the workflow to make sure that the runner is released.
name: Stop linux-amd64 runner name: Stop linux-amd64 runner
# Only run this job when the runner is allocated. # Only run this job when the runner is allocated.
if: ${{ always() }} if: ${{ always() }}
runs-on: ubuntu-latest runs-on: ubuntu-20.04
needs: [ needs: [
allocate-runners, allocate-runners,
build-linux-amd64-artifacts, build-linux-amd64-artifacts,
@@ -410,7 +412,6 @@ jobs:
uses: actions/checkout@v4 uses: actions/checkout@v4
with: with:
fetch-depth: 0 fetch-depth: 0
persist-credentials: false
- name: Stop EC2 runner - name: Stop EC2 runner
uses: ./.github/actions/stop-runner uses: ./.github/actions/stop-runner
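The "Stop runners" comments above explain why releasing runners is split into per-architecture jobs: each EC2 instance can be terminated as soon as its own build finishes, and `if: always()` makes the cleanup run even when the build fails. A minimal sketch of that pattern, assembled from the surrounding hunks (where the two sides of the comparison differ, one of the shown values is picked):

```yaml
jobs:
  stop-linux-amd64-runner:
    name: Stop linux-amd64 runner
    if: ${{ always() }}                 # run even if the build job failed
    needs: [allocate-runners, build-linux-amd64-artifacts]
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@v4
        with:
          fetch-depth: 0
      - name: Stop EC2 runner
        uses: ./.github/actions/stop-runner
        with:
          # Only the inputs visible in the hunks are shown here.
          aws-region: ${{ vars.EC2_RUNNER_REGION }}
          github-token: ${{ secrets.GH_PERSONAL_ACCESS_TOKEN }}
```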
@@ -426,7 +427,7 @@ jobs:
name: Stop linux-arm64 runner name: Stop linux-arm64 runner
# Only run this job when the runner is allocated. # Only run this job when the runner is allocated.
if: ${{ always() }} if: ${{ always() }}
runs-on: ubuntu-latest runs-on: ubuntu-20.04
needs: [ needs: [
allocate-runners, allocate-runners,
build-linux-arm64-artifacts, build-linux-arm64-artifacts,
@@ -436,7 +437,6 @@ jobs:
uses: actions/checkout@v4 uses: actions/checkout@v4
with: with:
fetch-depth: 0 fetch-depth: 0
persist-credentials: false
- name: Stop EC2 runner - name: Stop EC2 runner
uses: ./.github/actions/stop-runner uses: ./.github/actions/stop-runner
@@ -448,74 +448,6 @@ jobs:
aws-region: ${{ vars.EC2_RUNNER_REGION }} aws-region: ${{ vars.EC2_RUNNER_REGION }}
github-token: ${{ secrets.GH_PERSONAL_ACCESS_TOKEN }} github-token: ${{ secrets.GH_PERSONAL_ACCESS_TOKEN }}
bump-downstream-repo-versions:
name: Bump downstream repo versions
if: ${{ github.event_name == 'push' || github.event_name == 'schedule' }}
needs: [allocate-runners, publish-github-release]
runs-on: ubuntu-latest
# Permission reference: https://docs.github.com/en/actions/using-jobs/assigning-permissions-to-jobs
permissions:
issues: write # Allows the action to create issues for cyborg.
contents: write # Allows the action to create a release.
steps:
- uses: actions/checkout@v4
with:
fetch-depth: 0
persist-credentials: false
- uses: ./.github/actions/setup-cyborg
- name: Bump downstream repo versions
working-directory: cyborg
run: pnpm tsx bin/bump-versions.ts
env:
TARGET_REPOS: website,docs,demo
VERSION: ${{ needs.allocate-runners.outputs.version }}
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
WEBSITE_REPO_TOKEN: ${{ secrets.WEBSITE_REPO_TOKEN }}
DOCS_REPO_TOKEN: ${{ secrets.DOCS_REPO_TOKEN }}
DEMO_REPO_TOKEN: ${{ secrets.DEMO_REPO_TOKEN }}
bump-helm-charts-version:
name: Bump helm charts version
if: ${{ github.ref_type == 'tag' && !contains(github.ref_name, 'nightly') && github.event_name != 'schedule' }}
needs: [allocate-runners, publish-github-release]
runs-on: ubuntu-latest
permissions:
contents: write
pull-requests: write
steps:
- name: Checkout repository
uses: actions/checkout@v4
with:
fetch-depth: 0
- name: Bump helm charts version
env:
GITHUB_TOKEN: ${{ secrets.HELM_CHARTS_REPO_TOKEN }}
VERSION: ${{ needs.allocate-runners.outputs.version }}
run: |
./.github/scripts/update-helm-charts-version.sh
bump-homebrew-greptime-version:
name: Bump homebrew greptime version
if: ${{ github.ref_type == 'tag' && !contains(github.ref_name, 'nightly') && github.event_name != 'schedule' }}
needs: [allocate-runners, publish-github-release]
runs-on: ubuntu-latest
permissions:
contents: write
pull-requests: write
steps:
- name: Checkout repository
uses: actions/checkout@v4
with:
fetch-depth: 0
- name: Bump homebrew greptime version
env:
GITHUB_TOKEN: ${{ secrets.HOMEBREW_GREPTIME_REPO_TOKEN }}
VERSION: ${{ needs.allocate-runners.outputs.version }}
run: |
./.github/scripts/update-homebrew-greptme-version.sh
notification: notification:
if: ${{ github.repository == 'GreptimeTeam/greptimedb' && (github.event_name == 'push' || github.event_name == 'schedule') && always() }} if: ${{ github.repository == 'GreptimeTeam/greptimedb' && (github.event_name == 'push' || github.event_name == 'schedule') && always() }}
name: Send notification to Greptime team name: Send notification to Greptime team
@@ -524,18 +456,11 @@ jobs:
build-macos-artifacts, build-macos-artifacts,
build-windows-artifacts, build-windows-artifacts,
] ]
runs-on: ubuntu-latest runs-on: ubuntu-20.04
# Permission reference: https://docs.github.com/en/actions/using-jobs/assigning-permissions-to-jobs
permissions:
issues: write # Allows the action to create issues for cyborg.
contents: write # Allows the action to create a release.
env: env:
SLACK_WEBHOOK_URL: ${{ secrets.SLACK_WEBHOOK_URL_DEVELOP_CHANNEL }} SLACK_WEBHOOK_URL: ${{ secrets.SLACK_WEBHOOK_URL_DEVELOP_CHANNEL }}
steps: steps:
- uses: actions/checkout@v4 - uses: actions/checkout@v4
with:
fetch-depth: 0
persist-credentials: false
- uses: ./.github/actions/setup-cyborg - uses: ./.github/actions/setup-cyborg
- name: Report CI status - name: Report CI status
id: report-ci-status id: report-ci-status


@@ -4,20 +4,18 @@ on:
- cron: '4 2 * * *' - cron: '4 2 * * *'
workflow_dispatch: workflow_dispatch:
permissions:
contents: read
issues: write
pull-requests: write
jobs: jobs:
maintenance: maintenance:
name: Periodic Maintenance name: Periodic Maintenance
runs-on: ubuntu-latest runs-on: ubuntu-latest
permissions:
contents: read
issues: write
pull-requests: write
if: ${{ github.repository == 'GreptimeTeam/greptimedb' }} if: ${{ github.repository == 'GreptimeTeam/greptimedb' }}
steps: steps:
- uses: actions/checkout@v4 - uses: actions/checkout@v4
with:
persist-credentials: false
- uses: ./.github/actions/setup-cyborg - uses: ./.github/actions/setup-cyborg
- name: Do Maintenance - name: Do Maintenance
working-directory: cyborg working-directory: cyborg


@@ -1,24 +1,15 @@
name: "Semantic Pull Request" name: "Semantic Pull Request"
on: on:
pull_request: pull_request_target:
types: types:
- opened - opened
- reopened - reopened
- edited - edited
concurrency:
group: ${{ github.workflow }}-${{ github.head_ref || github.run_id }}
cancel-in-progress: true
permissions:
issues: write
contents: write
pull-requests: write
jobs: jobs:
check: check:
runs-on: ubuntu-latest runs-on: ubuntu-20.04
timeout-minutes: 10 timeout-minutes: 10
steps: steps:
- uses: actions/checkout@v4 - uses: actions/checkout@v4

.gitignore (13 lines changed)

@@ -47,17 +47,6 @@ benchmarks/data
venv/ venv/
# Fuzz tests # Fuzz tests
tests-fuzz/artifacts/ tests-fuzz/artifacts/
tests-fuzz/corpus/ tests-fuzz/corpus/
# Nix
.direnv
.envrc
## default data home
greptimedb_data
# github
!/.github


@@ -17,6 +17,6 @@ repos:
- id: fmt - id: fmt
- id: clippy - id: clippy
args: ["--workspace", "--all-targets", "--all-features", "--", "-D", "warnings"] args: ["--workspace", "--all-targets", "--all-features", "--", "-D", "warnings"]
stages: [pre-push] stages: [push]
- id: cargo-check - id: cargo-check
args: ["--workspace", "--all-targets", "--all-features"] args: ["--workspace", "--all-targets", "--all-features"]


@@ -3,43 +3,41 @@
## Individual Committers (in alphabetical order) ## Individual Committers (in alphabetical order)
* [CookiePieWw](https://github.com/CookiePieWw) * [CookiePieWw](https://github.com/CookiePieWw)
* [KKould](https://github.com/KKould)
* [NiwakaDev](https://github.com/NiwakaDev)
* [etolbakov](https://github.com/etolbakov) * [etolbakov](https://github.com/etolbakov)
* [irenjj](https://github.com/irenjj) * [irenjj](https://github.com/irenjj)
* [KKould](https://github.com/KKould)
* [Lanqing Yang](https://github.com/lyang24)
* [NiwakaDev](https://github.com/NiwakaDev)
* [tisonkun](https://github.com/tisonkun)
## Team Members (in alphabetical order) ## Team Members (in alphabetical order)
* [Breeze-P](https://github.com/Breeze-P)
* [GrepTime](https://github.com/GrepTime)
* [MichaelScofield](https://github.com/MichaelScofield)
* [Wenjie0329](https://github.com/Wenjie0329)
* [WenyXu](https://github.com/WenyXu)
* [ZonaHex](https://github.com/ZonaHex)
* [apdong2022](https://github.com/apdong2022) * [apdong2022](https://github.com/apdong2022)
* [beryl678](https://github.com/beryl678) * [beryl678](https://github.com/beryl678)
* [Breeze-P](https://github.com/Breeze-P)
* [daviderli614](https://github.com/daviderli614) * [daviderli614](https://github.com/daviderli614)
* [discord9](https://github.com/discord9) * [discord9](https://github.com/discord9)
* [evenyag](https://github.com/evenyag) * [evenyag](https://github.com/evenyag)
* [fengjiachun](https://github.com/fengjiachun) * [fengjiachun](https://github.com/fengjiachun)
* [fengys1996](https://github.com/fengys1996) * [fengys1996](https://github.com/fengys1996)
* [GrepTime](https://github.com/GrepTime)
* [holalengyu](https://github.com/holalengyu) * [holalengyu](https://github.com/holalengyu)
* [killme2008](https://github.com/killme2008) * [killme2008](https://github.com/killme2008)
* [MichaelScofield](https://github.com/MichaelScofield)
* [nicecui](https://github.com/nicecui) * [nicecui](https://github.com/nicecui)
* [paomian](https://github.com/paomian) * [paomian](https://github.com/paomian)
* [shuiyisong](https://github.com/shuiyisong) * [shuiyisong](https://github.com/shuiyisong)
* [sunchanglong](https://github.com/sunchanglong) * [sunchanglong](https://github.com/sunchanglong)
* [sunng87](https://github.com/sunng87) * [sunng87](https://github.com/sunng87)
* [tisonkun](https://github.com/tisonkun)
* [v0y4g3r](https://github.com/v0y4g3r) * [v0y4g3r](https://github.com/v0y4g3r)
* [waynexia](https://github.com/waynexia) * [waynexia](https://github.com/waynexia)
* [Wenjie0329](https://github.com/Wenjie0329)
* [WenyXu](https://github.com/WenyXu)
* [xtang](https://github.com/xtang) * [xtang](https://github.com/xtang)
* [zhaoyingnan01](https://github.com/zhaoyingnan01) * [zhaoyingnan01](https://github.com/zhaoyingnan01)
* [zhongzc](https://github.com/zhongzc) * [zhongzc](https://github.com/zhongzc)
* [ZonaHex](https://github.com/ZonaHex)
* [zyy17](https://github.com/zyy17) * [zyy17](https://github.com/zyy17)
## All Contributors ## All Contributors
To see the full list of contributors, please visit our [Contributors page](https://github.com/GreptimeTeam/greptimedb/graphs/contributors) [![All Contributors](https://contrib.rocks/image?repo=GreptimeTeam/greptimedb)](https://github.com/GreptimeTeam/greptimedb/graphs/contributors)

Cargo.lock (generated): diff suppressed because it is too large (4742 lines).

@@ -2,26 +2,23 @@
members = [ members = [
"src/api", "src/api",
"src/auth", "src/auth",
"src/cache",
"src/catalog", "src/catalog",
"src/cli", "src/cache",
"src/client", "src/client",
"src/cmd", "src/cmd",
"src/common/base", "src/common/base",
"src/common/catalog", "src/common/catalog",
"src/common/config", "src/common/config",
"src/common/datasource", "src/common/datasource",
"src/common/decimal",
"src/common/error", "src/common/error",
"src/common/frontend", "src/common/frontend",
"src/common/function", "src/common/function",
"src/common/macro",
"src/common/greptimedb-telemetry", "src/common/greptimedb-telemetry",
"src/common/grpc", "src/common/grpc",
"src/common/grpc-expr", "src/common/grpc-expr",
"src/common/macro",
"src/common/mem-prof", "src/common/mem-prof",
"src/common/meta", "src/common/meta",
"src/common/options",
"src/common/plugins", "src/common/plugins",
"src/common/pprof", "src/common/pprof",
"src/common/procedure", "src/common/procedure",
@@ -33,6 +30,7 @@ members = [
"src/common/telemetry", "src/common/telemetry",
"src/common/test-util", "src/common/test-util",
"src/common/time", "src/common/time",
"src/common/decimal",
"src/common/version", "src/common/version",
"src/common/wal", "src/common/wal",
"src/datanode", "src/datanode",
@@ -40,8 +38,6 @@ members = [
"src/file-engine", "src/file-engine",
"src/flow", "src/flow",
"src/frontend", "src/frontend",
"src/index",
"src/log-query",
"src/log-store", "src/log-store",
"src/meta-client", "src/meta-client",
"src/meta-srv", "src/meta-srv",
@@ -55,11 +51,13 @@ members = [
"src/promql", "src/promql",
"src/puffin", "src/puffin",
"src/query", "src/query",
"src/script",
"src/servers", "src/servers",
"src/session", "src/session",
"src/sql", "src/sql",
"src/store-api", "src/store-api",
"src/table", "src/table",
"src/index",
"tests-fuzz", "tests-fuzz",
"tests-integration", "tests-integration",
"tests/runner", "tests/runner",
@@ -67,7 +65,7 @@ members = [
resolver = "2" resolver = "2"
[workspace.package] [workspace.package]
version = "0.12.2" version = "0.9.5"
edition = "2021" edition = "2021"
license = "Apache-2.0" license = "Apache-2.0"
@@ -78,10 +76,11 @@ clippy.dbg_macro = "warn"
clippy.implicit_clone = "warn" clippy.implicit_clone = "warn"
clippy.readonly_write_lock = "allow" clippy.readonly_write_lock = "allow"
rust.unknown_lints = "deny" rust.unknown_lints = "deny"
# Remove this after https://github.com/PyO3/pyo3/issues/4094
rust.non_local_definitions = "allow"
rust.unexpected_cfgs = { level = "warn", check-cfg = ['cfg(tokio_unstable)'] } rust.unexpected_cfgs = { level = "warn", check-cfg = ['cfg(tokio_unstable)'] }
[workspace.dependencies] [workspace.dependencies]
# DO_NOT_REMOVE_THIS: BEGIN_OF_EXTERNAL_DEPENDENCIES
# We turn off default-features for some dependencies here so the workspaces which inherit them can # We turn off default-features for some dependencies here so the workspaces which inherit them can
# selectively turn them on if needed, since we can override default-features = true (from false) # selectively turn them on if needed, since we can override default-features = true (from false)
# for the inherited dependency but cannot do the reverse (override from true to false). # for the inherited dependency but cannot do the reverse (override from true to false).
@@ -89,66 +88,52 @@ rust.unexpected_cfgs = { level = "warn", check-cfg = ['cfg(tokio_unstable)'] }
# See for more details: https://github.com/rust-lang/cargo/issues/11329 # See for more details: https://github.com/rust-lang/cargo/issues/11329
ahash = { version = "0.8", features = ["compile-time-rng"] } ahash = { version = "0.8", features = ["compile-time-rng"] }
aquamarine = "0.3" aquamarine = "0.3"
arrow = { version = "53.0.0", features = ["prettyprint"] } arrow = { version = "51.0.0", features = ["prettyprint"] }
arrow-array = { version = "53.0.0", default-features = false, features = ["chrono-tz"] } arrow-array = { version = "51.0.0", default-features = false, features = ["chrono-tz"] }
arrow-flight = "53.0" arrow-flight = "51.0"
arrow-ipc = { version = "53.0.0", default-features = false, features = ["lz4", "zstd"] } arrow-ipc = { version = "51.0.0", default-features = false, features = ["lz4", "zstd"] }
arrow-schema = { version = "53.0", features = ["serde"] } arrow-schema = { version = "51.0", features = ["serde"] }
async-stream = "0.3" async-stream = "0.3"
async-trait = "0.1" async-trait = "0.1"
# Remember to update axum-extra, axum-macros when updating axum axum = { version = "0.6", features = ["headers"] }
axum = "0.8"
axum-extra = "0.10"
axum-macros = "0.4"
backon = "1"
base64 = "0.21" base64 = "0.21"
bigdecimal = "0.4.2" bigdecimal = "0.4.2"
bitflags = "2.4.1" bitflags = "2.4.1"
bytemuck = "1.12" bytemuck = "1.12"
bytes = { version = "1.7", features = ["serde"] } bytes = { version = "1.7", features = ["serde"] }
chrono = { version = "0.4", features = ["serde"] } chrono = { version = "0.4", features = ["serde"] }
chrono-tz = "0.10.1"
clap = { version = "4.4", features = ["derive"] } clap = { version = "4.4", features = ["derive"] }
config = "0.13.0" config = "0.13.0"
crossbeam-utils = "0.8" crossbeam-utils = "0.8"
dashmap = "5.4" dashmap = "5.4"
datafusion = { git = "https://github.com/apache/datafusion.git", rev = "2464703c84c400a09cc59277018813f0e797bb4e" } datafusion = { git = "https://github.com/waynexia/arrow-datafusion.git", rev = "7823ef2f63663907edab46af0d51359900f608d6" }
datafusion-common = { git = "https://github.com/apache/datafusion.git", rev = "2464703c84c400a09cc59277018813f0e797bb4e" } datafusion-common = { git = "https://github.com/waynexia/arrow-datafusion.git", rev = "7823ef2f63663907edab46af0d51359900f608d6" }
datafusion-expr = { git = "https://github.com/apache/datafusion.git", rev = "2464703c84c400a09cc59277018813f0e797bb4e" } datafusion-expr = { git = "https://github.com/waynexia/arrow-datafusion.git", rev = "7823ef2f63663907edab46af0d51359900f608d6" }
datafusion-functions = { git = "https://github.com/apache/datafusion.git", rev = "2464703c84c400a09cc59277018813f0e797bb4e" } datafusion-functions = { git = "https://github.com/waynexia/arrow-datafusion.git", rev = "7823ef2f63663907edab46af0d51359900f608d6" }
datafusion-optimizer = { git = "https://github.com/apache/datafusion.git", rev = "2464703c84c400a09cc59277018813f0e797bb4e" } datafusion-optimizer = { git = "https://github.com/waynexia/arrow-datafusion.git", rev = "7823ef2f63663907edab46af0d51359900f608d6" }
datafusion-physical-expr = { git = "https://github.com/apache/datafusion.git", rev = "2464703c84c400a09cc59277018813f0e797bb4e" } datafusion-physical-expr = { git = "https://github.com/waynexia/arrow-datafusion.git", rev = "7823ef2f63663907edab46af0d51359900f608d6" }
datafusion-physical-plan = { git = "https://github.com/apache/datafusion.git", rev = "2464703c84c400a09cc59277018813f0e797bb4e" } datafusion-physical-plan = { git = "https://github.com/waynexia/arrow-datafusion.git", rev = "7823ef2f63663907edab46af0d51359900f608d6" }
datafusion-sql = { git = "https://github.com/apache/datafusion.git", rev = "2464703c84c400a09cc59277018813f0e797bb4e" } datafusion-sql = { git = "https://github.com/waynexia/arrow-datafusion.git", rev = "7823ef2f63663907edab46af0d51359900f608d6" }
datafusion-substrait = { git = "https://github.com/apache/datafusion.git", rev = "2464703c84c400a09cc59277018813f0e797bb4e" } datafusion-substrait = { git = "https://github.com/waynexia/arrow-datafusion.git", rev = "7823ef2f63663907edab46af0d51359900f608d6" }
deadpool = "0.10"
deadpool-postgres = "0.12"
derive_builder = "0.12" derive_builder = "0.12"
dotenv = "0.15" dotenv = "0.15"
etcd-client = "0.14" etcd-client = { version = "0.13" }
fst = "0.4.7" fst = "0.4.7"
futures = "0.3" futures = "0.3"
futures-util = "0.3" futures-util = "0.3"
greptime-proto = { git = "https://github.com/GreptimeTeam/greptime-proto.git", rev = "072ce580502e015df1a6b03a185b60309a7c2a7a" } greptime-proto = { git = "https://github.com/GreptimeTeam/greptime-proto.git", rev = "255f87a3318ace3f88a67f76995a0e14910983f4" }
hex = "0.4"
http = "1"
humantime = "2.1" humantime = "2.1"
humantime-serde = "1.1" humantime-serde = "1.1"
hyper = "1.1"
hyper-util = "0.1"
itertools = "0.10" itertools = "0.10"
jsonb = { git = "https://github.com/databendlabs/jsonb.git", rev = "8c8d2fc294a39f3ff08909d60f718639cfba3875", default-features = false } jsonb = { git = "https://github.com/databendlabs/jsonb.git", rev = "46ad50fc71cf75afbf98eec455f7892a6387c1fc", default-features = false }
lazy_static = "1.4" lazy_static = "1.4"
local-ip-address = "0.6" meter-core = { git = "https://github.com/GreptimeTeam/greptime-meter.git", rev = "a10facb353b41460eeb98578868ebf19c2084fac" }
loki-proto = { git = "https://github.com/GreptimeTeam/loki-proto.git", rev = "1434ecf23a2654025d86188fb5205e7a74b225d3" }
meter-core = { git = "https://github.com/GreptimeTeam/greptime-meter.git", rev = "5618e779cf2bb4755b499c630fba4c35e91898cb" }
mockall = "0.11.4" mockall = "0.11.4"
moka = "0.12" moka = "0.12"
nalgebra = "0.33"
notify = "6.1" notify = "6.1"
num_cpus = "1.16" num_cpus = "1.16"
once_cell = "1.18" once_cell = "1.18"
opentelemetry-proto = { version = "0.27", features = [ opentelemetry-proto = { version = "0.5", features = [
"gen-tonic", "gen-tonic",
"metrics", "metrics",
"trace", "trace",
@@ -156,19 +141,17 @@ opentelemetry-proto = { version = "0.27", features = [
"logs", "logs",
] } ] }
parking_lot = "0.12" parking_lot = "0.12"
parquet = { version = "53.0.0", default-features = false, features = ["arrow", "async", "object_store"] } parquet = { version = "51.0.0", default-features = false, features = ["arrow", "async", "object_store"] }
paste = "1.0" paste = "1.0"
pin-project = "1.0" pin-project = "1.0"
prometheus = { version = "0.13.3", features = ["process"] } prometheus = { version = "0.13.3", features = ["process"] }
promql-parser = { git = "https://github.com/GreptimeTeam/promql-parser.git", features = [ promql-parser = { version = "0.4.3", features = ["ser"] }
"ser", prost = "0.12"
], rev = "27abb8e16003a50c720f00d6c85f41f5fa2a2a8e" }
prost = "0.13"
raft-engine = { version = "0.4.1", default-features = false } raft-engine = { version = "0.4.1", default-features = false }
rand = "0.8" rand = "0.8"
ratelimit = "0.9" ratelimit = "0.9"
regex = "1.8" regex = "1.8"
regex-automata = "0.4" regex-automata = { version = "0.4" }
reqwest = { version = "0.12", default-features = false, features = [ reqwest = { version = "0.12", default-features = false, features = [
"json", "json",
"rustls-tls-native-roots", "rustls-tls-native-roots",
@@ -182,43 +165,39 @@ rstest = "0.21"
rstest_reuse = "0.7" rstest_reuse = "0.7"
rust_decimal = "1.33" rust_decimal = "1.33"
rustc-hash = "2.0" rustc-hash = "2.0"
rustls = { version = "0.23.20", default-features = false } # override by patch, see [patch.crates-io] schemars = "0.8"
serde = { version = "1.0", features = ["derive"] } serde = { version = "1.0", features = ["derive"] }
serde_json = { version = "1.0", features = ["float_roundtrip"] } serde_json = { version = "1.0", features = ["float_roundtrip"] }
serde_with = "3" serde_with = "3"
shadow-rs = "0.38" shadow-rs = "0.35"
similar-asserts = "1.6.0" similar-asserts = "1.6.0"
smallvec = { version = "1", features = ["serde"] } smallvec = { version = "1", features = ["serde"] }
snafu = "0.8" snafu = "0.8"
sysinfo = "0.30" sysinfo = "0.30"
# on branch v0.52.x # on branch v0.44.x
sqlparser = { git = "https://github.com/GreptimeTeam/sqlparser-rs.git", rev = "71dd86058d2af97b9925093d40c4e03360403170", features = [ sqlparser = { git = "https://github.com/GreptimeTeam/sqlparser-rs.git", rev = "54a267ac89c09b11c0c88934690530807185d3e7", features = [
"visitor", "visitor",
"serde", ] }
] } # on branch v0.44.x
strum = { version = "0.25", features = ["derive"] } strum = { version = "0.25", features = ["derive"] }
tempfile = "3" tempfile = "3"
tokio = { version = "1.40", features = ["full"] } tokio = { version = "1.40", features = ["full"] }
tokio-postgres = "0.7" tokio-postgres = "0.7"
tokio-rustls = { version = "0.26.0", default-features = false } # override by patch, see [patch.crates-io] tokio-stream = { version = "0.1" }
tokio-stream = "0.1"
tokio-util = { version = "0.7", features = ["io-util", "compat"] } tokio-util = { version = "0.7", features = ["io-util", "compat"] }
toml = "0.8.8" toml = "0.8.8"
tonic = { version = "0.12", features = ["tls", "gzip", "zstd"] } tonic = { version = "0.11", features = ["tls", "gzip", "zstd"] }
tower = "0.5" tower = { version = "0.4" }
tracing-appender = "0.2" tracing-appender = "0.2"
tracing-subscriber = { version = "0.3", features = ["env-filter", "json", "fmt"] } tracing-subscriber = { version = "0.3", features = ["env-filter", "json", "fmt"] }
typetag = "0.2" typetag = "0.2"
uuid = { version = "1.7", features = ["serde", "v4", "fast-rng"] } uuid = { version = "1.7", features = ["serde", "v4", "fast-rng"] }
zstd = "0.13" zstd = "0.13"
# DO_NOT_REMOVE_THIS: END_OF_EXTERNAL_DEPENDENCIES
## workspaces members ## workspaces members
api = { path = "src/api" } api = { path = "src/api" }
auth = { path = "src/auth" } auth = { path = "src/auth" }
cache = { path = "src/cache" } cache = { path = "src/cache" }
catalog = { path = "src/catalog" } catalog = { path = "src/catalog" }
cli = { path = "src/cli" }
client = { path = "src/client" } client = { path = "src/client" }
cmd = { path = "src/cmd", default-features = false } cmd = { path = "src/cmd", default-features = false }
common-base = { path = "src/common/base" } common-base = { path = "src/common/base" }
@@ -235,7 +214,6 @@ common-grpc-expr = { path = "src/common/grpc-expr" }
common-macro = { path = "src/common/macro" } common-macro = { path = "src/common/macro" }
common-mem-prof = { path = "src/common/mem-prof" } common-mem-prof = { path = "src/common/mem-prof" }
common-meta = { path = "src/common/meta" } common-meta = { path = "src/common/meta" }
common-options = { path = "src/common/options" }
common-plugins = { path = "src/common/plugins" } common-plugins = { path = "src/common/plugins" }
common-pprof = { path = "src/common/pprof" } common-pprof = { path = "src/common/pprof" }
common-procedure = { path = "src/common/procedure" } common-procedure = { path = "src/common/procedure" }
@@ -254,7 +232,6 @@ file-engine = { path = "src/file-engine" }
flow = { path = "src/flow" } flow = { path = "src/flow" }
frontend = { path = "src/frontend", default-features = false } frontend = { path = "src/frontend", default-features = false }
index = { path = "src/index" } index = { path = "src/index" }
log-query = { path = "src/log-query" }
log-store = { path = "src/log-store" } log-store = { path = "src/log-store" }
meta-client = { path = "src/meta-client" } meta-client = { path = "src/meta-client" }
meta-srv = { path = "src/meta-srv" } meta-srv = { path = "src/meta-srv" }
@@ -268,6 +245,7 @@ plugins = { path = "src/plugins" }
promql = { path = "src/promql" } promql = { path = "src/promql" }
puffin = { path = "src/puffin" } puffin = { path = "src/puffin" }
query = { path = "src/query" } query = { path = "src/query" }
script = { path = "src/script" }
servers = { path = "src/servers" } servers = { path = "src/servers" }
session = { path = "src/session" } session = { path = "src/session" }
sql = { path = "src/sql" } sql = { path = "src/sql" }
@@ -277,25 +255,27 @@ table = { path = "src/table" }
[patch.crates-io] [patch.crates-io]
# change all rustls dependencies to use our fork to default to `ring` to make it "just work" # change all rustls dependencies to use our fork to default to `ring` to make it "just work"
hyper-rustls = { git = "https://github.com/GreptimeTeam/hyper-rustls", rev = "a951e03" } # version = "0.27.5" with ring patch hyper-rustls = { git = "https://github.com/GreptimeTeam/hyper-rustls" }
rustls = { git = "https://github.com/GreptimeTeam/rustls", rev = "34fd0c6" } # version = "0.23.20" with ring patch rustls = { git = "https://github.com/GreptimeTeam/rustls" }
tokio-rustls = { git = "https://github.com/GreptimeTeam/tokio-rustls", rev = "4604ca6" } # version = "0.26.0" with ring patch tokio-rustls = { git = "https://github.com/GreptimeTeam/tokio-rustls" }
# This is commented, since we are not using aws-lc-sys, if we need to use it, we need to uncomment this line or use a release after this commit, or it wouldn't compile with gcc < 8.1 # This is commented, since we are not using aws-lc-sys, if we need to use it, we need to uncomment this line or use a release after this commit, or it wouldn't compile with gcc < 8.1
# see https://github.com/aws/aws-lc-rs/pull/526 # see https://github.com/aws/aws-lc-rs/pull/526
# aws-lc-sys = { git ="https://github.com/aws/aws-lc-rs", rev = "556558441e3494af4b156ae95ebc07ebc2fd38aa" } # aws-lc-sys = { git ="https://github.com/aws/aws-lc-rs", rev = "556558441e3494af4b156ae95ebc07ebc2fd38aa" }
[workspace.dependencies.meter-macros] [workspace.dependencies.meter-macros]
git = "https://github.com/GreptimeTeam/greptime-meter.git" git = "https://github.com/GreptimeTeam/greptime-meter.git"
rev = "5618e779cf2bb4755b499c630fba4c35e91898cb" rev = "a10facb353b41460eeb98578868ebf19c2084fac"
[profile.release] [profile.release]
debug = 1 # debug = 1
split-debuginfo = "off"
[profile.nightly] [profile.nightly]
inherits = "release" inherits = "release"
strip = "debuginfo" split-debuginfo = "off"
# strip = "debuginfo"
lto = "thin" lto = "thin"
debug = false # debug = false
incremental = false incremental = false
[profile.ci] [profile.ci]


@@ -1,6 +1,3 @@
[target.aarch64-unknown-linux-gnu]
image = "ghcr.io/cross-rs/aarch64-unknown-linux-gnu:0.2.5"
[build] [build]
pre-build = [ pre-build = [
"dpkg --add-architecture $CROSS_DEB_ARCH", "dpkg --add-architecture $CROSS_DEB_ARCH",
@@ -8,8 +5,3 @@ pre-build = [
"curl -LO https://github.com/protocolbuffers/protobuf/releases/download/v3.15.8/protoc-3.15.8-linux-x86_64.zip && unzip protoc-3.15.8-linux-x86_64.zip -d /usr/", "curl -LO https://github.com/protocolbuffers/protobuf/releases/download/v3.15.8/protoc-3.15.8-linux-x86_64.zip && unzip protoc-3.15.8-linux-x86_64.zip -d /usr/",
"chmod a+x /usr/bin/protoc && chmod -R a+rx /usr/include/google", "chmod a+x /usr/bin/protoc && chmod -R a+rx /usr/include/google",
] ]
[build.env]
passthrough = [
"JEMALLOC_SYS_WITH_LG_PAGE",
]


@@ -8,7 +8,7 @@ CARGO_BUILD_OPTS := --locked
IMAGE_REGISTRY ?= docker.io IMAGE_REGISTRY ?= docker.io
IMAGE_NAMESPACE ?= greptime IMAGE_NAMESPACE ?= greptime
IMAGE_TAG ?= latest IMAGE_TAG ?= latest
DEV_BUILDER_IMAGE_TAG ?= 2024-12-25-9d0fa5d5-20250124085746 DEV_BUILDER_IMAGE_TAG ?= 2024-10-19-a5c00e85-20241024184445
BUILDX_MULTI_PLATFORM_BUILD ?= false BUILDX_MULTI_PLATFORM_BUILD ?= false
BUILDX_BUILDER_NAME ?= gtbuilder BUILDX_BUILDER_NAME ?= gtbuilder
BASE_IMAGE ?= ubuntu BASE_IMAGE ?= ubuntu
@@ -165,14 +165,15 @@ nextest: ## Install nextest tools.
sqlness-test: ## Run sqlness test. sqlness-test: ## Run sqlness test.
cargo sqlness ${SQLNESS_OPTS} cargo sqlness ${SQLNESS_OPTS}
# Run fuzz test ${FUZZ_TARGET}.
RUNS ?= 1 RUNS ?= 1
FUZZ_TARGET ?= fuzz_alter_table FUZZ_TARGET ?= fuzz_alter_table
.PHONY: fuzz .PHONY: fuzz
fuzz: ## Run fuzz test ${FUZZ_TARGET}. fuzz:
cargo fuzz run ${FUZZ_TARGET} --fuzz-dir tests-fuzz -D -s none -- -runs=${RUNS} cargo fuzz run ${FUZZ_TARGET} --fuzz-dir tests-fuzz -D -s none -- -runs=${RUNS}
.PHONY: fuzz-ls .PHONY: fuzz-ls
fuzz-ls: ## List all fuzz targets. fuzz-ls:
cargo fuzz list --fuzz-dir tests-fuzz cargo fuzz list --fuzz-dir tests-fuzz
.PHONY: check .PHONY: check


@@ -6,14 +6,14 @@
</picture> </picture>
</p> </p>
<h2 align="center">Unified & Cost-Effective Time Series Database for Metrics, Logs, and Events</h2> <h2 align="center">Unified Time Series Database for Metrics, Logs, and Events</h2>
<div align="center"> <div align="center">
<h3 align="center"> <h3 align="center">
<a href="https://greptime.com/product/cloud">GreptimeCloud</a> | <a href="https://greptime.com/product/cloud">GreptimeCloud</a> |
<a href="https://docs.greptime.com/">User Guide</a> | <a href="https://docs.greptime.com/">User Guide</a> |
<a href="https://greptimedb.rs/">API Docs</a> | <a href="https://greptimedb.rs/">API Docs</a> |
<a href="https://github.com/GreptimeTeam/greptimedb/issues/5446">Roadmap 2025</a> <a href="https://github.com/GreptimeTeam/greptimedb/issues/3412">Roadmap 2024</a>
</h4> </h4>
<a href="https://github.com/GreptimeTeam/greptimedb/releases/latest"> <a href="https://github.com/GreptimeTeam/greptimedb/releases/latest">
@@ -48,51 +48,37 @@
</a> </a>
</div> </div>
- [Introduction](#introduction)
- [**Features: Why GreptimeDB**](#why-greptimedb)
- [Architecture](https://docs.greptime.com/contributor-guide/overview/#architecture)
- [Try it for free](#try-greptimedb)
- [Getting Started](#getting-started)
- [Project Status](#project-status)
- [Join the community](#community)
- [Contributing](#contributing)
- [Tools & Extensions](#tools--extensions)
- [License](#license)
- [Acknowledgement](#acknowledgement)
## Introduction ## Introduction
**GreptimeDB** is an open-source unified & cost-effective time-series database for **Metrics**, **Logs**, and **Events** (also **Traces** in plan). You can gain real-time insights from Edge to Cloud at Any Scale. **GreptimeDB** is an open-source unified time-series database for **Metrics**, **Logs**, and **Events** (also **Traces** in plan). You can gain real-time insights from Edge to Cloud at any scale.
## Why GreptimeDB ## Why GreptimeDB
Our core developers have been building time-series data platforms for years. Based on our best practices, GreptimeDB was born to give you: Our core developers have been building time-series data platforms for years. Based on our best-practices, GreptimeDB is born to give you:
* **Unified Processing of Metrics, Logs, and Events** * **Unified all kinds of time series**
GreptimeDB unifies time series data processing by treating all data - whether metrics, logs, or events - as timestamped events with context. Users can analyze this data using either [SQL](https://docs.greptime.com/user-guide/query-data/sql) or [PromQL](https://docs.greptime.com/user-guide/query-data/promql) and leverage stream processing ([Flow](https://docs.greptime.com/user-guide/flow-computation/overview)) to enable continuous aggregation. [Read more](https://docs.greptime.com/user-guide/concepts/data-model). GreptimeDB treats all time series as contextual events with timestamp, and thus unifies the processing of metrics, logs, and events. It supports analyzing metrics, logs, and events with SQL and PromQL, and doing streaming with continuous aggregation.
* **Cloud-native Distributed Database** * **Cloud-Edge collaboration**
Built for [Kubernetes](https://docs.greptime.com/user-guide/deployments/deploy-on-kubernetes/greptimedb-operator-management). GreptimeDB achieves seamless scalability with its [cloud-native architecture](https://docs.greptime.com/user-guide/concepts/architecture) of separated compute and storage, built on object storage (AWS S3, Azure Blob Storage, etc.) while enabling cross-cloud deployment through a unified data access layer. GreptimeDB can be deployed on ARM architecture-compatible Android/Linux systems as well as cloud environments from various vendors. Both sides run the same software, providing identical APIs and control planes, so your application can run at the edge or on the cloud without modification, and data synchronization also becomes extremely easy and efficient.
* **Cloud-native distributed database**
By leveraging object storage (S3 and others), separating compute and storage, scaling stateless compute nodes arbitrarily, GreptimeDB implements seamless scalability. It also supports cross-cloud deployment with a built-in unified data access layer over different object storages.
* **Performance and Cost-effective** * **Performance and Cost-effective**
Written in pure Rust for superior performance and reliability. GreptimeDB features a distributed query engine with intelligent indexing to handle high cardinality data efficiently. Its optimized columnar storage achieves 50x cost efficiency on cloud object storage through advanced compression. [Benchmark reports](https://www.greptime.com/blogs/2024-09-09-report-summary). Flexible indexing capabilities and distributed, parallel-processing query engine, tackling high cardinality issues down. Optimized columnar layout for handling time-series data; compacted, compressed, and stored on various storage backends, particularly cloud object storage with 50x cost efficiency.
* **Cloud-Edge Collaboration** * **Compatible with InfluxDB, Prometheus and more protocols**
GreptimeDB seamlessly operates across cloud and edge (ARM/Android/Linux), providing consistent APIs and control plane for unified data management and efficient synchronization. [Learn how to run on Android](https://docs.greptime.com/user-guide/deployments/run-on-android/). Widely adopted database protocols and APIs, including MySQL, PostgreSQL, and Prometheus Remote Storage, etc. [Read more](https://docs.greptime.com/user-guide/protocols/overview).
* **Multi-protocol Ingestion, SQL & PromQL Ready**
Widely adopted database protocols and APIs, including MySQL, PostgreSQL, InfluxDB, OpenTelemetry, Loki and Prometheus, etc. Effortless Adoption & Seamless Migration. [Supported Protocols Overview](https://docs.greptime.com/user-guide/protocols/overview).
For more detailed info please read [Why GreptimeDB](https://docs.greptime.com/user-guide/concepts/why-greptimedb).
## Try GreptimeDB ## Try GreptimeDB
### 1. [Live Demo](https://greptime.com/playground) ### 1. [GreptimePlay](https://greptime.com/playground)
Try out the features of GreptimeDB right from your browser. Try out the features of GreptimeDB right from your browser.
@@ -111,18 +97,9 @@ docker pull greptime/greptimedb
Start a GreptimeDB container with: Start a GreptimeDB container with:
```shell ```shell
docker run -p 127.0.0.1:4000-4003:4000-4003 \ docker run --rm --name greptime --net=host greptime/greptimedb standalone start
-v "$(pwd)/greptimedb:/tmp/greptimedb" \
--name greptime --rm \
greptime/greptimedb:latest standalone start \
--http-addr 0.0.0.0:4000 \
--rpc-bind-addr 0.0.0.0:4001 \
--mysql-addr 0.0.0.0:4002 \
--postgres-addr 0.0.0.0:4003
``` ```
Access the dashboard via `http://localhost:4000/dashboard`.
Read more about [Installation](https://docs.greptime.com/getting-started/installation/overview) on docs. Read more about [Installation](https://docs.greptime.com/getting-started/installation/overview) on docs.
## Getting Started ## Getting Started
@@ -138,8 +115,7 @@ Check the prerequisite:
* [Rust toolchain](https://www.rust-lang.org/tools/install) (nightly) * [Rust toolchain](https://www.rust-lang.org/tools/install) (nightly)
* [Protobuf compiler](https://grpc.io/docs/protoc-installation/) (>= 3.15) * [Protobuf compiler](https://grpc.io/docs/protoc-installation/) (>= 3.15)
* C/C++ building essentials, including `gcc`/`g++`/`autoconf` and glibc library (eg. `libc6-dev` on Ubuntu and `glibc-devel` on Fedora) * Python toolchain (optional): Required only if built with PyO3 backend. More detail for compiling with PyO3 can be found in its [documentation](https://pyo3.rs/v0.18.1/building_and_distribution#configuring-the-python-version).
* Python toolchain (optional): Required only if using some test scripts.
Build GreptimeDB binary: Build GreptimeDB binary:
@@ -153,11 +129,7 @@ Run a standalone server:
cargo run -- standalone start cargo run -- standalone start
``` ```
## Tools & Extensions ## Extension
### Kubernetes
- [GreptimeDB Operator](https://github.com/GrepTimeTeam/greptimedb-operator)
### Dashboard ### Dashboard
@@ -174,19 +146,14 @@ cargo run -- standalone start
### Grafana Dashboard ### Grafana Dashboard
Our official Grafana dashboard for monitoring GreptimeDB is available at [grafana](grafana/README.md) directory. Our official Grafana dashboard is available at [grafana](grafana/README.md) directory.
## Project Status ## Project Status
GreptimeDB is currently in Beta. We are targeting GA (General Availability) with v1.0 release by Early 2025. The current version has not yet reached the standards for General Availability.
According to our Greptime 2024 Roadmap, we aim to achieve a production-level version with the release of v1.0 by the end of 2024. [Join Us](https://github.com/GreptimeTeam/greptimedb/issues/3412)
While in Beta, GreptimeDB is already: We welcome you to test and use GreptimeDB. Some users have already adopted it in their production environments. If you're interested in trying it out, please use the latest stable release available.
* Being used in production by early adopters
* Actively maintained with regular releases, [about version number](https://docs.greptime.com/nightly/reference/about-greptimedb-version)
* Suitable for testing and evaluation
For production use, we recommend using the latest stable release.
## Community ## Community
@@ -205,12 +172,12 @@ In addition, you may:
- Connect us with [Linkedin](https://www.linkedin.com/company/greptime/) - Connect us with [Linkedin](https://www.linkedin.com/company/greptime/)
- Follow us on [Twitter](https://twitter.com/greptime) - Follow us on [Twitter](https://twitter.com/greptime)
## Commercial Support ## Commerial Support
If you are running GreptimeDB OSS in your organization, we offer additional If you are running GreptimeDB OSS in your organization, we offer additional
enterprise add-ons, installation services, training, and consulting. [Contact enterprise addons, installation service, training and consulting. [Contact
us](https://greptime.com/contactus) and we will reach out to you with more us](https://greptime.com/contactus) and we will reach out to you with more
detail of our commercial license. detail of our commerial license.
## License ## License
@@ -229,3 +196,4 @@ Special thanks to all the contributors who have propelled GreptimeDB forward. Fo
- GreptimeDB's query engine is powered by [Apache Arrow DataFusion™](https://arrow.apache.org/datafusion/). - GreptimeDB's query engine is powered by [Apache Arrow DataFusion™](https://arrow.apache.org/datafusion/).
- [Apache OpenDAL™](https://opendal.apache.org) gives GreptimeDB a very general and elegant data access abstraction layer. - [Apache OpenDAL™](https://opendal.apache.org) gives GreptimeDB a very general and elegant data access abstraction layer.
- GreptimeDB's meta service is based on [etcd](https://etcd.io/). - GreptimeDB's meta service is based on [etcd](https://etcd.io/).
- GreptimeDB uses [RustPython](https://github.com/RustPython/RustPython) for experimental embedded python scripting.


@@ -13,12 +13,11 @@
| Key | Type | Default | Descriptions | | Key | Type | Default | Descriptions |
| --- | -----| ------- | ----------- | | --- | -----| ------- | ----------- |
| `mode` | String | `standalone` | The running mode of the datanode. It can be `standalone` or `distributed`. | | `mode` | String | `standalone` | The running mode of the datanode. It can be `standalone` or `distributed`. |
| `enable_telemetry` | Bool | `true` | Enable telemetry to collect anonymous usage data. |
| `default_timezone` | String | Unset | The default timezone of the server. | | `default_timezone` | String | Unset | The default timezone of the server. |
| `init_regions_in_background` | Bool | `false` | Initialize all regions in the background during the startup.<br/>By default, it provides services after all regions have been initialized. | | `init_regions_in_background` | Bool | `false` | Initialize all regions in the background during the startup.<br/>By default, it provides services after all regions have been initialized. |
| `init_regions_parallelism` | Integer | `16` | Parallelism of initializing regions. | | `init_regions_parallelism` | Integer | `16` | Parallelism of initializing regions. |
| `max_concurrent_queries` | Integer | `0` | The maximum current queries allowed to be executed. Zero means unlimited. | | `max_concurrent_queries` | Integer | `0` | The maximum current queries allowed to be executed. Zero means unlimited. |
| `enable_telemetry` | Bool | `true` | Enable telemetry to collect anonymous usage data. Enabled by default. |
| `max_in_flight_write_bytes` | String | Unset | The maximum in-flight write bytes. |
| `runtime` | -- | -- | The runtime options. | | `runtime` | -- | -- | The runtime options. |
| `runtime.global_rt_size` | Integer | `8` | The number of threads to execute the runtime for global read operations. | | `runtime.global_rt_size` | Integer | `8` | The number of threads to execute the runtime for global read operations. |
| `runtime.compact_rt_size` | Integer | `4` | The number of threads to execute the runtime for global write operations. | | `runtime.compact_rt_size` | Integer | `4` | The number of threads to execute the runtime for global write operations. |
@@ -26,10 +25,8 @@
| `http.addr` | String | `127.0.0.1:4000` | The address to bind the HTTP server. | | `http.addr` | String | `127.0.0.1:4000` | The address to bind the HTTP server. |
| `http.timeout` | String | `30s` | HTTP request timeout. Set to 0 to disable timeout. | | `http.timeout` | String | `30s` | HTTP request timeout. Set to 0 to disable timeout. |
| `http.body_limit` | String | `64MB` | HTTP request body limit.<br/>The following units are supported: `B`, `KB`, `KiB`, `MB`, `MiB`, `GB`, `GiB`, `TB`, `TiB`, `PB`, `PiB`.<br/>Set to 0 to disable limit. | | `http.body_limit` | String | `64MB` | HTTP request body limit.<br/>The following units are supported: `B`, `KB`, `KiB`, `MB`, `MiB`, `GB`, `GiB`, `TB`, `TiB`, `PB`, `PiB`.<br/>Set to 0 to disable limit. |
| `http.enable_cors` | Bool | `true` | HTTP CORS support, it's turned on by default<br/>This allows browser to access http APIs without CORS restrictions |
| `http.cors_allowed_origins` | Array | Unset | Customize allowed origins for HTTP CORS. |
| `grpc` | -- | -- | The gRPC server options. | | `grpc` | -- | -- | The gRPC server options. |
| `grpc.bind_addr` | String | `127.0.0.1:4001` | The address to bind the gRPC server. | | `grpc.addr` | String | `127.0.0.1:4001` | The address to bind the gRPC server. |
| `grpc.runtime_size` | Integer | `8` | The number of server worker threads. | | `grpc.runtime_size` | Integer | `8` | The number of server worker threads. |
| `grpc.tls` | -- | -- | gRPC server TLS options, see `mysql.tls` section. | | `grpc.tls` | -- | -- | gRPC server TLS options, see `mysql.tls` section. |
| `grpc.tls.mode` | String | `disable` | TLS mode. | | `grpc.tls.mode` | String | `disable` | TLS mode. |
@@ -40,7 +37,6 @@
| `mysql.enable` | Bool | `true` | Whether to enable. | | `mysql.enable` | Bool | `true` | Whether to enable. |
| `mysql.addr` | String | `127.0.0.1:4002` | The addr to bind the MySQL server. | | `mysql.addr` | String | `127.0.0.1:4002` | The addr to bind the MySQL server. |
| `mysql.runtime_size` | Integer | `2` | The number of server worker threads. | | `mysql.runtime_size` | Integer | `2` | The number of server worker threads. |
| `mysql.keep_alive` | String | `0s` | Server-side keep-alive time.<br/>Set to 0 (default) to disable. |
| `mysql.tls` | -- | -- | -- | | `mysql.tls` | -- | -- | -- |
| `mysql.tls.mode` | String | `disable` | TLS mode, refer to https://www.postgresql.org/docs/current/libpq-ssl.html<br/>- `disable` (default value)<br/>- `prefer`<br/>- `require`<br/>- `verify-ca`<br/>- `verify-full` | | `mysql.tls.mode` | String | `disable` | TLS mode, refer to https://www.postgresql.org/docs/current/libpq-ssl.html<br/>- `disable` (default value)<br/>- `prefer`<br/>- `require`<br/>- `verify-ca`<br/>- `verify-full` |
| `mysql.tls.cert_path` | String | Unset | Certificate file path. | | `mysql.tls.cert_path` | String | Unset | Certificate file path. |
@@ -50,7 +46,6 @@
| `postgres.enable` | Bool | `true` | Whether to enable | | `postgres.enable` | Bool | `true` | Whether to enable |
| `postgres.addr` | String | `127.0.0.1:4003` | The addr to bind the PostgresSQL server. | | `postgres.addr` | String | `127.0.0.1:4003` | The addr to bind the PostgresSQL server. |
| `postgres.runtime_size` | Integer | `2` | The number of server worker threads. | | `postgres.runtime_size` | Integer | `2` | The number of server worker threads. |
| `postgres.keep_alive` | String | `0s` | Server-side keep-alive time.<br/>Set to 0 (default) to disable. |
| `postgres.tls` | -- | -- | PostgresSQL server TLS options, see `mysql.tls` section. | | `postgres.tls` | -- | -- | PostgresSQL server TLS options, see `mysql.tls` section. |
| `postgres.tls.mode` | String | `disable` | TLS mode. | | `postgres.tls.mode` | String | `disable` | TLS mode. |
| `postgres.tls.cert_path` | String | Unset | Certificate file path. | | `postgres.tls.cert_path` | String | Unset | Certificate file path. |
@@ -60,17 +55,15 @@
| `opentsdb.enable` | Bool | `true` | Whether to enable OpenTSDB put in HTTP API. | | `opentsdb.enable` | Bool | `true` | Whether to enable OpenTSDB put in HTTP API. |
| `influxdb` | -- | -- | InfluxDB protocol options. | | `influxdb` | -- | -- | InfluxDB protocol options. |
| `influxdb.enable` | Bool | `true` | Whether to enable InfluxDB protocol in HTTP API. | | `influxdb.enable` | Bool | `true` | Whether to enable InfluxDB protocol in HTTP API. |
| `jaeger` | -- | -- | Jaeger protocol options. |
| `jaeger.enable` | Bool | `true` | Whether to enable Jaeger protocol in HTTP API. |
| `prom_store` | -- | -- | Prometheus remote storage options | | `prom_store` | -- | -- | Prometheus remote storage options |
| `prom_store.enable` | Bool | `true` | Whether to enable Prometheus remote write and read in HTTP API. | | `prom_store.enable` | Bool | `true` | Whether to enable Prometheus remote write and read in HTTP API. |
| `prom_store.with_metric_engine` | Bool | `true` | Whether to store the data from Prometheus remote write in metric engine. | | `prom_store.with_metric_engine` | Bool | `true` | Whether to store the data from Prometheus remote write in metric engine. |
| `wal` | -- | -- | The WAL options. | | `wal` | -- | -- | The WAL options. |
| `wal.provider` | String | `raft_engine` | The provider of the WAL.<br/>- `raft_engine`: the wal is stored in the local file system by raft-engine.<br/>- `kafka`: it's remote wal that data is stored in Kafka. | | `wal.provider` | String | `raft_engine` | The provider of the WAL.<br/>- `raft_engine`: the wal is stored in the local file system by raft-engine.<br/>- `kafka`: it's remote wal that data is stored in Kafka. |
| `wal.dir` | String | Unset | The directory to store the WAL files.<br/>**It's only used when the provider is `raft_engine`**. | | `wal.dir` | String | Unset | The directory to store the WAL files.<br/>**It's only used when the provider is `raft_engine`**. |
| `wal.file_size` | String | `128MB` | The size of the WAL segment file.<br/>**It's only used when the provider is `raft_engine`**. | | `wal.file_size` | String | `256MB` | The size of the WAL segment file.<br/>**It's only used when the provider is `raft_engine`**. |
| `wal.purge_threshold` | String | `1GB` | The threshold of the WAL size to trigger a purge.<br/>**It's only used when the provider is `raft_engine`**. | | `wal.purge_threshold` | String | `4GB` | The threshold of the WAL size to trigger a flush.<br/>**It's only used when the provider is `raft_engine`**. |
| `wal.purge_interval` | String | `1m` | The interval to trigger a purge.<br/>**It's only used when the provider is `raft_engine`**. | | `wal.purge_interval` | String | `10m` | The interval to trigger a flush.<br/>**It's only used when the provider is `raft_engine`**. |
| `wal.read_batch_size` | Integer | `128` | The read batch size.<br/>**It's only used when the provider is `raft_engine`**. | | `wal.read_batch_size` | Integer | `128` | The read batch size.<br/>**It's only used when the provider is `raft_engine`**. |
| `wal.sync_write` | Bool | `false` | Whether to use sync write.<br/>**It's only used when the provider is `raft_engine`**. | | `wal.sync_write` | Bool | `false` | Whether to use sync write.<br/>**It's only used when the provider is `raft_engine`**. |
| `wal.enable_log_recycle` | Bool | `true` | Whether to reuse logically truncated log files.<br/>**It's only used when the provider is `raft_engine`**. | | `wal.enable_log_recycle` | Bool | `true` | Whether to reuse logically truncated log files.<br/>**It's only used when the provider is `raft_engine`**. |
@@ -92,19 +85,16 @@
| `wal.backoff_deadline` | String | `5mins` | The deadline of retries.<br/>**It's only used when the provider is `kafka`**. | | `wal.backoff_deadline` | String | `5mins` | The deadline of retries.<br/>**It's only used when the provider is `kafka`**. |
| `wal.overwrite_entry_start_id` | Bool | `false` | Ignore missing entries during read WAL.<br/>**It's only used when the provider is `kafka`**.<br/><br/>This option ensures that when Kafka messages are deleted, the system<br/>can still successfully replay memtable data without throwing an<br/>out-of-range error.<br/>However, enabling this option might lead to unexpected data loss,<br/>as the system will skip over missing entries instead of treating<br/>them as critical errors. | | `wal.overwrite_entry_start_id` | Bool | `false` | Ignore missing entries during read WAL.<br/>**It's only used when the provider is `kafka`**.<br/><br/>This option ensures that when Kafka messages are deleted, the system<br/>can still successfully replay memtable data without throwing an<br/>out-of-range error.<br/>However, enabling this option might lead to unexpected data loss,<br/>as the system will skip over missing entries instead of treating<br/>them as critical errors. |
| `metadata_store` | -- | -- | Metadata storage options. | | `metadata_store` | -- | -- | Metadata storage options. |
| `metadata_store.file_size` | String | `64MB` | The size of the metadata store log file. | | `metadata_store.file_size` | String | `256MB` | Kv file size in bytes. |
| `metadata_store.purge_threshold` | String | `256MB` | The threshold of the metadata store size to trigger a purge. | | `metadata_store.purge_threshold` | String | `4GB` | Kv purge threshold. |
| `metadata_store.purge_interval` | String | `1m` | The interval of the metadata store to trigger a purge. |
| `procedure` | -- | -- | Procedure storage options. | | `procedure` | -- | -- | Procedure storage options. |
| `procedure.max_retry_times` | Integer | `3` | Procedure max retry time. | | `procedure.max_retry_times` | Integer | `3` | Procedure max retry time. |
| `procedure.retry_delay` | String | `500ms` | Initial retry delay of procedures, increases exponentially | | `procedure.retry_delay` | String | `500ms` | Initial retry delay of procedures, increases exponentially |
| `flow` | -- | -- | flow engine options. |
| `flow.num_workers` | Integer | `0` | The number of flow workers in the flownode.<br/>Not setting this value (or setting it to 0) will use the number of CPU cores divided by 2. |
| `storage` | -- | -- | The data storage options. | | `storage` | -- | -- | The data storage options. |
| `storage.data_home` | String | `/tmp/greptimedb/` | The working home directory. | | `storage.data_home` | String | `/tmp/greptimedb/` | The working home directory. |
| `storage.type` | String | `File` | The storage type used to store the data.<br/>- `File`: the data is stored in the local file system.<br/>- `S3`: the data is stored in the S3 object storage.<br/>- `Gcs`: the data is stored in the Google Cloud Storage.<br/>- `Azblob`: the data is stored in the Azure Blob Storage.<br/>- `Oss`: the data is stored in the Aliyun OSS. | | `storage.type` | String | `File` | The storage type used to store the data.<br/>- `File`: the data is stored in the local file system.<br/>- `S3`: the data is stored in the S3 object storage.<br/>- `Gcs`: the data is stored in the Google Cloud Storage.<br/>- `Azblob`: the data is stored in the Azure Blob Storage.<br/>- `Oss`: the data is stored in the Aliyun OSS. |
| `storage.cache_path` | String | Unset | Read cache configuration for object storage such as 'S3' etc. It's configured by default when using object storage, and configuring it is recommended for better performance.<br/>A local file directory, defaults to `{data_home}`. An empty string means disabling. | | `storage.cache_path` | String | Unset | Cache configuration for object storage such as 'S3' etc.<br/>The local file cache directory. |
| `storage.cache_capacity` | String | Unset | The local file cache capacity in bytes. If your disk space is sufficient, it is recommended to set it larger. | | `storage.cache_capacity` | String | Unset | The local file cache capacity in bytes. |
| `storage.bucket` | String | Unset | The S3 bucket name.<br/>**It's only used when the storage type is `S3`, `Oss` and `Gcs`**. | | `storage.bucket` | String | Unset | The S3 bucket name.<br/>**It's only used when the storage type is `S3`, `Oss` and `Gcs`**. |
| `storage.root` | String | Unset | The S3 data will be stored in the specified prefix, for example, `s3://${bucket}/${root}`.<br/>**It's only used when the storage type is `S3`, `Oss` and `Azblob`**. | | `storage.root` | String | Unset | The S3 data will be stored in the specified prefix, for example, `s3://${bucket}/${root}`.<br/>**It's only used when the storage type is `S3`, `Oss` and `Azblob`**. |
| `storage.access_key_id` | String | Unset | The access key id of the aws account.<br/>It's **highly recommended** to use AWS IAM roles instead of hardcoding the access key id and secret key.<br/>**It's only used when the storage type is `S3` and `Oss`**. | | `storage.access_key_id` | String | Unset | The access key id of the aws account.<br/>It's **highly recommended** to use AWS IAM roles instead of hardcoding the access key id and secret key.<br/>**It's only used when the storage type is `S3` and `Oss`**. |
@@ -119,11 +109,6 @@
| `storage.sas_token` | String | Unset | The sas token of the azure account.<br/>**It's only used when the storage type is `Azblob`**. | | `storage.sas_token` | String | Unset | The sas token of the azure account.<br/>**It's only used when the storage type is `Azblob`**. |
| `storage.endpoint` | String | Unset | The endpoint of the S3 service.<br/>**It's only used when the storage type is `S3`, `Oss`, `Gcs` and `Azblob`**. | | `storage.endpoint` | String | Unset | The endpoint of the S3 service.<br/>**It's only used when the storage type is `S3`, `Oss`, `Gcs` and `Azblob`**. |
| `storage.region` | String | Unset | The region of the S3 service.<br/>**It's only used when the storage type is `S3`, `Oss`, `Gcs` and `Azblob`**. | | `storage.region` | String | Unset | The region of the S3 service.<br/>**It's only used when the storage type is `S3`, `Oss`, `Gcs` and `Azblob`**. |
| `storage.http_client` | -- | -- | The http client options to the storage.<br/>**It's only used when the storage type is `S3`, `Oss`, `Gcs` and `Azblob`**. |
| `storage.http_client.pool_max_idle_per_host` | Integer | `1024` | The maximum idle connection per host allowed in the pool. |
| `storage.http_client.connect_timeout` | String | `30s` | The timeout for only the connect phase of a http client. |
| `storage.http_client.timeout` | String | `30s` | The total request timeout, applied from when the request starts connecting until the response body has finished.<br/>Also considered a total deadline. |
| `storage.http_client.pool_idle_timeout` | String | `90s` | The timeout for idle sockets being kept-alive. |
| `[[region_engine]]` | -- | -- | The region engine options. You can configure multiple region engines. | | `[[region_engine]]` | -- | -- | The region engine options. You can configure multiple region engines. |
| `region_engine.mito` | -- | -- | The Mito engine options. | | `region_engine.mito` | -- | -- | The Mito engine options. |
| `region_engine.mito.num_workers` | Integer | `8` | Number of region workers. | | `region_engine.mito.num_workers` | Integer | `8` | Number of region workers. |
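The `storage.http_client` rows above appear only on the left-hand side of the diff. A sketch of how they combine with an S3 backend, using only keys listed in this table; the bucket, root, endpoint, and region values are placeholders:

```toml
# Illustrative S3 storage block with the http_client tuning keys from the
# left-hand column; bucket/root/endpoint/region values are placeholders.
[storage]
type = "S3"
bucket = "my-bucket"
root = "greptimedb"                    # data lands under s3://my-bucket/greptimedb
endpoint = "https://s3.amazonaws.com"
region = "us-east-1"

[storage.http_client]
pool_max_idle_per_host = 1024   # max idle connections per host
connect_timeout = "30s"         # connect phase only
timeout = "30s"                 # total request deadline
pool_idle_timeout = "90s"       # keep-alive for idle sockets
```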
@@ -141,45 +126,37 @@
| `region_engine.mito.vector_cache_size` | String | Auto | Cache size for vectors and arrow arrays. Setting it to 0 to disable the cache.<br/>If not set, it's default to 1/16 of OS memory with a max limitation of 512MB. | | `region_engine.mito.vector_cache_size` | String | Auto | Cache size for vectors and arrow arrays. Setting it to 0 to disable the cache.<br/>If not set, it's default to 1/16 of OS memory with a max limitation of 512MB. |
| `region_engine.mito.page_cache_size` | String | Auto | Cache size for pages of SST row groups. Setting it to 0 to disable the cache.<br/>If not set, it's default to 1/8 of OS memory. | | `region_engine.mito.page_cache_size` | String | Auto | Cache size for pages of SST row groups. Setting it to 0 to disable the cache.<br/>If not set, it's default to 1/8 of OS memory. |
| `region_engine.mito.selector_result_cache_size` | String | Auto | Cache size for time series selector (e.g. `last_value()`). Setting it to 0 to disable the cache.<br/>If not set, it's default to 1/16 of OS memory with a max limitation of 512MB. | | `region_engine.mito.selector_result_cache_size` | String | Auto | Cache size for time series selector (e.g. `last_value()`). Setting it to 0 to disable the cache.<br/>If not set, it's default to 1/16 of OS memory with a max limitation of 512MB. |
| `region_engine.mito.enable_write_cache` | Bool | `false` | Whether to enable the write cache. It's enabled by default when using object storage, and enabling it is recommended for better performance. | | `region_engine.mito.enable_experimental_write_cache` | Bool | `false` | Whether to enable the experimental write cache. |
| `region_engine.mito.write_cache_path` | String | `""` | File system path for write cache, defaults to `{data_home}`. | | `region_engine.mito.experimental_write_cache_path` | String | `""` | File system path for write cache, defaults to `{data_home}/write_cache`. |
| `region_engine.mito.write_cache_size` | String | `5GiB` | Capacity for write cache. If your disk space is sufficient, it is recommended to set it larger. | | `region_engine.mito.experimental_write_cache_size` | String | `512MB` | Capacity for write cache. |
| `region_engine.mito.write_cache_ttl` | String | Unset | TTL for write cache. | | `region_engine.mito.experimental_write_cache_ttl` | String | Unset | TTL for write cache. |
| `region_engine.mito.sst_write_buffer_size` | String | `8MB` | Buffer size for SST writing. | | `region_engine.mito.sst_write_buffer_size` | String | `8MB` | Buffer size for SST writing. |
| `region_engine.mito.scan_parallelism` | Integer | `0` | Parallelism to scan a region (default: 1/4 of cpu cores).<br/>- `0`: using the default value (1/4 of cpu cores).<br/>- `1`: scan in current thread.<br/>- `n`: scan in parallelism n. |
| `region_engine.mito.parallel_scan_channel_size` | Integer | `32` | Capacity of the channel to send data from parallel scan tasks to the main task. | | `region_engine.mito.parallel_scan_channel_size` | Integer | `32` | Capacity of the channel to send data from parallel scan tasks to the main task. |
| `region_engine.mito.allow_stale_entries` | Bool | `false` | Whether to allow stale WAL entries read during replay. | | `region_engine.mito.allow_stale_entries` | Bool | `false` | Whether to allow stale WAL entries read during replay. |
| `region_engine.mito.min_compaction_interval` | String | `0m` | Minimum time interval between two compactions.<br/>To align with the old behavior, the default value is 0 (no restrictions). | | `region_engine.mito.min_compaction_interval` | String | `0m` | Minimum time interval between two compactions.<br/>To align with the old behavior, the default value is 0 (no restrictions). |
| `region_engine.mito.index` | -- | -- | The options for index in Mito engine. | | `region_engine.mito.index` | -- | -- | The options for index in Mito engine. |
| `region_engine.mito.index.aux_path` | String | `""` | Auxiliary directory path for the index in filesystem, used to store intermediate files for<br/>creating the index and staging files for searching the index, defaults to `{data_home}/index_intermediate`.<br/>The default name for this directory is `index_intermediate` for backward compatibility.<br/><br/>This path contains two subdirectories:<br/>- `__intm`: for storing intermediate files used during creating index.<br/>- `staging`: for storing staging files used during searching index. | | `region_engine.mito.index.aux_path` | String | `""` | Auxiliary directory path for the index in filesystem, used to store intermediate files for<br/>creating the index and staging files for searching the index, defaults to `{data_home}/index_intermediate`.<br/>The default name for this directory is `index_intermediate` for backward compatibility.<br/><br/>This path contains two subdirectories:<br/>- `__intm`: for storing intermediate files used during creating index.<br/>- `staging`: for storing staging files used during searching index. |
| `region_engine.mito.index.staging_size` | String | `2GB` | The max capacity of the staging directory. | | `region_engine.mito.index.staging_size` | String | `2GB` | The max capacity of the staging directory. |
| `region_engine.mito.index.staging_ttl` | String | `7d` | The TTL of the staging directory.<br/>Defaults to 7 days.<br/>Setting it to "0s" to disable TTL. |
| `region_engine.mito.index.metadata_cache_size` | String | `64MiB` | Cache size for inverted index metadata. |
| `region_engine.mito.index.content_cache_size` | String | `128MiB` | Cache size for inverted index content. |
| `region_engine.mito.index.content_cache_page_size` | String | `64KiB` | Page size for inverted index content cache. |
| `region_engine.mito.inverted_index` | -- | -- | The options for inverted index in Mito engine. | | `region_engine.mito.inverted_index` | -- | -- | The options for inverted index in Mito engine. |
| `region_engine.mito.inverted_index.create_on_flush` | String | `auto` | Whether to create the index on flush.<br/>- `auto`: automatically (default)<br/>- `disable`: never | | `region_engine.mito.inverted_index.create_on_flush` | String | `auto` | Whether to create the index on flush.<br/>- `auto`: automatically (default)<br/>- `disable`: never |
| `region_engine.mito.inverted_index.create_on_compaction` | String | `auto` | Whether to create the index on compaction.<br/>- `auto`: automatically (default)<br/>- `disable`: never | | `region_engine.mito.inverted_index.create_on_compaction` | String | `auto` | Whether to create the index on compaction.<br/>- `auto`: automatically (default)<br/>- `disable`: never |
| `region_engine.mito.inverted_index.apply_on_query` | String | `auto` | Whether to apply the index on query<br/>- `auto`: automatically (default)<br/>- `disable`: never | | `region_engine.mito.inverted_index.apply_on_query` | String | `auto` | Whether to apply the index on query<br/>- `auto`: automatically (default)<br/>- `disable`: never |
| `region_engine.mito.inverted_index.mem_threshold_on_create` | String | `auto` | Memory threshold for performing an external sort during index creation.<br/>- `auto`: automatically determine the threshold based on the system memory size (default)<br/>- `unlimited`: no memory limit<br/>- `[size]` e.g. `64MB`: fixed memory threshold | | `region_engine.mito.inverted_index.mem_threshold_on_create` | String | `auto` | Memory threshold for performing an external sort during index creation.<br/>- `auto`: automatically determine the threshold based on the system memory size (default)<br/>- `unlimited`: no memory limit<br/>- `[size]` e.g. `64MB`: fixed memory threshold |
| `region_engine.mito.inverted_index.intermediate_path` | String | `""` | Deprecated, use `region_engine.mito.index.aux_path` instead. | | `region_engine.mito.inverted_index.intermediate_path` | String | `""` | Deprecated, use `region_engine.mito.index.aux_path` instead. |
| `region_engine.mito.inverted_index.metadata_cache_size` | String | `64MiB` | Cache size for inverted index metadata. |
| `region_engine.mito.inverted_index.content_cache_size` | String | `128MiB` | Cache size for inverted index content. |
| `region_engine.mito.fulltext_index` | -- | -- | The options for full-text index in Mito engine. | | `region_engine.mito.fulltext_index` | -- | -- | The options for full-text index in Mito engine. |
| `region_engine.mito.fulltext_index.create_on_flush` | String | `auto` | Whether to create the index on flush.<br/>- `auto`: automatically (default)<br/>- `disable`: never | | `region_engine.mito.fulltext_index.create_on_flush` | String | `auto` | Whether to create the index on flush.<br/>- `auto`: automatically (default)<br/>- `disable`: never |
| `region_engine.mito.fulltext_index.create_on_compaction` | String | `auto` | Whether to create the index on compaction.<br/>- `auto`: automatically (default)<br/>- `disable`: never | | `region_engine.mito.fulltext_index.create_on_compaction` | String | `auto` | Whether to create the index on compaction.<br/>- `auto`: automatically (default)<br/>- `disable`: never |
| `region_engine.mito.fulltext_index.apply_on_query` | String | `auto` | Whether to apply the index on query<br/>- `auto`: automatically (default)<br/>- `disable`: never | | `region_engine.mito.fulltext_index.apply_on_query` | String | `auto` | Whether to apply the index on query<br/>- `auto`: automatically (default)<br/>- `disable`: never |
| `region_engine.mito.fulltext_index.mem_threshold_on_create` | String | `auto` | Memory threshold for index creation.<br/>- `auto`: automatically determine the threshold based on the system memory size (default)<br/>- `unlimited`: no memory limit<br/>- `[size]` e.g. `64MB`: fixed memory threshold | | `region_engine.mito.fulltext_index.mem_threshold_on_create` | String | `auto` | Memory threshold for index creation.<br/>- `auto`: automatically determine the threshold based on the system memory size (default)<br/>- `unlimited`: no memory limit<br/>- `[size]` e.g. `64MB`: fixed memory threshold |
| `region_engine.mito.bloom_filter_index` | -- | -- | The options for bloom filter in Mito engine. |
| `region_engine.mito.bloom_filter_index.create_on_flush` | String | `auto` | Whether to create the bloom filter on flush.<br/>- `auto`: automatically (default)<br/>- `disable`: never |
| `region_engine.mito.bloom_filter_index.create_on_compaction` | String | `auto` | Whether to create the bloom filter on compaction.<br/>- `auto`: automatically (default)<br/>- `disable`: never |
| `region_engine.mito.bloom_filter_index.apply_on_query` | String | `auto` | Whether to apply the bloom filter on query<br/>- `auto`: automatically (default)<br/>- `disable`: never |
| `region_engine.mito.bloom_filter_index.mem_threshold_on_create` | String | `auto` | Memory threshold for bloom filter creation.<br/>- `auto`: automatically determine the threshold based on the system memory size (default)<br/>- `unlimited`: no memory limit<br/>- `[size]` e.g. `64MB`: fixed memory threshold |
| `region_engine.mito.memtable` | -- | -- | -- | | `region_engine.mito.memtable` | -- | -- | -- |
| `region_engine.mito.memtable.type` | String | `time_series` | Memtable type.<br/>- `time_series`: time-series memtable<br/>- `partition_tree`: partition tree memtable (experimental) | | `region_engine.mito.memtable.type` | String | `time_series` | Memtable type.<br/>- `time_series`: time-series memtable<br/>- `partition_tree`: partition tree memtable (experimental) |
| `region_engine.mito.memtable.index_max_keys_per_shard` | Integer | `8192` | The max number of keys in one shard.<br/>Only available for `partition_tree` memtable. | | `region_engine.mito.memtable.index_max_keys_per_shard` | Integer | `8192` | The max number of keys in one shard.<br/>Only available for `partition_tree` memtable. |
| `region_engine.mito.memtable.data_freeze_threshold` | Integer | `32768` | The max rows of data inside the actively writing buffer in one shard.<br/>Only available for `partition_tree` memtable. | | `region_engine.mito.memtable.data_freeze_threshold` | Integer | `32768` | The max rows of data inside the actively writing buffer in one shard.<br/>Only available for `partition_tree` memtable. |
| `region_engine.mito.memtable.fork_dictionary_bytes` | String | `1GiB` | Max dictionary bytes.<br/>Only available for `partition_tree` memtable. | | `region_engine.mito.memtable.fork_dictionary_bytes` | String | `1GiB` | Max dictionary bytes.<br/>Only available for `partition_tree` memtable. |
| `region_engine.file` | -- | -- | Enable the file engine. | | `region_engine.file` | -- | -- | Enable the file engine. |
| `region_engine.metric` | -- | -- | Metric engine options. |
| `region_engine.metric.experimental_sparse_primary_key_encoding` | Bool | `false` | Whether to enable the experimental sparse primary key encoding. |
| `logging` | -- | -- | The logging options. | | `logging` | -- | -- | The logging options. |
| `logging.dir` | String | `/tmp/greptimedb/logs` | The directory to store the log files. If set to empty, logs will not be written to files. | | `logging.dir` | String | `/tmp/greptimedb/logs` | The directory to store the log files. If set to empty, logs will not be written to files. |
| `logging.level` | String | Unset | The log level. Can be `info`/`debug`/`warn`/`error`. | | `logging.level` | String | Unset | The log level. Can be `info`/`debug`/`warn`/`error`. |
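Several of the rows in this hunk exist only on the left-hand side (the renamed write cache keys and the `region_engine.mito.index.*` cache settings). A sketch that sets them together, with sizes taken from the values listed above and the TTL as a labeled placeholder:

```toml
# Write cache and index cache sketch using the left-hand key names; the
# write_cache_ttl value is a placeholder (the option is unset by default).
[[region_engine]]
[region_engine.mito]
enable_write_cache = true
write_cache_path = ""            # empty means `{data_home}`
write_cache_size = "5GiB"
write_cache_ttl = "8h"           # placeholder

[region_engine.mito.index]
staging_size = "2GB"
staging_ttl = "7d"
metadata_cache_size = "64MiB"
content_cache_size = "128MiB"
content_cache_page_size = "64KiB"
```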
@@ -213,7 +190,6 @@
| Key | Type | Default | Descriptions | | Key | Type | Default | Descriptions |
| --- | -----| ------- | ----------- | | --- | -----| ------- | ----------- |
| `default_timezone` | String | Unset | The default timezone of the server. | | `default_timezone` | String | Unset | The default timezone of the server. |
| `max_in_flight_write_bytes` | String | Unset | The maximum in-flight write bytes. |
| `runtime` | -- | -- | The runtime options. | | `runtime` | -- | -- | The runtime options. |
| `runtime.global_rt_size` | Integer | `8` | The number of threads to execute the runtime for global read operations. | | `runtime.global_rt_size` | Integer | `8` | The number of threads to execute the runtime for global read operations. |
| `runtime.compact_rt_size` | Integer | `4` | The number of threads to execute the runtime for global write operations. | | `runtime.compact_rt_size` | Integer | `4` | The number of threads to execute the runtime for global write operations. |
@@ -224,11 +200,9 @@
| `http.addr` | String | `127.0.0.1:4000` | The address to bind the HTTP server. | | `http.addr` | String | `127.0.0.1:4000` | The address to bind the HTTP server. |
| `http.timeout` | String | `30s` | HTTP request timeout. Set to 0 to disable timeout. | | `http.timeout` | String | `30s` | HTTP request timeout. Set to 0 to disable timeout. |
| `http.body_limit` | String | `64MB` | HTTP request body limit.<br/>The following units are supported: `B`, `KB`, `KiB`, `MB`, `MiB`, `GB`, `GiB`, `TB`, `TiB`, `PB`, `PiB`.<br/>Set to 0 to disable limit. | | `http.body_limit` | String | `64MB` | HTTP request body limit.<br/>The following units are supported: `B`, `KB`, `KiB`, `MB`, `MiB`, `GB`, `GiB`, `TB`, `TiB`, `PB`, `PiB`.<br/>Set to 0 to disable limit. |
| `http.enable_cors` | Bool | `true` | HTTP CORS support, enabled by default.<br/>This allows browsers to access HTTP APIs without CORS restrictions. |
| `http.cors_allowed_origins` | Array | Unset | Customize allowed origins for HTTP CORS. |
| `grpc` | -- | -- | The gRPC server options. | | `grpc` | -- | -- | The gRPC server options. |
| `grpc.bind_addr` | String | `127.0.0.1:4001` | The address to bind the gRPC server. | | `grpc.addr` | String | `127.0.0.1:4001` | The address to bind the gRPC server. |
| `grpc.server_addr` | String | `127.0.0.1:4001` | The address advertised to the metasrv, and used for connections from outside the host.<br/>If left empty or unset, the server will automatically use the IP address of the first network interface<br/>on the host, with the same port number as the one specified in `grpc.bind_addr`. | | `grpc.hostname` | String | `127.0.0.1` | The hostname advertised to the metasrv,<br/>and used for connections from outside the host |
| `grpc.runtime_size` | Integer | `8` | The number of server worker threads. | | `grpc.runtime_size` | Integer | `8` | The number of server worker threads. |
| `grpc.tls` | -- | -- | gRPC server TLS options, see `mysql.tls` section. | | `grpc.tls` | -- | -- | gRPC server TLS options, see `mysql.tls` section. |
| `grpc.tls.mode` | String | `disable` | TLS mode. | | `grpc.tls.mode` | String | `disable` | TLS mode. |
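The gRPC rows above show the key rename that runs through this comparison: `grpc.bind_addr`/`grpc.server_addr` on the left versus `grpc.addr`/`grpc.hostname` on the right. A frontend sketch assuming the left-hand naming:

```toml
# Frontend gRPC sketch with the left-hand key names; the right-hand side of
# the diff would use `addr` and `hostname` instead.
[grpc]
bind_addr = "127.0.0.1:4001"     # address the gRPC server listens on
server_addr = "127.0.0.1:4001"   # address advertised to the metasrv
runtime_size = 8

[grpc.tls]
mode = "disable"
```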
@@ -239,7 +213,6 @@
| `mysql.enable` | Bool | `true` | Whether to enable. | | `mysql.enable` | Bool | `true` | Whether to enable. |
| `mysql.addr` | String | `127.0.0.1:4002` | The addr to bind the MySQL server. | | `mysql.addr` | String | `127.0.0.1:4002` | The addr to bind the MySQL server. |
| `mysql.runtime_size` | Integer | `2` | The number of server worker threads. | | `mysql.runtime_size` | Integer | `2` | The number of server worker threads. |
| `mysql.keep_alive` | String | `0s` | Server-side keep-alive time.<br/>Set to 0 (default) to disable. |
| `mysql.tls` | -- | -- | -- | | `mysql.tls` | -- | -- | -- |
| `mysql.tls.mode` | String | `disable` | TLS mode, refer to https://www.postgresql.org/docs/current/libpq-ssl.html<br/>- `disable` (default value)<br/>- `prefer`<br/>- `require`<br/>- `verify-ca`<br/>- `verify-full` | | `mysql.tls.mode` | String | `disable` | TLS mode, refer to https://www.postgresql.org/docs/current/libpq-ssl.html<br/>- `disable` (default value)<br/>- `prefer`<br/>- `require`<br/>- `verify-ca`<br/>- `verify-full` |
| `mysql.tls.cert_path` | String | Unset | Certificate file path. | | `mysql.tls.cert_path` | String | Unset | Certificate file path. |
@@ -249,7 +222,6 @@
| `postgres.enable` | Bool | `true` | Whether to enable | | `postgres.enable` | Bool | `true` | Whether to enable |
| `postgres.addr` | String | `127.0.0.1:4003` | The addr to bind the PostgreSQL server. | | `postgres.addr` | String | `127.0.0.1:4003` | The addr to bind the PostgreSQL server. |
| `postgres.runtime_size` | Integer | `2` | The number of server worker threads. | | `postgres.runtime_size` | Integer | `2` | The number of server worker threads. |
| `postgres.keep_alive` | String | `0s` | Server-side keep-alive time.<br/>Set to 0 (default) to disable. |
| `postgres.tls` | -- | -- | PostgreSQL server TLS options, see `mysql.tls` section. | | `postgres.tls` | -- | -- | PostgreSQL server TLS options, see `mysql.tls` section. |
| `postgres.tls.mode` | String | `disable` | TLS mode. | | `postgres.tls.mode` | String | `disable` | TLS mode. |
| `postgres.tls.cert_path` | String | Unset | Certificate file path. | | `postgres.tls.cert_path` | String | Unset | Certificate file path. |
@@ -259,8 +231,6 @@
| `opentsdb.enable` | Bool | `true` | Whether to enable OpenTSDB put in HTTP API. | | `opentsdb.enable` | Bool | `true` | Whether to enable OpenTSDB put in HTTP API. |
| `influxdb` | -- | -- | InfluxDB protocol options. | | `influxdb` | -- | -- | InfluxDB protocol options. |
| `influxdb.enable` | Bool | `true` | Whether to enable InfluxDB protocol in HTTP API. | | `influxdb.enable` | Bool | `true` | Whether to enable InfluxDB protocol in HTTP API. |
| `jaeger` | -- | -- | Jaeger protocol options. |
| `jaeger.enable` | Bool | `true` | Whether to enable Jaeger protocol in HTTP API. |
| `prom_store` | -- | -- | Prometheus remote storage options | | `prom_store` | -- | -- | Prometheus remote storage options |
| `prom_store.enable` | Bool | `true` | Whether to enable Prometheus remote write and read in HTTP API. | | `prom_store.enable` | Bool | `true` | Whether to enable Prometheus remote write and read in HTTP API. |
| `prom_store.with_metric_engine` | Bool | `true` | Whether to store the data from Prometheus remote write in metric engine. | | `prom_store.with_metric_engine` | Bool | `true` | Whether to store the data from Prometheus remote write in metric engine. |
@@ -310,17 +280,14 @@
| --- | -----| ------- | ----------- | | --- | -----| ------- | ----------- |
| `data_home` | String | `/tmp/metasrv/` | The working home directory. | | `data_home` | String | `/tmp/metasrv/` | The working home directory. |
| `bind_addr` | String | `127.0.0.1:3002` | The bind address of metasrv. | | `bind_addr` | String | `127.0.0.1:3002` | The bind address of metasrv. |
| `server_addr` | String | `127.0.0.1:3002` | The communication server address for the frontend and datanode to connect to metasrv.<br/>If left empty or unset, the server will automatically use the IP address of the first network interface<br/>on the host, with the same port number as the one specified in `bind_addr`. | | `server_addr` | String | `127.0.0.1:3002` | The communication server address for frontend and datanode to connect to metasrv, "127.0.0.1:3002" by default for localhost. |
| `store_addrs` | Array | -- | Store server address default to etcd store.<br/>For postgres store, the format is:<br/>"password=password dbname=postgres user=postgres host=localhost port=5432"<br/>For etcd store, the format is:<br/>"127.0.0.1:2379" | | `store_addr` | String | `127.0.0.1:2379` | Store server address default to etcd store. |
| `store_key_prefix` | String | `""` | If it's not empty, the metasrv will store all data with this key prefix. |
| `backend` | String | `etcd_store` | The datastore for meta server.<br/>Available values:<br/>- `etcd_store` (default value)<br/>- `memory_store`<br/>- `postgres_store` |
| `meta_table_name` | String | `greptime_metakv` | Table name in RDS to store metadata. Takes effect when using an RDS kvbackend.<br/>**Only used when backend is `postgres_store`.** |
| `meta_election_lock_id` | Integer | `1` | Advisory lock id in PostgreSQL for election. Takes effect when using PostgreSQL as the kvbackend.<br/>Only used when backend is `postgres_store`. |
| `selector` | String | `round_robin` | Datanode selector type.<br/>- `round_robin` (default value)<br/>- `lease_based`<br/>- `load_based`<br/>For details, please see "https://docs.greptime.com/developer-guide/metasrv/selector". | | `selector` | String | `round_robin` | Datanode selector type.<br/>- `round_robin` (default value)<br/>- `lease_based`<br/>- `load_based`<br/>For details, please see "https://docs.greptime.com/developer-guide/metasrv/selector". |
| `use_memory_store` | Bool | `false` | Store data in memory. | | `use_memory_store` | Bool | `false` | Store data in memory. |
| `enable_telemetry` | Bool | `true` | Whether to enable greptimedb telemetry. |
| `store_key_prefix` | String | `""` | If it's not empty, the metasrv will store all data with this key prefix. |
| `enable_region_failover` | Bool | `false` | Whether to enable region failover.<br/>This feature is only available on GreptimeDB running on cluster mode and<br/>- Using Remote WAL<br/>- Using shared storage (e.g., s3). | | `enable_region_failover` | Bool | `false` | Whether to enable region failover.<br/>This feature is only available on GreptimeDB running on cluster mode and<br/>- Using Remote WAL<br/>- Using shared storage (e.g., s3). |
| `node_max_idle_time` | String | `24hours` | Max allowed idle time before removing node info from metasrv memory. | | `backend` | String | `EtcdStore` | The datastore for meta server. |
| `enable_telemetry` | Bool | `true` | Whether to enable greptimedb telemetry. Enabled by default. |
| `runtime` | -- | -- | The runtime options. | | `runtime` | -- | -- | The runtime options. |
| `runtime.global_rt_size` | Integer | `8` | The number of threads to execute the runtime for global read operations. | | `runtime.global_rt_size` | Integer | `8` | The number of threads to execute the runtime for global read operations. |
| `runtime.compact_rt_size` | Integer | `4` | The number of threads to execute the runtime for global write operations. | | `runtime.compact_rt_size` | Integer | `4` | The number of threads to execute the runtime for global write operations. |
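The metasrv rows above differ mainly in how the backend store is configured (`store_addrs` plus `backend` on the left versus a single `store_addr` on the right). A left-hand-style sketch for a PostgreSQL backend; the connection string is a placeholder:

```toml
# Metasrv kvbackend sketch using the left-hand keys; the connection string
# is a placeholder.
backend = "postgres_store"
store_addrs = ["password=password dbname=postgres user=postgres host=localhost port=5432"]
store_key_prefix = ""            # optional prefix for all metasrv keys
meta_table_name = "greptime_metakv"
meta_election_lock_id = 1
selector = "round_robin"
```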
@@ -344,7 +311,7 @@
| `wal.auto_create_topics` | Bool | `true` | Automatically create topics for WAL.<br/>Set to `true` to automatically create topics for WAL.<br/>Otherwise, use topics named `topic_name_prefix_[0..num_topics)` | | `wal.auto_create_topics` | Bool | `true` | Automatically create topics for WAL.<br/>Set to `true` to automatically create topics for WAL.<br/>Otherwise, use topics named `topic_name_prefix_[0..num_topics)` |
| `wal.num_topics` | Integer | `64` | Number of topics. | | `wal.num_topics` | Integer | `64` | Number of topics. |
| `wal.selector_type` | String | `round_robin` | Topic selector type.<br/>Available selector types:<br/>- `round_robin` (default) | | `wal.selector_type` | String | `round_robin` | Topic selector type.<br/>Available selector types:<br/>- `round_robin` (default) |
| `wal.topic_name_prefix` | String | `greptimedb_wal_topic` | A Kafka topic is constructed by concatenating `topic_name_prefix` and `topic_id`.<br/>Only accepts strings that match the following regular expression pattern:<br/>[a-zA-Z_:-][a-zA-Z0-9_:\-\.@#]*<br/>e.g., greptimedb_wal_topic_0, greptimedb_wal_topic_1. | | `wal.topic_name_prefix` | String | `greptimedb_wal_topic` | A Kafka topic is constructed by concatenating `topic_name_prefix` and `topic_id`.<br/>e.g., greptimedb_wal_topic_0, greptimedb_wal_topic_1. |
| `wal.replication_factor` | Integer | `1` | Expected number of replicas of each partition. | | `wal.replication_factor` | Integer | `1` | Expected number of replicas of each partition. |
| `wal.create_topic_timeout` | String | `30s` | The timeout above which a topic creation operation will be cancelled. | | `wal.create_topic_timeout` | String | `30s` | The timeout above which a topic creation operation will be cancelled. |
| `wal.backoff_init` | String | `500ms` | The initial backoff for kafka clients. | | `wal.backoff_init` | String | `500ms` | The initial backoff for kafka clients. |
@@ -385,16 +352,21 @@
| `node_id` | Integer | Unset | The datanode identifier and should be unique in the cluster. | | `node_id` | Integer | Unset | The datanode identifier and should be unique in the cluster. |
| `require_lease_before_startup` | Bool | `false` | Start services after regions have obtained leases.<br/>It will block the datanode start if it can't receive leases in the heartbeat from metasrv. | | `require_lease_before_startup` | Bool | `false` | Start services after regions have obtained leases.<br/>It will block the datanode start if it can't receive leases in the heartbeat from metasrv. |
| `init_regions_in_background` | Bool | `false` | Initialize all regions in the background during the startup.<br/>By default, it provides services after all regions have been initialized. | | `init_regions_in_background` | Bool | `false` | Initialize all regions in the background during the startup.<br/>By default, it provides services after all regions have been initialized. |
| `enable_telemetry` | Bool | `true` | Enable telemetry to collect anonymous usage data. |
| `init_regions_parallelism` | Integer | `16` | Parallelism of initializing regions. | | `init_regions_parallelism` | Integer | `16` | Parallelism of initializing regions. |
| `max_concurrent_queries` | Integer | `0` | The maximum concurrent queries allowed to be executed. Zero means unlimited. | | `max_concurrent_queries` | Integer | `0` | The maximum concurrent queries allowed to be executed. Zero means unlimited. |
| `enable_telemetry` | Bool | `true` | Enable telemetry to collect anonymous usage data. Enabled by default. | | `rpc_addr` | String | Unset | Deprecated, use `grpc.addr` instead. |
| `rpc_hostname` | String | Unset | Deprecated, use `grpc.hostname` instead. |
| `rpc_runtime_size` | Integer | Unset | Deprecated, use `grpc.runtime_size` instead. |
| `rpc_max_recv_message_size` | String | Unset | Deprecated, use `grpc.rpc_max_recv_message_size` instead. |
| `rpc_max_send_message_size` | String | Unset | Deprecated, use `grpc.rpc_max_send_message_size` instead. |
| `http` | -- | -- | The HTTP server options. | | `http` | -- | -- | The HTTP server options. |
| `http.addr` | String | `127.0.0.1:4000` | The address to bind the HTTP server. | | `http.addr` | String | `127.0.0.1:4000` | The address to bind the HTTP server. |
| `http.timeout` | String | `30s` | HTTP request timeout. Set to 0 to disable timeout. | | `http.timeout` | String | `30s` | HTTP request timeout. Set to 0 to disable timeout. |
| `http.body_limit` | String | `64MB` | HTTP request body limit.<br/>The following units are supported: `B`, `KB`, `KiB`, `MB`, `MiB`, `GB`, `GiB`, `TB`, `TiB`, `PB`, `PiB`.<br/>Set to 0 to disable limit. | | `http.body_limit` | String | `64MB` | HTTP request body limit.<br/>The following units are supported: `B`, `KB`, `KiB`, `MB`, `MiB`, `GB`, `GiB`, `TB`, `TiB`, `PB`, `PiB`.<br/>Set to 0 to disable limit. |
| `grpc` | -- | -- | The gRPC server options. | | `grpc` | -- | -- | The gRPC server options. |
| `grpc.bind_addr` | String | `127.0.0.1:3001` | The address to bind the gRPC server. | | `grpc.addr` | String | `127.0.0.1:3001` | The address to bind the gRPC server. |
| `grpc.server_addr` | String | `127.0.0.1:3001` | The address advertised to the metasrv, and used for connections from outside the host.<br/>If left empty or unset, the server will automatically use the IP address of the first network interface<br/>on the host, with the same port number as the one specified in `grpc.bind_addr`. | | `grpc.hostname` | String | `127.0.0.1` | The hostname advertised to the metasrv,<br/>and used for connections from outside the host |
| `grpc.runtime_size` | Integer | `8` | The number of server worker threads. | | `grpc.runtime_size` | Integer | `8` | The number of server worker threads. |
| `grpc.max_recv_message_size` | String | `512MB` | The maximum receive message size for gRPC server. | | `grpc.max_recv_message_size` | String | `512MB` | The maximum receive message size for gRPC server. |
| `grpc.max_send_message_size` | String | `512MB` | The maximum send message size for gRPC server. | | `grpc.max_send_message_size` | String | `512MB` | The maximum send message size for gRPC server. |
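The datanode rows above pair the deprecated top-level `rpc_*` keys (right-hand side) with the `grpc` section that replaces them. A sketch of the replacement section, assuming the left-hand key names:

```toml
# Datanode gRPC sketch; the deprecated top-level rpc_* keys on the right-hand
# side map onto this section.
[grpc]
bind_addr = "127.0.0.1:3001"     # address the gRPC server listens on
server_addr = "127.0.0.1:3001"   # address advertised to the metasrv
runtime_size = 8
max_recv_message_size = "512MB"
max_send_message_size = "512MB"
```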
@@ -422,9 +394,9 @@
| `wal` | -- | -- | The WAL options. | | `wal` | -- | -- | The WAL options. |
| `wal.provider` | String | `raft_engine` | The provider of the WAL.<br/>- `raft_engine`: the wal is stored in the local file system by raft-engine.<br/>- `kafka`: it's remote wal that data is stored in Kafka. | | `wal.provider` | String | `raft_engine` | The provider of the WAL.<br/>- `raft_engine`: the wal is stored in the local file system by raft-engine.<br/>- `kafka`: it's remote wal that data is stored in Kafka. |
| `wal.dir` | String | Unset | The directory to store the WAL files.<br/>**It's only used when the provider is `raft_engine`**. | | `wal.dir` | String | Unset | The directory to store the WAL files.<br/>**It's only used when the provider is `raft_engine`**. |
| `wal.file_size` | String | `128MB` | The size of the WAL segment file.<br/>**It's only used when the provider is `raft_engine`**. | | `wal.file_size` | String | `256MB` | The size of the WAL segment file.<br/>**It's only used when the provider is `raft_engine`**. |
| `wal.purge_threshold` | String | `1GB` | The threshold of the WAL size to trigger a flush.<br/>**It's only used when the provider is `raft_engine`**. | | `wal.purge_threshold` | String | `4GB` | The threshold of the WAL size to trigger a flush.<br/>**It's only used when the provider is `raft_engine`**. |
| `wal.purge_interval` | String | `1m` | The interval to trigger a flush.<br/>**It's only used when the provider is `raft_engine`**. | | `wal.purge_interval` | String | `10m` | The interval to trigger a flush.<br/>**It's only used when the provider is `raft_engine`**. |
| `wal.read_batch_size` | Integer | `128` | The read batch size.<br/>**It's only used when the provider is `raft_engine`**. | | `wal.read_batch_size` | Integer | `128` | The read batch size.<br/>**It's only used when the provider is `raft_engine`**. |
| `wal.sync_write` | Bool | `false` | Whether to use sync write.<br/>**It's only used when the provider is `raft_engine`**. | | `wal.sync_write` | Bool | `false` | Whether to use sync write.<br/>**It's only used when the provider is `raft_engine`**. |
| `wal.enable_log_recycle` | Bool | `true` | Whether to reuse logically truncated log files.<br/>**It's only used when the provider is `raft_engine`**. | | `wal.enable_log_recycle` | Bool | `true` | Whether to reuse logically truncated log files.<br/>**It's only used when the provider is `raft_engine`**. |
@@ -444,8 +416,8 @@
| `storage` | -- | -- | The data storage options. | | `storage` | -- | -- | The data storage options. |
| `storage.data_home` | String | `/tmp/greptimedb/` | The working home directory. | | `storage.data_home` | String | `/tmp/greptimedb/` | The working home directory. |
| `storage.type` | String | `File` | The storage type used to store the data.<br/>- `File`: the data is stored in the local file system.<br/>- `S3`: the data is stored in the S3 object storage.<br/>- `Gcs`: the data is stored in the Google Cloud Storage.<br/>- `Azblob`: the data is stored in the Azure Blob Storage.<br/>- `Oss`: the data is stored in the Aliyun OSS. | | `storage.type` | String | `File` | The storage type used to store the data.<br/>- `File`: the data is stored in the local file system.<br/>- `S3`: the data is stored in the S3 object storage.<br/>- `Gcs`: the data is stored in the Google Cloud Storage.<br/>- `Azblob`: the data is stored in the Azure Blob Storage.<br/>- `Oss`: the data is stored in the Aliyun OSS. |
| `storage.cache_path` | String | Unset | Read cache configuration for object storage such as 'S3' etc. It's configured by default when using object storage, and configuring it is recommended for better performance.<br/>A local file directory, defaults to `{data_home}`. An empty string means disabling. | | `storage.cache_path` | String | Unset | Cache configuration for object storage such as 'S3' etc.<br/>The local file cache directory. |
| `storage.cache_capacity` | String | Unset | The local file cache capacity in bytes. If your disk space is sufficient, it is recommended to set it larger. | | `storage.cache_capacity` | String | Unset | The local file cache capacity in bytes. |
| `storage.bucket` | String | Unset | The S3 bucket name.<br/>**It's only used when the storage type is `S3`, `Oss` and `Gcs`**. | | `storage.bucket` | String | Unset | The S3 bucket name.<br/>**It's only used when the storage type is `S3`, `Oss` and `Gcs`**. |
| `storage.root` | String | Unset | The S3 data will be stored in the specified prefix, for example, `s3://${bucket}/${root}`.<br/>**It's only used when the storage type is `S3`, `Oss` and `Azblob`**. | | `storage.root` | String | Unset | The S3 data will be stored in the specified prefix, for example, `s3://${bucket}/${root}`.<br/>**It's only used when the storage type is `S3`, `Oss` and `Azblob`**. |
| `storage.access_key_id` | String | Unset | The access key id of the aws account.<br/>It's **highly recommended** to use AWS IAM roles instead of hardcoding the access key id and secret key.<br/>**It's only used when the storage type is `S3` and `Oss`**. | | `storage.access_key_id` | String | Unset | The access key id of the aws account.<br/>It's **highly recommended** to use AWS IAM roles instead of hardcoding the access key id and secret key.<br/>**It's only used when the storage type is `S3` and `Oss`**. |
@@ -460,11 +432,6 @@
| `storage.sas_token` | String | Unset | The sas token of the azure account.<br/>**It's only used when the storage type is `Azblob`**. | | `storage.sas_token` | String | Unset | The sas token of the azure account.<br/>**It's only used when the storage type is `Azblob`**. |
| `storage.endpoint` | String | Unset | The endpoint of the S3 service.<br/>**It's only used when the storage type is `S3`, `Oss`, `Gcs` and `Azblob`**. | | `storage.endpoint` | String | Unset | The endpoint of the S3 service.<br/>**It's only used when the storage type is `S3`, `Oss`, `Gcs` and `Azblob`**. |
| `storage.region` | String | Unset | The region of the S3 service.<br/>**It's only used when the storage type is `S3`, `Oss`, `Gcs` and `Azblob`**. | | `storage.region` | String | Unset | The region of the S3 service.<br/>**It's only used when the storage type is `S3`, `Oss`, `Gcs` and `Azblob`**. |
| `storage.http_client` | -- | -- | The http client options to the storage.<br/>**It's only used when the storage type is `S3`, `Oss`, `Gcs` and `Azblob`**. |
| `storage.http_client.pool_max_idle_per_host` | Integer | `1024` | The maximum idle connection per host allowed in the pool. |
| `storage.http_client.connect_timeout` | String | `30s` | The timeout for only the connect phase of a http client. |
| `storage.http_client.timeout` | String | `30s` | The total request timeout, applied from when the request starts connecting until the response body has finished.<br/>Also considered a total deadline. |
| `storage.http_client.pool_idle_timeout` | String | `90s` | The timeout for idle sockets being kept-alive. |
| `[[region_engine]]` | -- | -- | The region engine options. You can configure multiple region engines. | | `[[region_engine]]` | -- | -- | The region engine options. You can configure multiple region engines. |
| `region_engine.mito` | -- | -- | The Mito engine options. | | `region_engine.mito` | -- | -- | The Mito engine options. |
| `region_engine.mito.num_workers` | Integer | `8` | Number of region workers. | | `region_engine.mito.num_workers` | Integer | `8` | Number of region workers. |
@@ -482,21 +449,18 @@
| `region_engine.mito.vector_cache_size` | String | Auto | Cache size for vectors and arrow arrays. Setting it to 0 to disable the cache.<br/>If not set, it's default to 1/16 of OS memory with a max limitation of 512MB. | | `region_engine.mito.vector_cache_size` | String | Auto | Cache size for vectors and arrow arrays. Setting it to 0 to disable the cache.<br/>If not set, it's default to 1/16 of OS memory with a max limitation of 512MB. |
| `region_engine.mito.page_cache_size` | String | Auto | Cache size for pages of SST row groups. Setting it to 0 to disable the cache.<br/>If not set, it's default to 1/8 of OS memory. | | `region_engine.mito.page_cache_size` | String | Auto | Cache size for pages of SST row groups. Setting it to 0 to disable the cache.<br/>If not set, it's default to 1/8 of OS memory. |
| `region_engine.mito.selector_result_cache_size` | String | Auto | Cache size for time series selector (e.g. `last_value()`). Setting it to 0 to disable the cache.<br/>If not set, it's default to 1/16 of OS memory with a max limitation of 512MB. | | `region_engine.mito.selector_result_cache_size` | String | Auto | Cache size for time series selector (e.g. `last_value()`). Setting it to 0 to disable the cache.<br/>If not set, it's default to 1/16 of OS memory with a max limitation of 512MB. |
| `region_engine.mito.enable_write_cache` | Bool | `false` | Whether to enable the write cache. It's enabled by default when using object storage, and enabling it is recommended for better performance. | | `region_engine.mito.enable_experimental_write_cache` | Bool | `false` | Whether to enable the experimental write cache. |
| `region_engine.mito.write_cache_path` | String | `""` | File system path for write cache, defaults to `{data_home}`. | | `region_engine.mito.experimental_write_cache_path` | String | `""` | File system path for write cache, defaults to `{data_home}/write_cache`. |
| `region_engine.mito.write_cache_size` | String | `5GiB` | Capacity for write cache. If your disk space is sufficient, it is recommended to set it larger. | | `region_engine.mito.experimental_write_cache_size` | String | `512MB` | Capacity for write cache. |
| `region_engine.mito.write_cache_ttl` | String | Unset | TTL for write cache. | | `region_engine.mito.experimental_write_cache_ttl` | String | Unset | TTL for write cache. |
| `region_engine.mito.sst_write_buffer_size` | String | `8MB` | Buffer size for SST writing. | | `region_engine.mito.sst_write_buffer_size` | String | `8MB` | Buffer size for SST writing. |
| `region_engine.mito.scan_parallelism` | Integer | `0` | Parallelism to scan a region (default: 1/4 of cpu cores).<br/>- `0`: using the default value (1/4 of cpu cores).<br/>- `1`: scan in current thread.<br/>- `n`: scan in parallelism n. |
| `region_engine.mito.parallel_scan_channel_size` | Integer | `32` | Capacity of the channel to send data from parallel scan tasks to the main task. | | `region_engine.mito.parallel_scan_channel_size` | Integer | `32` | Capacity of the channel to send data from parallel scan tasks to the main task. |
| `region_engine.mito.allow_stale_entries` | Bool | `false` | Whether to allow stale WAL entries read during replay. | | `region_engine.mito.allow_stale_entries` | Bool | `false` | Whether to allow stale WAL entries read during replay. |
| `region_engine.mito.min_compaction_interval` | String | `0m` | Minimum time interval between two compactions.<br/>To align with the old behavior, the default value is 0 (no restrictions). | | `region_engine.mito.min_compaction_interval` | String | `0m` | Minimum time interval between two compactions.<br/>To align with the old behavior, the default value is 0 (no restrictions). |
| `region_engine.mito.index` | -- | -- | The options for index in Mito engine. | | `region_engine.mito.index` | -- | -- | The options for index in Mito engine. |
| `region_engine.mito.index.aux_path` | String | `""` | Auxiliary directory path for the index in filesystem, used to store intermediate files for<br/>creating the index and staging files for searching the index, defaults to `{data_home}/index_intermediate`.<br/>The default name for this directory is `index_intermediate` for backward compatibility.<br/><br/>This path contains two subdirectories:<br/>- `__intm`: for storing intermediate files used during creating index.<br/>- `staging`: for storing staging files used during searching index. | | `region_engine.mito.index.aux_path` | String | `""` | Auxiliary directory path for the index in filesystem, used to store intermediate files for<br/>creating the index and staging files for searching the index, defaults to `{data_home}/index_intermediate`.<br/>The default name for this directory is `index_intermediate` for backward compatibility.<br/><br/>This path contains two subdirectories:<br/>- `__intm`: for storing intermediate files used during creating index.<br/>- `staging`: for storing staging files used during searching index. |
| `region_engine.mito.index.staging_size` | String | `2GB` | The max capacity of the staging directory. | | `region_engine.mito.index.staging_size` | String | `2GB` | The max capacity of the staging directory. |
| `region_engine.mito.index.staging_ttl` | String | `7d` | The TTL of the staging directory.<br/>Defaults to 7 days.<br/>Setting it to "0s" to disable TTL. |
| `region_engine.mito.index.metadata_cache_size` | String | `64MiB` | Cache size for inverted index metadata. |
| `region_engine.mito.index.content_cache_size` | String | `128MiB` | Cache size for inverted index content. |
| `region_engine.mito.index.content_cache_page_size` | String | `64KiB` | Page size for inverted index content cache. |
| `region_engine.mito.inverted_index` | -- | -- | The options for inverted index in Mito engine. | | `region_engine.mito.inverted_index` | -- | -- | The options for inverted index in Mito engine. |
| `region_engine.mito.inverted_index.create_on_flush` | String | `auto` | Whether to create the index on flush.<br/>- `auto`: automatically (default)<br/>- `disable`: never | | `region_engine.mito.inverted_index.create_on_flush` | String | `auto` | Whether to create the index on flush.<br/>- `auto`: automatically (default)<br/>- `disable`: never |
| `region_engine.mito.inverted_index.create_on_compaction` | String | `auto` | Whether to create the index on compaction.<br/>- `auto`: automatically (default)<br/>- `disable`: never | | `region_engine.mito.inverted_index.create_on_compaction` | String | `auto` | Whether to create the index on compaction.<br/>- `auto`: automatically (default)<br/>- `disable`: never |
@@ -508,19 +472,12 @@
| `region_engine.mito.fulltext_index.create_on_compaction` | String | `auto` | Whether to create the index on compaction.<br/>- `auto`: automatically (default)<br/>- `disable`: never | | `region_engine.mito.fulltext_index.create_on_compaction` | String | `auto` | Whether to create the index on compaction.<br/>- `auto`: automatically (default)<br/>- `disable`: never |
| `region_engine.mito.fulltext_index.apply_on_query` | String | `auto` | Whether to apply the index on query<br/>- `auto`: automatically (default)<br/>- `disable`: never | | `region_engine.mito.fulltext_index.apply_on_query` | String | `auto` | Whether to apply the index on query<br/>- `auto`: automatically (default)<br/>- `disable`: never |
| `region_engine.mito.fulltext_index.mem_threshold_on_create` | String | `auto` | Memory threshold for index creation.<br/>- `auto`: automatically determine the threshold based on the system memory size (default)<br/>- `unlimited`: no memory limit<br/>- `[size]` e.g. `64MB`: fixed memory threshold | | `region_engine.mito.fulltext_index.mem_threshold_on_create` | String | `auto` | Memory threshold for index creation.<br/>- `auto`: automatically determine the threshold based on the system memory size (default)<br/>- `unlimited`: no memory limit<br/>- `[size]` e.g. `64MB`: fixed memory threshold |
| `region_engine.mito.bloom_filter_index` | -- | -- | The options for bloom filter index in Mito engine. |
| `region_engine.mito.bloom_filter_index.create_on_flush` | String | `auto` | Whether to create the index on flush.<br/>- `auto`: automatically (default)<br/>- `disable`: never |
| `region_engine.mito.bloom_filter_index.create_on_compaction` | String | `auto` | Whether to create the index on compaction.<br/>- `auto`: automatically (default)<br/>- `disable`: never |
| `region_engine.mito.bloom_filter_index.apply_on_query` | String | `auto` | Whether to apply the index on query<br/>- `auto`: automatically (default)<br/>- `disable`: never |
| `region_engine.mito.bloom_filter_index.mem_threshold_on_create` | String | `auto` | Memory threshold for the index creation.<br/>- `auto`: automatically determine the threshold based on the system memory size (default)<br/>- `unlimited`: no memory limit<br/>- `[size]` e.g. `64MB`: fixed memory threshold |
| `region_engine.mito.memtable` | -- | -- | -- | | `region_engine.mito.memtable` | -- | -- | -- |
| `region_engine.mito.memtable.type` | String | `time_series` | Memtable type.<br/>- `time_series`: time-series memtable<br/>- `partition_tree`: partition tree memtable (experimental) | | `region_engine.mito.memtable.type` | String | `time_series` | Memtable type.<br/>- `time_series`: time-series memtable<br/>- `partition_tree`: partition tree memtable (experimental) |
| `region_engine.mito.memtable.index_max_keys_per_shard` | Integer | `8192` | The max number of keys in one shard.<br/>Only available for `partition_tree` memtable. | | `region_engine.mito.memtable.index_max_keys_per_shard` | Integer | `8192` | The max number of keys in one shard.<br/>Only available for `partition_tree` memtable. |
| `region_engine.mito.memtable.data_freeze_threshold` | Integer | `32768` | The max rows of data inside the actively writing buffer in one shard.<br/>Only available for `partition_tree` memtable. | | `region_engine.mito.memtable.data_freeze_threshold` | Integer | `32768` | The max rows of data inside the actively writing buffer in one shard.<br/>Only available for `partition_tree` memtable. |
| `region_engine.mito.memtable.fork_dictionary_bytes` | String | `1GiB` | Max dictionary bytes.<br/>Only available for `partition_tree` memtable. | | `region_engine.mito.memtable.fork_dictionary_bytes` | String | `1GiB` | Max dictionary bytes.<br/>Only available for `partition_tree` memtable. |
| `region_engine.file` | -- | -- | Enable the file engine. | | `region_engine.file` | -- | -- | Enable the file engine. |
| `region_engine.metric` | -- | -- | Metric engine options. |
| `region_engine.metric.experimental_sparse_primary_key_encoding` | Bool | `false` | Whether to enable the experimental sparse primary key encoding. |
| `logging` | -- | -- | The logging options. | | `logging` | -- | -- | The logging options. |
| `logging.dir` | String | `/tmp/greptimedb/logs` | The directory to store the log files. If set to empty, logs will not be written to files. | | `logging.dir` | String | `/tmp/greptimedb/logs` | The directory to store the log files. If set to empty, logs will not be written to files. |
| `logging.level` | String | Unset | The log level. Can be `info`/`debug`/`warn`/`error`. | | `logging.level` | String | Unset | The log level. Can be `info`/`debug`/`warn`/`error`. |
@@ -553,18 +510,12 @@
| --- | -----| ------- | ----------- | | --- | -----| ------- | ----------- |
| `mode` | String | `distributed` | The running mode of the flownode. It can be `standalone` or `distributed`. | | `mode` | String | `distributed` | The running mode of the flownode. It can be `standalone` or `distributed`. |
| `node_id` | Integer | Unset | The flownode identifier and should be unique in the cluster. | | `node_id` | Integer | Unset | The flownode identifier and should be unique in the cluster. |
| `flow` | -- | -- | flow engine options. |
| `flow.num_workers` | Integer | `0` | The number of flow workers in the flownode.<br/>Not setting this value (or setting it to 0) will use the number of CPU cores divided by 2. |
| `grpc` | -- | -- | The gRPC server options. | | `grpc` | -- | -- | The gRPC server options. |
| `grpc.bind_addr` | String | `127.0.0.1:6800` | The address to bind the gRPC server. | | `grpc.addr` | String | `127.0.0.1:6800` | The address to bind the gRPC server. |
| `grpc.server_addr` | String | `127.0.0.1:6800` | The address advertised to the metasrv,<br/>and used for connections from outside the host | | `grpc.hostname` | String | `127.0.0.1` | The hostname advertised to the metasrv,<br/>and used for connections from outside the host |
| `grpc.runtime_size` | Integer | `2` | The number of server worker threads. | | `grpc.runtime_size` | Integer | `2` | The number of server worker threads. |
| `grpc.max_recv_message_size` | String | `512MB` | The maximum receive message size for gRPC server. | | `grpc.max_recv_message_size` | String | `512MB` | The maximum receive message size for gRPC server. |
| `grpc.max_send_message_size` | String | `512MB` | The maximum send message size for gRPC server. | | `grpc.max_send_message_size` | String | `512MB` | The maximum send message size for gRPC server. |
| `http` | -- | -- | The HTTP server options. |
| `http.addr` | String | `127.0.0.1:4000` | The address to bind the HTTP server. |
| `http.timeout` | String | `30s` | HTTP request timeout. Set to 0 to disable timeout. |
| `http.body_limit` | String | `64MB` | HTTP request body limit.<br/>The following units are supported: `B`, `KB`, `KiB`, `MB`, `MiB`, `GB`, `GiB`, `TB`, `TiB`, `PB`, `PiB`.<br/>Set to 0 to disable limit. |
| `meta_client` | -- | -- | The metasrv client options. | | `meta_client` | -- | -- | The metasrv client options. |
| `meta_client.metasrv_addrs` | Array | -- | The addresses of the metasrv. | | `meta_client.metasrv_addrs` | Array | -- | The addresses of the metasrv. |
| `meta_client.timeout` | String | `3s` | Operation timeout. | | `meta_client.timeout` | String | `3s` | Operation timeout. |
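For quick reference, here is a minimal sketch of a flownode configuration using the option names from one column of the table above (the other column uses `grpc.addr`/`grpc.hostname`); all values are placeholders, not recommendations:

```bash
# Minimal flownode config sketch, assuming the `grpc.bind_addr` /
# `grpc.server_addr` key names shown in the table above.
cat > flownode.toml <<'EOF'
mode = "distributed"
node_id = 14

[grpc]
bind_addr = "127.0.0.1:6800"
server_addr = "127.0.0.1:6800"
runtime_size = 2

[meta_client]
metasrv_addrs = ["127.0.0.1:3002"]
timeout = "3s"
EOF
```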


@@ -13,14 +13,35 @@ require_lease_before_startup = false
## By default, it provides services after all regions have been initialized. ## By default, it provides services after all regions have been initialized.
init_regions_in_background = false init_regions_in_background = false
## Enable telemetry to collect anonymous usage data.
enable_telemetry = true
## Parallelism of initializing regions. ## Parallelism of initializing regions.
init_regions_parallelism = 16 init_regions_parallelism = 16
## The maximum current queries allowed to be executed. Zero means unlimited. ## The maximum current queries allowed to be executed. Zero means unlimited.
max_concurrent_queries = 0 max_concurrent_queries = 0
## Enable telemetry to collect anonymous usage data. Enabled by default. ## Deprecated, use `grpc.addr` instead.
#+ enable_telemetry = true ## @toml2docs:none-default
rpc_addr = "127.0.0.1:3001"
## Deprecated, use `grpc.hostname` instead.
## @toml2docs:none-default
rpc_hostname = "127.0.0.1"
## Deprecated, use `grpc.runtime_size` instead.
## @toml2docs:none-default
rpc_runtime_size = 8
## Deprecated, use `grpc.rpc_max_recv_message_size` instead.
## @toml2docs:none-default
rpc_max_recv_message_size = "512MB"
## Deprecated, use `grpc.rpc_max_send_message_size` instead.
## @toml2docs:none-default
rpc_max_send_message_size = "512MB"
## The HTTP server options. ## The HTTP server options.
[http] [http]
@@ -36,11 +57,10 @@ body_limit = "64MB"
## The gRPC server options. ## The gRPC server options.
[grpc] [grpc]
## The address to bind the gRPC server. ## The address to bind the gRPC server.
bind_addr = "127.0.0.1:3001" addr = "127.0.0.1:3001"
## The address advertised to the metasrv, and used for connections from outside the host. ## The hostname advertised to the metasrv,
## If left empty or unset, the server will automatically use the IP address of the first network interface ## and used for connections from outside the host
## on the host, with the same port number as the one specified in `grpc.bind_addr`. hostname = "127.0.0.1"
server_addr = "127.0.0.1:3001"
## The number of server worker threads. ## The number of server worker threads.
runtime_size = 8 runtime_size = 8
## The maximum receive message size for gRPC server. ## The maximum receive message size for gRPC server.
@@ -123,15 +143,15 @@ dir = "/tmp/greptimedb/wal"
## The size of the WAL segment file. ## The size of the WAL segment file.
## **It's only used when the provider is `raft_engine`**. ## **It's only used when the provider is `raft_engine`**.
file_size = "128MB" file_size = "256MB"
## The threshold of the WAL size to trigger a flush. ## The threshold of the WAL size to trigger a flush.
## **It's only used when the provider is `raft_engine`**. ## **It's only used when the provider is `raft_engine`**.
purge_threshold = "1GB" purge_threshold = "4GB"
## The interval to trigger a flush. ## The interval to trigger a flush.
## **It's only used when the provider is `raft_engine`**. ## **It's only used when the provider is `raft_engine`**.
purge_interval = "1m" purge_interval = "10m"
## The read batch size. ## The read batch size.
## **It's only used when the provider is `raft_engine`**. ## **It's only used when the provider is `raft_engine`**.
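The hunk above shows that the two sides of this compare ship different `raft_engine` WAL defaults (`128MB`/`1GB`/`1m` versus `256MB`/`4GB`/`10m`). A hedged sketch of tuning these knobs, assuming a `provider` key selects the WAL backend:

```bash
# Illustrative raft_engine WAL tuning; the `provider` key and the chosen
# values are taken from or assumed by the hunk above, not authoritative.
cat >> datanode.toml <<'EOF'
[wal]
provider = "raft_engine"
dir = "/tmp/greptimedb/wal"
file_size = "128MB"
purge_threshold = "1GB"
purge_interval = "1m"
EOF
```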
@@ -231,7 +251,6 @@ overwrite_entry_start_id = false
# secret_access_key = "123456" # secret_access_key = "123456"
# endpoint = "https://s3.amazonaws.com" # endpoint = "https://s3.amazonaws.com"
# region = "us-west-2" # region = "us-west-2"
# enable_virtual_host_style = false
# Example of using Oss as the storage. # Example of using Oss as the storage.
# [storage] # [storage]
@@ -275,14 +294,14 @@ data_home = "/tmp/greptimedb/"
## - `Oss`: the data is stored in the Aliyun OSS. ## - `Oss`: the data is stored in the Aliyun OSS.
type = "File" type = "File"
## Read cache configuration for object storage such as 'S3' etc, it's configured by default when using object storage. It is recommended to configure it when using object storage for better performance. ## Cache configuration for object storage such as 'S3' etc.
## A local file directory, defaults to `{data_home}`. An empty string means disabling. ## The local file cache directory.
## @toml2docs:none-default ## @toml2docs:none-default
#+ cache_path = "" cache_path = "/path/local_cache"
## The local file cache capacity in bytes. If your disk space is sufficient, it is recommended to set it larger. ## The local file cache capacity in bytes.
## @toml2docs:none-default ## @toml2docs:none-default
cache_capacity = "5GiB" cache_capacity = "256MB"
## The S3 bucket name. ## The S3 bucket name.
## **It's only used when the storage type is `S3`, `Oss` and `Gcs`**. ## **It's only used when the storage type is `S3`, `Oss` and `Gcs`**.
@@ -356,23 +375,6 @@ endpoint = "https://s3.amazonaws.com"
## @toml2docs:none-default ## @toml2docs:none-default
region = "us-west-2" region = "us-west-2"
## The http client options to the storage.
## **It's only used when the storage type is `S3`, `Oss`, `Gcs` and `Azblob`**.
[storage.http_client]
## The maximum idle connection per host allowed in the pool.
pool_max_idle_per_host = 1024
## The timeout for only the connect phase of a http client.
connect_timeout = "30s"
## The total request timeout, applied from when the request starts connecting until the response body has finished.
## Also considered a total deadline.
timeout = "30s"
## The timeout for idle sockets being kept-alive.
pool_idle_timeout = "90s"
# Custom storage options # Custom storage options
# [[storage.providers]] # [[storage.providers]]
# name = "S3" # name = "S3"
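One side of this compare documents a `[storage.http_client]` table for object storage backends. A hedged sketch combining it with the commented S3 example above (credentials and bucket name are placeholders; `access_key_id` is assumed by analogy with `secret_access_key`):

```bash
# Illustrative S3 storage section plus the http client knobs from the hunk above.
cat >> datanode.toml <<'EOF'
[storage]
type = "S3"
bucket = "my-greptimedb-bucket"
access_key_id = "test"
secret_access_key = "123456"
endpoint = "https://s3.amazonaws.com"
region = "us-west-2"

[storage.http_client]
pool_max_idle_per_host = 1024
connect_timeout = "30s"
timeout = "30s"
pool_idle_timeout = "90s"
EOF
```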
@@ -457,22 +459,28 @@ auto_flush_interval = "1h"
## @toml2docs:none-default="Auto" ## @toml2docs:none-default="Auto"
#+ selector_result_cache_size = "512MB" #+ selector_result_cache_size = "512MB"
## Whether to enable the write cache, it's enabled by default when using object storage. It is recommended to enable it when using object storage for better performance. ## Whether to enable the experimental write cache.
enable_write_cache = false enable_experimental_write_cache = false
## File system path for write cache, defaults to `{data_home}`. ## File system path for write cache, defaults to `{data_home}/write_cache`.
write_cache_path = "" experimental_write_cache_path = ""
## Capacity for write cache. If your disk space is sufficient, it is recommended to set it larger. ## Capacity for write cache.
write_cache_size = "5GiB" experimental_write_cache_size = "512MB"
## TTL for write cache. ## TTL for write cache.
## @toml2docs:none-default ## @toml2docs:none-default
write_cache_ttl = "8h" experimental_write_cache_ttl = "8h"
## Buffer size for SST writing. ## Buffer size for SST writing.
sst_write_buffer_size = "8MB" sst_write_buffer_size = "8MB"
## Parallelism to scan a region (default: 1/4 of cpu cores).
## - `0`: using the default value (1/4 of cpu cores).
## - `1`: scan in current thread.
## - `n`: scan in parallelism n.
scan_parallelism = 0
## Capacity of the channel to send data from parallel scan tasks to the main task. ## Capacity of the channel to send data from parallel scan tasks to the main task.
parallel_scan_channel_size = 32 parallel_scan_channel_size = 32
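The write-cache options are named differently on the two sides of this compare (`enable_write_cache`/`write_cache_*` versus `enable_experimental_write_cache`/`experimental_write_cache_*`). A hedged sketch using the former set, assuming these keys live under `[region_engine.mito]`:

```bash
# Illustrative mito write-cache settings; switch to the `experimental_*`
# names if your build only understands those.
cat >> datanode.toml <<'EOF'
[[region_engine]]
[region_engine.mito]
enable_write_cache = true
write_cache_path = ""
write_cache_size = "5GiB"
sst_write_buffer_size = "8MB"
parallel_scan_channel_size = 32
EOF
```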
@@ -498,20 +506,6 @@ aux_path = ""
## The max capacity of the staging directory. ## The max capacity of the staging directory.
staging_size = "2GB" staging_size = "2GB"
## The TTL of the staging directory.
## Defaults to 7 days.
## Setting it to "0s" to disable TTL.
staging_ttl = "7d"
## Cache size for inverted index metadata.
metadata_cache_size = "64MiB"
## Cache size for inverted index content.
content_cache_size = "128MiB"
## Page size for inverted index content cache.
content_cache_page_size = "64KiB"
## The options for inverted index in Mito engine. ## The options for inverted index in Mito engine.
[region_engine.mito.inverted_index] [region_engine.mito.inverted_index]
@@ -563,30 +557,6 @@ apply_on_query = "auto"
## - `[size]` e.g. `64MB`: fixed memory threshold ## - `[size]` e.g. `64MB`: fixed memory threshold
mem_threshold_on_create = "auto" mem_threshold_on_create = "auto"
## The options for bloom filter index in Mito engine.
[region_engine.mito.bloom_filter_index]
## Whether to create the index on flush.
## - `auto`: automatically (default)
## - `disable`: never
create_on_flush = "auto"
## Whether to create the index on compaction.
## - `auto`: automatically (default)
## - `disable`: never
create_on_compaction = "auto"
## Whether to apply the index on query
## - `auto`: automatically (default)
## - `disable`: never
apply_on_query = "auto"
## Memory threshold for the index creation.
## - `auto`: automatically determine the threshold based on the system memory size (default)
## - `unlimited`: no memory limit
## - `[size]` e.g. `64MB`: fixed memory threshold
mem_threshold_on_create = "auto"
[region_engine.mito.memtable] [region_engine.mito.memtable]
## Memtable type. ## Memtable type.
## - `time_series`: time-series memtable ## - `time_series`: time-series memtable
@@ -609,12 +579,6 @@ fork_dictionary_bytes = "1GiB"
## Enable the file engine. ## Enable the file engine.
[region_engine.file] [region_engine.file]
[[region_engine]]
## Metric engine options.
[region_engine.metric]
## Whether to enable the experimental sparse primary key encoding.
experimental_sparse_primary_key_encoding = false
## The logging options. ## The logging options.
[logging] [logging]
## The directory to store the log files. If set to empty, logs will not be written to files. ## The directory to store the log files. If set to empty, logs will not be written to files.


@@ -5,19 +5,13 @@ mode = "distributed"
## @toml2docs:none-default ## @toml2docs:none-default
node_id = 14 node_id = 14
## flow engine options.
[flow]
## The number of flow worker in flownode.
## Not setting(or set to 0) this value will use the number of CPU cores divided by 2.
#+num_workers=0
## The gRPC server options. ## The gRPC server options.
[grpc] [grpc]
## The address to bind the gRPC server. ## The address to bind the gRPC server.
bind_addr = "127.0.0.1:6800" addr = "127.0.0.1:6800"
## The address advertised to the metasrv, ## The hostname advertised to the metasrv,
## and used for connections from outside the host ## and used for connections from outside the host
server_addr = "127.0.0.1:6800" hostname = "127.0.0.1"
## The number of server worker threads. ## The number of server worker threads.
runtime_size = 2 runtime_size = 2
## The maximum receive message size for gRPC server. ## The maximum receive message size for gRPC server.
@@ -25,16 +19,6 @@ max_recv_message_size = "512MB"
## The maximum send message size for gRPC server. ## The maximum send message size for gRPC server.
max_send_message_size = "512MB" max_send_message_size = "512MB"
## The HTTP server options.
[http]
## The address to bind the HTTP server.
addr = "127.0.0.1:4000"
## HTTP request timeout. Set to 0 to disable timeout.
timeout = "30s"
## HTTP request body limit.
## The following units are supported: `B`, `KB`, `KiB`, `MB`, `MiB`, `GB`, `GiB`, `TB`, `TiB`, `PB`, `PiB`.
## Set to 0 to disable limit.
body_limit = "64MB"
## The metasrv client options. ## The metasrv client options.
[meta_client] [meta_client]


@@ -2,10 +2,6 @@
## @toml2docs:none-default ## @toml2docs:none-default
default_timezone = "UTC" default_timezone = "UTC"
## The maximum in-flight write bytes.
## @toml2docs:none-default
#+ max_in_flight_write_bytes = "500MB"
## The runtime options. ## The runtime options.
#+ [runtime] #+ [runtime]
## The number of threads to execute the runtime for global read operations. ## The number of threads to execute the runtime for global read operations.
@@ -31,21 +27,14 @@ timeout = "30s"
## The following units are supported: `B`, `KB`, `KiB`, `MB`, `MiB`, `GB`, `GiB`, `TB`, `TiB`, `PB`, `PiB`. ## The following units are supported: `B`, `KB`, `KiB`, `MB`, `MiB`, `GB`, `GiB`, `TB`, `TiB`, `PB`, `PiB`.
## Set to 0 to disable limit. ## Set to 0 to disable limit.
body_limit = "64MB" body_limit = "64MB"
## HTTP CORS support, it's turned on by default
## This allows browser to access http APIs without CORS restrictions
enable_cors = true
## Customize allowed origins for HTTP CORS.
## @toml2docs:none-default
cors_allowed_origins = ["https://example.com"]
## The gRPC server options. ## The gRPC server options.
[grpc] [grpc]
## The address to bind the gRPC server. ## The address to bind the gRPC server.
bind_addr = "127.0.0.1:4001" addr = "127.0.0.1:4001"
## The address advertised to the metasrv, and used for connections from outside the host. ## The hostname advertised to the metasrv,
## If left empty or unset, the server will automatically use the IP address of the first network interface ## and used for connections from outside the host
## on the host, with the same port number as the one specified in `grpc.bind_addr`. hostname = "127.0.0.1"
server_addr = "127.0.0.1:4001"
## The number of server worker threads. ## The number of server worker threads.
runtime_size = 8 runtime_size = 8
@@ -74,9 +63,6 @@ enable = true
addr = "127.0.0.1:4002" addr = "127.0.0.1:4002"
## The number of server worker threads. ## The number of server worker threads.
runtime_size = 2 runtime_size = 2
## Server-side keep-alive time.
## Set to 0 (default) to disable.
keep_alive = "0s"
# MySQL server TLS options. # MySQL server TLS options.
[mysql.tls] [mysql.tls]
@@ -108,9 +94,6 @@ enable = true
addr = "127.0.0.1:4003" addr = "127.0.0.1:4003"
## The number of server worker threads. ## The number of server worker threads.
runtime_size = 2 runtime_size = 2
## Server-side keep-alive time.
## Set to 0 (default) to disable.
keep_alive = "0s"
## PostgresSQL server TLS options, see `mysql.tls` section. ## PostgresSQL server TLS options, see `mysql.tls` section.
[postgres.tls] [postgres.tls]
@@ -138,11 +121,6 @@ enable = true
## Whether to enable InfluxDB protocol in HTTP API. ## Whether to enable InfluxDB protocol in HTTP API.
enable = true enable = true
## Jaeger protocol options.
[jaeger]
## Whether to enable Jaeger protocol in HTTP API.
enable = true
## Prometheus remote storage options ## Prometheus remote storage options
[prom_store] [prom_store]
## Whether to enable Prometheus remote write and read in HTTP API. ## Whether to enable Prometheus remote write and read in HTTP API.


@@ -4,35 +4,11 @@ data_home = "/tmp/metasrv/"
## The bind address of metasrv. ## The bind address of metasrv.
bind_addr = "127.0.0.1:3002" bind_addr = "127.0.0.1:3002"
## The communication server address for the frontend and datanode to connect to metasrv. ## The communication server address for frontend and datanode to connect to metasrv, "127.0.0.1:3002" by default for localhost.
## If left empty or unset, the server will automatically use the IP address of the first network interface
## on the host, with the same port number as the one specified in `bind_addr`.
server_addr = "127.0.0.1:3002" server_addr = "127.0.0.1:3002"
## Store server address default to etcd store. ## Store server address default to etcd store.
## For postgres store, the format is: store_addr = "127.0.0.1:2379"
## "password=password dbname=postgres user=postgres host=localhost port=5432"
## For etcd store, the format is:
## "127.0.0.1:2379"
store_addrs = ["127.0.0.1:2379"]
## If it's not empty, the metasrv will store all data with this key prefix.
store_key_prefix = ""
## The datastore for meta server.
## Available values:
## - `etcd_store` (default value)
## - `memory_store`
## - `postgres_store`
backend = "etcd_store"
## Table name in RDS to store metadata. Effect when using a RDS kvbackend.
## **Only used when backend is `postgres_store`.**
meta_table_name = "greptime_metakv"
## Advisory lock id in PostgreSQL for election. Effect when using PostgreSQL as kvbackend
## Only used when backend is `postgres_store`.
meta_election_lock_id = 1
## Datanode selector type. ## Datanode selector type.
## - `round_robin` (default value) ## - `round_robin` (default value)
@@ -44,17 +20,20 @@ selector = "round_robin"
## Store data in memory. ## Store data in memory.
use_memory_store = false use_memory_store = false
## Whether to enable greptimedb telemetry.
enable_telemetry = true
## If it's not empty, the metasrv will store all data with this key prefix.
store_key_prefix = ""
## Whether to enable region failover. ## Whether to enable region failover.
## This feature is only available on GreptimeDB running on cluster mode and ## This feature is only available on GreptimeDB running on cluster mode and
## - Using Remote WAL ## - Using Remote WAL
## - Using shared storage (e.g., s3). ## - Using shared storage (e.g., s3).
enable_region_failover = false enable_region_failover = false
## Max allowed idle time before removing node info from metasrv memory. ## The datastore for meta server.
node_max_idle_time = "24hours" backend = "EtcdStore"
## Whether to enable greptimedb telemetry. Enabled by default.
#+ enable_telemetry = true
## The runtime options. ## The runtime options.
#+ [runtime] #+ [runtime]
@@ -134,8 +113,6 @@ num_topics = 64
selector_type = "round_robin" selector_type = "round_robin"
## A Kafka topic is constructed by concatenating `topic_name_prefix` and `topic_id`. ## A Kafka topic is constructed by concatenating `topic_name_prefix` and `topic_id`.
## Only accepts strings that match the following regular expression pattern:
## [a-zA-Z_:-][a-zA-Z0-9_:\-\.@#]*
## i.g., greptimedb_wal_topic_0, greptimedb_wal_topic_1. ## i.g., greptimedb_wal_topic_0, greptimedb_wal_topic_1.
topic_name_prefix = "greptimedb_wal_topic" topic_name_prefix = "greptimedb_wal_topic"
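Pulling together the metasrv store options shown earlier in this file, here is a hedged sketch of a PostgreSQL-backed metasrv configuration (only one side of this compare documents `backend`, `meta_table_name` and `meta_election_lock_id`; the connection string and lock id are placeholders):

```bash
# Illustrative metasrv config for the `postgres_store` backend.
cat > metasrv.toml <<'EOF'
data_home = "/tmp/metasrv/"
bind_addr = "127.0.0.1:3002"
server_addr = "127.0.0.1:3002"
store_addrs = ["password=password dbname=postgres user=postgres host=localhost port=5432"]
backend = "postgres_store"
meta_table_name = "greptime_metakv"
meta_election_lock_id = 1
EOF
```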


@@ -1,6 +1,9 @@
## The running mode of the datanode. It can be `standalone` or `distributed`. ## The running mode of the datanode. It can be `standalone` or `distributed`.
mode = "standalone" mode = "standalone"
## Enable telemetry to collect anonymous usage data.
enable_telemetry = true
## The default timezone of the server. ## The default timezone of the server.
## @toml2docs:none-default ## @toml2docs:none-default
default_timezone = "UTC" default_timezone = "UTC"
@@ -15,13 +18,6 @@ init_regions_parallelism = 16
## The maximum current queries allowed to be executed. Zero means unlimited. ## The maximum current queries allowed to be executed. Zero means unlimited.
max_concurrent_queries = 0 max_concurrent_queries = 0
## Enable telemetry to collect anonymous usage data. Enabled by default.
#+ enable_telemetry = true
## The maximum in-flight write bytes.
## @toml2docs:none-default
#+ max_in_flight_write_bytes = "500MB"
## The runtime options. ## The runtime options.
#+ [runtime] #+ [runtime]
## The number of threads to execute the runtime for global read operations. ## The number of threads to execute the runtime for global read operations.
@@ -39,17 +35,11 @@ timeout = "30s"
## The following units are supported: `B`, `KB`, `KiB`, `MB`, `MiB`, `GB`, `GiB`, `TB`, `TiB`, `PB`, `PiB`. ## The following units are supported: `B`, `KB`, `KiB`, `MB`, `MiB`, `GB`, `GiB`, `TB`, `TiB`, `PB`, `PiB`.
## Set to 0 to disable limit. ## Set to 0 to disable limit.
body_limit = "64MB" body_limit = "64MB"
## HTTP CORS support, it's turned on by default
## This allows browser to access http APIs without CORS restrictions
enable_cors = true
## Customize allowed origins for HTTP CORS.
## @toml2docs:none-default
cors_allowed_origins = ["https://example.com"]
## The gRPC server options. ## The gRPC server options.
[grpc] [grpc]
## The address to bind the gRPC server. ## The address to bind the gRPC server.
bind_addr = "127.0.0.1:4001" addr = "127.0.0.1:4001"
## The number of server worker threads. ## The number of server worker threads.
runtime_size = 8 runtime_size = 8
@@ -78,9 +68,6 @@ enable = true
addr = "127.0.0.1:4002" addr = "127.0.0.1:4002"
## The number of server worker threads. ## The number of server worker threads.
runtime_size = 2 runtime_size = 2
## Server-side keep-alive time.
## Set to 0 (default) to disable.
keep_alive = "0s"
# MySQL server TLS options. # MySQL server TLS options.
[mysql.tls] [mysql.tls]
@@ -112,9 +99,6 @@ enable = true
addr = "127.0.0.1:4003" addr = "127.0.0.1:4003"
## The number of server worker threads. ## The number of server worker threads.
runtime_size = 2 runtime_size = 2
## Server-side keep-alive time.
## Set to 0 (default) to disable.
keep_alive = "0s"
## PostgresSQL server TLS options, see `mysql.tls` section. ## PostgresSQL server TLS options, see `mysql.tls` section.
[postgres.tls] [postgres.tls]
@@ -142,11 +126,6 @@ enable = true
## Whether to enable InfluxDB protocol in HTTP API. ## Whether to enable InfluxDB protocol in HTTP API.
enable = true enable = true
## Jaeger protocol options.
[jaeger]
## Whether to enable Jaeger protocol in HTTP API.
enable = true
## Prometheus remote storage options ## Prometheus remote storage options
[prom_store] [prom_store]
## Whether to enable Prometheus remote write and read in HTTP API. ## Whether to enable Prometheus remote write and read in HTTP API.
@@ -168,15 +147,15 @@ dir = "/tmp/greptimedb/wal"
## The size of the WAL segment file. ## The size of the WAL segment file.
## **It's only used when the provider is `raft_engine`**. ## **It's only used when the provider is `raft_engine`**.
file_size = "128MB" file_size = "256MB"
## The threshold of the WAL size to trigger a purge. ## The threshold of the WAL size to trigger a flush.
## **It's only used when the provider is `raft_engine`**. ## **It's only used when the provider is `raft_engine`**.
purge_threshold = "1GB" purge_threshold = "4GB"
## The interval to trigger a purge. ## The interval to trigger a flush.
## **It's only used when the provider is `raft_engine`**. ## **It's only used when the provider is `raft_engine`**.
purge_interval = "1m" purge_interval = "10m"
## The read batch size. ## The read batch size.
## **It's only used when the provider is `raft_engine`**. ## **It's only used when the provider is `raft_engine`**.
@@ -289,12 +268,10 @@ overwrite_entry_start_id = false
## Metadata storage options. ## Metadata storage options.
[metadata_store] [metadata_store]
## The size of the metadata store log file. ## Kv file size in bytes.
file_size = "64MB" file_size = "256MB"
## The threshold of the metadata store size to trigger a purge. ## Kv purge threshold.
purge_threshold = "256MB" purge_threshold = "4GB"
## The interval of the metadata store to trigger a purge.
purge_interval = "1m"
## Procedure storage options. ## Procedure storage options.
[procedure] [procedure]
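For the `[metadata_store]` options just above, a hedged sketch using the defaults from one side of the hunk (the other side uses `256MB`/`4GB` and drops `purge_interval`):

```bash
# Illustrative standalone metadata store settings; values mirror the hunk above.
cat >> standalone.toml <<'EOF'
[metadata_store]
file_size = "64MB"
purge_threshold = "256MB"
purge_interval = "1m"
EOF
```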
@@ -303,12 +280,6 @@ max_retry_times = 3
## Initial retry delay of procedures, increases exponentially ## Initial retry delay of procedures, increases exponentially
retry_delay = "500ms" retry_delay = "500ms"
## flow engine options.
[flow]
## The number of flow worker in flownode.
## Not setting(or set to 0) this value will use the number of CPU cores divided by 2.
#+num_workers=0
# Example of using S3 as the storage. # Example of using S3 as the storage.
# [storage] # [storage]
# type = "S3" # type = "S3"
@@ -318,7 +289,6 @@ retry_delay = "500ms"
# secret_access_key = "123456" # secret_access_key = "123456"
# endpoint = "https://s3.amazonaws.com" # endpoint = "https://s3.amazonaws.com"
# region = "us-west-2" # region = "us-west-2"
# enable_virtual_host_style = false
# Example of using Oss as the storage. # Example of using Oss as the storage.
# [storage] # [storage]
@@ -362,14 +332,14 @@ data_home = "/tmp/greptimedb/"
## - `Oss`: the data is stored in the Aliyun OSS. ## - `Oss`: the data is stored in the Aliyun OSS.
type = "File" type = "File"
## Read cache configuration for object storage such as 'S3' etc, it's configured by default when using object storage. It is recommended to configure it when using object storage for better performance. ## Cache configuration for object storage such as 'S3' etc.
## A local file directory, defaults to `{data_home}`. An empty string means disabling. ## The local file cache directory.
## @toml2docs:none-default ## @toml2docs:none-default
#+ cache_path = "" cache_path = "/path/local_cache"
## The local file cache capacity in bytes. If your disk space is sufficient, it is recommended to set it larger. ## The local file cache capacity in bytes.
## @toml2docs:none-default ## @toml2docs:none-default
cache_capacity = "5GiB" cache_capacity = "256MB"
## The S3 bucket name. ## The S3 bucket name.
## **It's only used when the storage type is `S3`, `Oss` and `Gcs`**. ## **It's only used when the storage type is `S3`, `Oss` and `Gcs`**.
@@ -443,23 +413,6 @@ endpoint = "https://s3.amazonaws.com"
## @toml2docs:none-default ## @toml2docs:none-default
region = "us-west-2" region = "us-west-2"
## The http client options to the storage.
## **It's only used when the storage type is `S3`, `Oss`, `Gcs` and `Azblob`**.
[storage.http_client]
## The maximum idle connection per host allowed in the pool.
pool_max_idle_per_host = 1024
## The timeout for only the connect phase of a http client.
connect_timeout = "30s"
## The total request timeout, applied from when the request starts connecting until the response body has finished.
## Also considered a total deadline.
timeout = "30s"
## The timeout for idle sockets being kept-alive.
pool_idle_timeout = "90s"
# Custom storage options # Custom storage options
# [[storage.providers]] # [[storage.providers]]
# name = "S3" # name = "S3"
@@ -544,22 +497,28 @@ auto_flush_interval = "1h"
## @toml2docs:none-default="Auto" ## @toml2docs:none-default="Auto"
#+ selector_result_cache_size = "512MB" #+ selector_result_cache_size = "512MB"
## Whether to enable the write cache, it's enabled by default when using object storage. It is recommended to enable it when using object storage for better performance. ## Whether to enable the experimental write cache.
enable_write_cache = false enable_experimental_write_cache = false
## File system path for write cache, defaults to `{data_home}`. ## File system path for write cache, defaults to `{data_home}/write_cache`.
write_cache_path = "" experimental_write_cache_path = ""
## Capacity for write cache. If your disk space is sufficient, it is recommended to set it larger. ## Capacity for write cache.
write_cache_size = "5GiB" experimental_write_cache_size = "512MB"
## TTL for write cache. ## TTL for write cache.
## @toml2docs:none-default ## @toml2docs:none-default
write_cache_ttl = "8h" experimental_write_cache_ttl = "8h"
## Buffer size for SST writing. ## Buffer size for SST writing.
sst_write_buffer_size = "8MB" sst_write_buffer_size = "8MB"
## Parallelism to scan a region (default: 1/4 of cpu cores).
## - `0`: using the default value (1/4 of cpu cores).
## - `1`: scan in current thread.
## - `n`: scan in parallelism n.
scan_parallelism = 0
## Capacity of the channel to send data from parallel scan tasks to the main task. ## Capacity of the channel to send data from parallel scan tasks to the main task.
parallel_scan_channel_size = 32 parallel_scan_channel_size = 32
@@ -585,20 +544,6 @@ aux_path = ""
## The max capacity of the staging directory. ## The max capacity of the staging directory.
staging_size = "2GB" staging_size = "2GB"
## The TTL of the staging directory.
## Defaults to 7 days.
## Setting it to "0s" to disable TTL.
staging_ttl = "7d"
## Cache size for inverted index metadata.
metadata_cache_size = "64MiB"
## Cache size for inverted index content.
content_cache_size = "128MiB"
## Page size for inverted index content cache.
content_cache_page_size = "64KiB"
## The options for inverted index in Mito engine. ## The options for inverted index in Mito engine.
[region_engine.mito.inverted_index] [region_engine.mito.inverted_index]
@@ -626,6 +571,12 @@ mem_threshold_on_create = "auto"
## Deprecated, use `region_engine.mito.index.aux_path` instead. ## Deprecated, use `region_engine.mito.index.aux_path` instead.
intermediate_path = "" intermediate_path = ""
## Cache size for inverted index metadata.
metadata_cache_size = "64MiB"
## Cache size for inverted index content.
content_cache_size = "128MiB"
## The options for full-text index in Mito engine. ## The options for full-text index in Mito engine.
[region_engine.mito.fulltext_index] [region_engine.mito.fulltext_index]
@@ -650,30 +601,6 @@ apply_on_query = "auto"
## - `[size]` e.g. `64MB`: fixed memory threshold ## - `[size]` e.g. `64MB`: fixed memory threshold
mem_threshold_on_create = "auto" mem_threshold_on_create = "auto"
## The options for bloom filter in Mito engine.
[region_engine.mito.bloom_filter_index]
## Whether to create the bloom filter on flush.
## - `auto`: automatically (default)
## - `disable`: never
create_on_flush = "auto"
## Whether to create the bloom filter on compaction.
## - `auto`: automatically (default)
## - `disable`: never
create_on_compaction = "auto"
## Whether to apply the bloom filter on query
## - `auto`: automatically (default)
## - `disable`: never
apply_on_query = "auto"
## Memory threshold for bloom filter creation.
## - `auto`: automatically determine the threshold based on the system memory size (default)
## - `unlimited`: no memory limit
## - `[size]` e.g. `64MB`: fixed memory threshold
mem_threshold_on_create = "auto"
[region_engine.mito.memtable] [region_engine.mito.memtable]
## Memtable type. ## Memtable type.
## - `time_series`: time-series memtable ## - `time_series`: time-series memtable
@@ -696,12 +623,6 @@ fork_dictionary_bytes = "1GiB"
## Enable the file engine. ## Enable the file engine.
[region_engine.file] [region_engine.file]
[[region_engine]]
## Metric engine options.
[region_engine.metric]
## Whether to enable the experimental sparse primary key encoding.
experimental_sparse_primary_key_encoding = false
## The logging options. ## The logging options.
[logging] [logging]
## The directory to store the log files. If set to empty, logs will not be written to files. ## The directory to store the log files. If set to empty, logs will not be written to files.


@@ -1,75 +0,0 @@
/*
* Copyright 2023 Greptime Team
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
import * as core from "@actions/core";
import {obtainClient} from "@/common";
async function triggerWorkflow(workflowId: string, version: string) {
  const docsClient = obtainClient("DOCS_REPO_TOKEN")
  try {
    await docsClient.rest.actions.createWorkflowDispatch({
      owner: "GreptimeTeam",
      repo: "docs",
      workflow_id: workflowId,
      ref: "main",
      inputs: {
        version,
      },
    });
    console.log(`Successfully triggered ${workflowId} workflow with version ${version}`);
  } catch (error) {
    core.setFailed(`Failed to trigger workflow: ${error.message}`);
  }
}

function determineWorkflow(version: string): [string, string] {
  // Check if it's a nightly version
  if (version.includes('nightly')) {
    return ['bump-nightly-version.yml', version];
  }
  const parts = version.split('.');
  if (parts.length !== 3) {
    throw new Error('Invalid version format');
  }
  // If patch version (last number) is 0, it's a major version
  // Return only major.minor version
  if (parts[2] === '0') {
    return ['bump-version.yml', `${parts[0]}.${parts[1]}`];
  }
  // Otherwise it's a patch version, use full version
  return ['bump-patch-version.yml', version];
}

const version = process.env.VERSION;
if (!version) {
  core.setFailed("VERSION environment variable is required");
  process.exit(1);
}

// Remove 'v' prefix if exists
const cleanVersion = version.startsWith('v') ? version.slice(1) : version;

try {
  const [workflowId, apiVersion] = determineWorkflow(cleanVersion);
  triggerWorkflow(workflowId, apiVersion);
} catch (error) {
  core.setFailed(`Error processing version: ${error.message}`);
  process.exit(1);
}
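A hedged sketch of how this script might be invoked locally; the file name and the `ts-node` runner are assumptions, and the token env var name assumes `obtainClient` reads `DOCS_REPO_TOKEN` from the environment:

```bash
# Hypothetical local invocation of the single-repo bump script above.
VERSION=v0.9.5 DOCS_REPO_TOKEN=ghp_xxxxxxxx npx ts-node bump-doc-version.ts
```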


@@ -1,156 +0,0 @@
/*
* Copyright 2023 Greptime Team
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
import * as core from "@actions/core";
import {obtainClient} from "@/common";
interface RepoConfig {
  tokenEnv: string;
  repo: string;
  workflowLogic: (version: string) => [string, string] | null;
}

const REPO_CONFIGS: Record<string, RepoConfig> = {
  website: {
    tokenEnv: "WEBSITE_REPO_TOKEN",
    repo: "website",
    workflowLogic: (version: string) => {
      // Skip nightly versions for website
      if (version.includes('nightly')) {
        console.log('Nightly version detected for website, skipping workflow trigger.');
        return null;
      }
      return ['bump-patch-version.yml', version];
    }
  },
  demo: {
    tokenEnv: "DEMO_REPO_TOKEN",
    repo: "demo-scene",
    workflowLogic: (version: string) => {
      // Skip nightly versions for demo
      if (version.includes('nightly')) {
        console.log('Nightly version detected for demo, skipping workflow trigger.');
        return null;
      }
      return ['bump-patch-version.yml', version];
    }
  },
  docs: {
    tokenEnv: "DOCS_REPO_TOKEN",
    repo: "docs",
    workflowLogic: (version: string) => {
      // Check if it's a nightly version
      if (version.includes('nightly')) {
        return ['bump-nightly-version.yml', version];
      }
      const parts = version.split('.');
      if (parts.length !== 3) {
        throw new Error('Invalid version format');
      }
      // If patch version (last number) is 0, it's a major version
      // Return only major.minor version
      if (parts[2] === '0') {
        return ['bump-version.yml', `${parts[0]}.${parts[1]}`];
      }
      // Otherwise it's a patch version, use full version
      return ['bump-patch-version.yml', version];
    }
  }
};

async function triggerWorkflow(repoConfig: RepoConfig, workflowId: string, version: string) {
  const client = obtainClient(repoConfig.tokenEnv);
  try {
    await client.rest.actions.createWorkflowDispatch({
      owner: "GreptimeTeam",
      repo: repoConfig.repo,
      workflow_id: workflowId,
      ref: "main",
      inputs: {
        version,
      },
    });
    console.log(`Successfully triggered ${workflowId} workflow for ${repoConfig.repo} with version ${version}`);
  } catch (error) {
    core.setFailed(`Failed to trigger workflow for ${repoConfig.repo}: ${error.message}`);
    throw error;
  }
}

async function processRepo(repoName: string, version: string) {
  const repoConfig = REPO_CONFIGS[repoName];
  if (!repoConfig) {
    throw new Error(`Unknown repository: ${repoName}`);
  }
  try {
    const workflowResult = repoConfig.workflowLogic(version);
    if (workflowResult === null) {
      // Skip this repo (e.g., nightly version for website)
      return;
    }
    const [workflowId, apiVersion] = workflowResult;
    await triggerWorkflow(repoConfig, workflowId, apiVersion);
  } catch (error) {
    core.setFailed(`Error processing ${repoName} with version ${version}: ${error.message}`);
    throw error;
  }
}

async function main() {
  const version = process.env.VERSION;
  if (!version) {
    core.setFailed("VERSION environment variable is required");
    process.exit(1);
  }
  // Remove 'v' prefix if exists
  const cleanVersion = version.startsWith('v') ? version.slice(1) : version;
  // Get target repositories from environment variable
  // Default to both if not specified
  const targetRepos = process.env.TARGET_REPOS?.split(',').map(repo => repo.trim()) || ['website', 'docs'];
  console.log(`Processing version ${cleanVersion} for repositories: ${targetRepos.join(', ')}`);
  const errors: string[] = [];
  // Process each repository
  for (const repo of targetRepos) {
    try {
      await processRepo(repo, cleanVersion);
    } catch (error) {
      errors.push(`${repo}: ${error.message}`);
    }
  }
  if (errors.length > 0) {
    core.setFailed(`Failed to process some repositories: ${errors.join('; ')}`);
    process.exit(1);
  }
  console.log('All repositories processed successfully');
}

// Execute main function
main().catch((error) => {
  core.setFailed(`Unexpected error: ${error.message}`);
  process.exit(1);
});
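And a hedged invocation sketch for the multi-repo variant above, showing the `TARGET_REPOS` selection (the file name, the `ts-node` runner, and the assumption that the token env vars are read directly from the environment are all hypothetical):

```bash
# Hypothetical invocation: bump website, docs and demo-scene for a release tag.
VERSION=v0.9.5 TARGET_REPOS=website,docs,demo \
  WEBSITE_REPO_TOKEN=ghp_xxx DOCS_REPO_TOKEN=ghp_xxx DEMO_REPO_TOKEN=ghp_xxx \
  npx ts-node bump-downstream-versions.ts
```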


@@ -55,25 +55,12 @@ async function main() {
await client.rest.issues.addLabels({ await client.rest.issues.addLabels({
owner, repo, issue_number: number, labels: [labelDocsRequired], owner, repo, issue_number: number, labels: [labelDocsRequired],
}) })
// Get available assignees for the docs repo
const assigneesResponse = await docsClient.rest.issues.listAssignees({
owner: 'GreptimeTeam',
repo: 'docs',
})
const validAssignees = assigneesResponse.data.map(assignee => assignee.login)
core.info(`Available assignees: ${validAssignees.join(', ')}`)
// Check if the actor is a valid assignee, otherwise fallback to fengjiachun
const assignee = validAssignees.includes(actor) ? actor : 'fengjiachun'
core.info(`Assigning issue to: ${assignee}`)
await docsClient.rest.issues.create({ await docsClient.rest.issues.create({
owner: 'GreptimeTeam', owner: 'GreptimeTeam',
repo: 'docs', repo: 'docs',
title: `Update docs for ${title}`, title: `Update docs for ${title}`,
body: `A document change request is generated from ${html_url}`, body: `A document change request is generated from ${html_url}`,
assignee: assignee, assignee: actor,
}).then((res) => { }).then((res) => {
core.info(`Created issue ${res.data}`) core.info(`Created issue ${res.data}`)
}) })


@@ -13,6 +13,8 @@ RUN yum install -y epel-release \
openssl \ openssl \
openssl-devel \ openssl-devel \
centos-release-scl \ centos-release-scl \
rh-python38 \
rh-python38-python-devel \
which which
# Install protoc # Install protoc
@@ -22,7 +24,7 @@ RUN unzip protoc-3.15.8-linux-x86_64.zip -d /usr/local/
# Install Rust # Install Rust
SHELL ["/bin/bash", "-c"] SHELL ["/bin/bash", "-c"]
RUN curl --proto '=https' --tlsv1.2 -sSf https://sh.rustup.rs | sh -s -- --no-modify-path --default-toolchain none -y RUN curl --proto '=https' --tlsv1.2 -sSf https://sh.rustup.rs | sh -s -- --no-modify-path --default-toolchain none -y
ENV PATH /usr/local/bin:/root/.cargo/bin/:$PATH ENV PATH /opt/rh/rh-python38/root/usr/bin:/usr/local/bin:/root/.cargo/bin/:$PATH
# Build the project in release mode. # Build the project in release mode.
RUN --mount=target=.,rw \ RUN --mount=target=.,rw \
@@ -41,6 +43,8 @@ RUN yum install -y epel-release \
openssl \ openssl \
openssl-devel \ openssl-devel \
centos-release-scl \ centos-release-scl \
rh-python38 \
rh-python38-python-devel \
which which
WORKDIR /greptime WORKDIR /greptime


@@ -7,8 +7,10 @@ ARG OUTPUT_DIR
ENV LANG en_US.utf8 ENV LANG en_US.utf8
WORKDIR /greptimedb WORKDIR /greptimedb
# Add PPA for Python 3.10.
RUN apt-get update && \ RUN apt-get update && \
DEBIAN_FRONTEND=noninteractive apt-get install -y software-properties-common DEBIAN_FRONTEND=noninteractive apt-get install -y software-properties-common && \
add-apt-repository ppa:deadsnakes/ppa -y
# Install dependencies. # Install dependencies.
RUN --mount=type=cache,target=/var/cache/apt \ RUN --mount=type=cache,target=/var/cache/apt \
@@ -18,7 +20,10 @@ RUN --mount=type=cache,target=/var/cache/apt \
curl \ curl \
git \ git \
build-essential \ build-essential \
pkg-config pkg-config \
python3.10 \
python3.10-dev \
python3-pip
# Install Rust. # Install Rust.
SHELL ["/bin/bash", "-c"] SHELL ["/bin/bash", "-c"]
@@ -41,8 +46,15 @@ ARG OUTPUT_DIR
RUN apt-get update && DEBIAN_FRONTEND=noninteractive apt-get \ RUN apt-get update && DEBIAN_FRONTEND=noninteractive apt-get \
-y install ca-certificates \ -y install ca-certificates \
python3.10 \
python3.10-dev \
python3-pip \
curl curl
COPY ./docker/python/requirements.txt /etc/greptime/requirements.txt
RUN python3 -m pip install -r /etc/greptime/requirements.txt
WORKDIR /greptime WORKDIR /greptime
COPY --from=builder /out/target/${OUTPUT_DIR}/greptime /greptime/bin/ COPY --from=builder /out/target/${OUTPUT_DIR}/greptime /greptime/bin/
ENV PATH /greptime/bin/:$PATH ENV PATH /greptime/bin/:$PATH


@@ -7,7 +7,9 @@ RUN sed -i s/^#.*baseurl=http/baseurl=http/g /etc/yum.repos.d/*.repo
RUN yum install -y epel-release \ RUN yum install -y epel-release \
openssl \ openssl \
openssl-devel \ openssl-devel \
centos-release-scl centos-release-scl \
rh-python38 \
rh-python38-python-devel
ARG TARGETARCH ARG TARGETARCH


@@ -8,8 +8,15 @@ ARG TARGET_BIN=greptime
RUN apt-get update && DEBIAN_FRONTEND=noninteractive apt-get install -y \ RUN apt-get update && DEBIAN_FRONTEND=noninteractive apt-get install -y \
ca-certificates \ ca-certificates \
python3.10 \
python3.10-dev \
python3-pip \
curl curl
COPY $DOCKER_BUILD_ROOT/docker/python/requirements.txt /etc/greptime/requirements.txt
RUN python3 -m pip install -r /etc/greptime/requirements.txt
ARG TARGETARCH ARG TARGETARCH
ADD $TARGETARCH/$TARGET_BIN /greptime/bin/ ADD $TARGETARCH/$TARGET_BIN /greptime/bin/


@@ -9,20 +9,16 @@ RUN cp ${NDK_ROOT}/toolchains/llvm/prebuilt/linux-x86_64/lib64/clang/14.0.7/lib/
# Install dependencies. # Install dependencies.
RUN apt-get update && apt-get install -y \ RUN apt-get update && apt-get install -y \
libssl-dev \ libssl-dev \
protobuf-compiler \
curl \ curl \
git \ git \
unzip \
build-essential \ build-essential \
pkg-config pkg-config \
python3 \
# Install protoc python3-dev \
ARG PROTOBUF_VERSION=29.3 python3-pip \
&& pip3 install --upgrade pip \
RUN curl -OL https://github.com/protocolbuffers/protobuf/releases/download/v${PROTOBUF_VERSION}/protoc-${PROTOBUF_VERSION}-linux-x86_64.zip && \ && pip3 install pyarrow
unzip protoc-${PROTOBUF_VERSION}-linux-x86_64.zip -d protoc3;
RUN mv protoc3/bin/* /usr/local/bin/
RUN mv protoc3/include/* /usr/local/include/
# Trust workdir # Trust workdir
RUN git config --global --add safe.directory /greptimedb RUN git config --global --add safe.directory /greptimedb


@@ -12,21 +12,18 @@ RUN yum install -y epel-release \
openssl \ openssl \
openssl-devel \ openssl-devel \
centos-release-scl \ centos-release-scl \
rh-python38 \
rh-python38-python-devel \
which which
# Install protoc # Install protoc
ARG PROTOBUF_VERSION=29.3 RUN curl -LO https://github.com/protocolbuffers/protobuf/releases/download/v3.15.8/protoc-3.15.8-linux-x86_64.zip
RUN unzip protoc-3.15.8-linux-x86_64.zip -d /usr/local/
RUN curl -OL https://github.com/protocolbuffers/protobuf/releases/download/v${PROTOBUF_VERSION}/protoc-${PROTOBUF_VERSION}-linux-x86_64.zip && \
unzip protoc-${PROTOBUF_VERSION}-linux-x86_64.zip -d protoc3;
RUN mv protoc3/bin/* /usr/local/bin/
RUN mv protoc3/include/* /usr/local/include/
# Install Rust # Install Rust
SHELL ["/bin/bash", "-c"] SHELL ["/bin/bash", "-c"]
RUN curl --proto '=https' --tlsv1.2 -sSf https://sh.rustup.rs | sh -s -- --no-modify-path --default-toolchain none -y RUN curl --proto '=https' --tlsv1.2 -sSf https://sh.rustup.rs | sh -s -- --no-modify-path --default-toolchain none -y
ENV PATH /usr/local/bin:/root/.cargo/bin/:$PATH ENV PATH /opt/rh/rh-python38/root/usr/bin:/usr/local/bin:/root/.cargo/bin/:$PATH
# Install Rust toolchains. # Install Rust toolchains.
ARG RUST_TOOLCHAIN ARG RUST_TOOLCHAIN


@@ -6,34 +6,38 @@ ARG DOCKER_BUILD_ROOT=.
ENV LANG en_US.utf8 ENV LANG en_US.utf8
WORKDIR /greptimedb WORKDIR /greptimedb
# Add PPA for Python 3.10.
RUN apt-get update && \ RUN apt-get update && \
DEBIAN_FRONTEND=noninteractive apt-get install -y software-properties-common DEBIAN_FRONTEND=noninteractive apt-get install -y software-properties-common && \
add-apt-repository ppa:deadsnakes/ppa -y
# Install dependencies. # Install dependencies.
RUN apt-get update && DEBIAN_FRONTEND=noninteractive apt-get install -y \ RUN apt-get update && DEBIAN_FRONTEND=noninteractive apt-get install -y \
libssl-dev \ libssl-dev \
tzdata \ tzdata \
protobuf-compiler \
curl \ curl \
unzip \
ca-certificates \ ca-certificates \
git \ git \
build-essential \ build-essential \
pkg-config pkg-config \
python3.10 \
python3.10-dev
ARG TARGETPLATFORM # https://github.com/GreptimeTeam/greptimedb/actions/runs/10935485852/job/30357457188#step:3:7106
RUN echo "target platform: $TARGETPLATFORM" # `aws-lc-sys` require gcc >= 10.3.0 to work, hence alias to use gcc-10
RUN apt-get remove -y gcc-9 g++-9 cpp-9 && \
apt-get install -y gcc-10 g++-10 cpp-10 make cmake && \
ln -sf /usr/bin/gcc-10 /usr/bin/gcc && ln -sf /usr/bin/g++-10 /usr/bin/g++ && \
ln -sf /usr/bin/gcc-10 /usr/bin/cc && \
ln -sf /usr/bin/g++-10 /usr/bin/cpp && ln -sf /usr/bin/g++-10 /usr/bin/c++ && \
cc --version && gcc --version && g++ --version && cpp --version && c++ --version
ARG PROTOBUF_VERSION=29.3 # Remove Python 3.8 and install pip.
RUN apt-get -y purge python3.8 && \
# Install protobuf, because the one in the apt is too old (v3.12). apt-get -y autoremove && \
RUN if [ "$TARGETPLATFORM" = "linux/arm64" ]; then \ ln -s /usr/bin/python3.10 /usr/bin/python3 && \
curl -OL https://github.com/protocolbuffers/protobuf/releases/download/v${PROTOBUF_VERSION}/protoc-${PROTOBUF_VERSION}-linux-aarch_64.zip && \ curl -sS https://bootstrap.pypa.io/get-pip.py | python3.10
unzip protoc-${PROTOBUF_VERSION}-linux-aarch_64.zip -d protoc3; \
elif [ "$TARGETPLATFORM" = "linux/amd64" ]; then \
curl -OL https://github.com/protocolbuffers/protobuf/releases/download/v${PROTOBUF_VERSION}/protoc-${PROTOBUF_VERSION}-linux-x86_64.zip && \
unzip protoc-${PROTOBUF_VERSION}-linux-x86_64.zip -d protoc3; \
fi
RUN mv protoc3/bin/* /usr/local/bin/
RUN mv protoc3/include/* /usr/local/include/
# Silence all `safe.directory` warnings, to avoid the "detect dubious repository" error when building with submodules. # Silence all `safe.directory` warnings, to avoid the "detect dubious repository" error when building with submodules.
# Disabling the safe directory check here won't pose extra security issues, because in our usage for this dev build # Disabling the safe directory check here won't pose extra security issues, because in our usage for this dev build
@@ -45,7 +49,11 @@ RUN mv protoc3/include/* /usr/local/include/
# wildcard here. However, that requires the git's config files and the submodules all owned by the very same user. # wildcard here. However, that requires the git's config files and the submodules all owned by the very same user.
# It's troublesome to do this since the dev build runs in Docker, which is under user "root"; while outside the Docker, # It's troublesome to do this since the dev build runs in Docker, which is under user "root"; while outside the Docker,
# it can be a different user that have prepared the submodules. # it can be a different user that have prepared the submodules.
RUN git config --global --add safe.directory '*' RUN git config --global --add safe.directory *
# Install Python dependencies.
COPY $DOCKER_BUILD_ROOT/docker/python/requirements.txt /etc/greptime/requirements.txt
RUN python3 -m pip install -r /etc/greptime/requirements.txt
# Install Rust. # Install Rust.
SHELL ["/bin/bash", "-c"] SHELL ["/bin/bash", "-c"]


@@ -21,7 +21,7 @@ RUN apt-get update && DEBIAN_FRONTEND=noninteractive apt-get install -y \
pkg-config pkg-config
# Install protoc. # Install protoc.
ENV PROTOC_VERSION=29.3 ENV PROTOC_VERSION=25.1
RUN if [ "$(uname -m)" = "x86_64" ]; then \ RUN if [ "$(uname -m)" = "x86_64" ]; then \
PROTOC_ZIP=protoc-${PROTOC_VERSION}-linux-x86_64.zip; \ PROTOC_ZIP=protoc-${PROTOC_VERSION}-linux-x86_64.zip; \
elif [ "$(uname -m)" = "aarch64" ]; then \ elif [ "$(uname -m)" = "aarch64" ]; then \
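The same pinned-protoc pattern recurs across the Dockerfiles in this compare (versions differ: 29.3 on one side, 25.1/3.15.8 on the other). For convenience, a standalone sketch of that install step for an x86_64 host:

```bash
# Illustrative pinned protoc install, mirroring the Dockerfile hunks above.
PROTOC_VERSION=29.3
curl -OL "https://github.com/protocolbuffers/protobuf/releases/download/v${PROTOC_VERSION}/protoc-${PROTOC_VERSION}-linux-x86_64.zip"
unzip "protoc-${PROTOC_VERSION}-linux-x86_64.zip" -d protoc3
sudo mv protoc3/bin/* /usr/local/bin/
sudo mv protoc3/include/* /usr/local/include/
```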


@@ -39,16 +39,14 @@ services:
container_name: metasrv container_name: metasrv
ports: ports:
- 3002:3002 - 3002:3002
- 3000:3000
command: command:
- metasrv - metasrv
- start - start
- --rpc-bind-addr=0.0.0.0:3002 - --bind-addr=0.0.0.0:3002
- --rpc-server-addr=metasrv:3002 - --server-addr=metasrv:3002
- --store-addrs=etcd0:2379 - --store-addrs=etcd0:2379
- --http-addr=0.0.0.0:3000
healthcheck: healthcheck:
test: [ "CMD", "curl", "-f", "http://metasrv:3000/health" ] test: [ "CMD", "curl", "-f", "http://metasrv:3002/health" ]
interval: 5s interval: 5s
timeout: 3s timeout: 3s
retries: 5 retries: 5
@@ -68,17 +66,17 @@ services:
- datanode - datanode
- start - start
- --node-id=0 - --node-id=0
- --rpc-bind-addr=0.0.0.0:3001 - --rpc-addr=0.0.0.0:3001
- --rpc-server-addr=datanode0:3001 - --rpc-hostname=datanode0:3001
- --metasrv-addrs=metasrv:3002 - --metasrv-addrs=metasrv:3002
- --http-addr=0.0.0.0:5000 - --http-addr=0.0.0.0:5000
volumes: volumes:
- /tmp/greptimedb-cluster-docker-compose/datanode0:/tmp/greptimedb - /tmp/greptimedb-cluster-docker-compose/datanode0:/tmp/greptimedb
healthcheck: healthcheck:
test: [ "CMD", "curl", "-fv", "http://datanode0:5000/health" ] test: [ "CMD", "curl", "-f", "http://datanode0:5000/health" ]
interval: 5s interval: 5s
timeout: 3s timeout: 3s
retries: 10 retries: 5
depends_on: depends_on:
metasrv: metasrv:
condition: service_healthy condition: service_healthy
@@ -98,7 +96,7 @@ services:
- start - start
- --metasrv-addrs=metasrv:3002 - --metasrv-addrs=metasrv:3002
- --http-addr=0.0.0.0:4000 - --http-addr=0.0.0.0:4000
- --rpc-bind-addr=0.0.0.0:4001 - --rpc-addr=0.0.0.0:4001
- --mysql-addr=0.0.0.0:4002 - --mysql-addr=0.0.0.0:4002
- --postgres-addr=0.0.0.0:4003 - --postgres-addr=0.0.0.0:4003
healthcheck: healthcheck:
@@ -117,23 +115,16 @@ services:
container_name: flownode0 container_name: flownode0
ports: ports:
- 4004:4004 - 4004:4004
- 4005:4005
command: command:
- flownode - flownode
- start - start
- --node-id=0 - --node-id=0
- --metasrv-addrs=metasrv:3002 - --metasrv-addrs=metasrv:3002
- --rpc-bind-addr=0.0.0.0:4004 - --rpc-addr=0.0.0.0:4004
- --rpc-server-addr=flownode0:4004 - --rpc-hostname=flownode0:4004
- --http-addr=0.0.0.0:4005
depends_on: depends_on:
frontend0: frontend0:
condition: service_healthy condition: service_healthy
healthcheck:
test: [ "CMD", "curl", "-f", "http://flownode0:4005/health" ]
interval: 5s
timeout: 3s
retries: 5
networks: networks:
- greptimedb - greptimedb
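A hedged smoke test for the compose file above; the metasrv health port differs between the two sides of the compare (3000 with a dedicated HTTP address versus 3002), and the frontend check assumes its HTTP port 4000 is published to the host:

```bash
# Bring the cluster up and poke the health endpoints used by the healthchecks.
docker compose up -d
curl -f http://127.0.0.1:3000/health   # metasrv (use 3002 on the other side of the compare)
curl -f http://127.0.0.1:4000/health   # frontend
```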


@@ -0,0 +1,5 @@
numpy>=1.24.2
pandas>=1.5.3
pyarrow>=11.0.0
requests>=2.28.2
scipy>=1.10.1


@@ -4,13 +4,13 @@
example: example:
```bash ```bash
curl --data "trace,flow=debug" 127.0.0.1:4000/debug/log_level curl --data "trace;flow=debug" 127.0.0.1:4000/debug/log_level
``` ```
And database will reply with something like: And database will reply with something like:
```bash ```bash
Log Level changed from Some("info") to "trace,flow=debug"% Log Level changed from Some("info") to "trace;flow=debug"%
``` ```
The data is a string in the format of `global_level,module1=level1,module2=level2,...` that follow the same rule of `RUST_LOG`. The data is a string in the format of `global_level;module1=level1;module2=level2;...` that follow the same rule of `RUST_LOG`.
The module is the module name of the log, and the level is the log level. The log level can be one of the following: `trace`, `debug`, `info`, `warn`, `error`, `off`(case insensitive). The module is the module name of the log, and the level is the log level. The log level can be one of the following: `trace`, `debug`, `info`, `warn`, `error`, `off`(case insensitive).
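For example, keeping the global level at `info` while turning a single module up to `debug` (the separator differs between the two sides of this compare, `,` versus `;`, and the module name below is only an illustration):

```bash
# Raise one module to debug without touching the global level.
curl --data "info,mito2=debug" 127.0.0.1:4000/debug/log_level
```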


@@ -3,7 +3,7 @@
## HTTP API ## HTTP API
Sample at 99 Hertz, for 5 seconds, output report in [protobuf format](https://github.com/google/pprof/blob/master/proto/profile.proto). Sample at 99 Hertz, for 5 seconds, output report in [protobuf format](https://github.com/google/pprof/blob/master/proto/profile.proto).
```bash ```bash
curl -X POST -s '0:4000/debug/prof/cpu' > /tmp/pprof.out curl -s '0:4000/debug/prof/cpu' > /tmp/pprof.out
``` ```
Then you can use `pprof` command with the protobuf file. Then you can use `pprof` command with the protobuf file.
@@ -13,10 +13,10 @@ go tool pprof -top /tmp/pprof.out
Sample at 99 Hertz, for 60 seconds, output report in flamegraph format. Sample at 99 Hertz, for 60 seconds, output report in flamegraph format.
```bash ```bash
curl -X POST -s '0:4000/debug/prof/cpu?seconds=60&output=flamegraph' > /tmp/pprof.svg curl -s '0:4000/debug/prof/cpu?seconds=60&output=flamegraph' > /tmp/pprof.svg
``` ```
Sample at 49 Hertz, for 10 seconds, output report in text format. Sample at 49 Hertz, for 10 seconds, output report in text format.
```bash ```bash
curl -X POST -s '0:4000/debug/prof/cpu?seconds=10&frequency=49&output=text' > /tmp/pprof.txt curl -s '0:4000/debug/prof/cpu?seconds=10&frequency=49&output=text' > /tmp/pprof.txt
``` ```
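Once a protobuf profile has been captured, it can also be explored interactively; this assumes the Go toolchain is installed and `/tmp/pprof.out` is the dump produced above:

```bash
# Open the captured CPU profile in pprof's web UI on port 8080.
go tool pprof -http=:8080 /tmp/pprof.out
```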


@@ -4,16 +4,6 @@ This crate provides an easy approach to dump memory profiling info.
## Prerequisites ## Prerequisites
### jemalloc ### jemalloc
jeprof is already compiled in the target directory of GreptimeDB. You can find the binary and use it.
```
# find jeprof binary
find . -name 'jeprof'
# add executable permission
chmod +x <path_to_jeprof>
```
The path is usually under `./target/${PROFILE}/build/tikv-jemalloc-sys-${HASH}/out/build/bin/jeprof`.
The default version of jemalloc installed from the package manager may not have the `--collapsed` option.
You may need to check whether the `jeprof` version is >= `5.3.0` if you want to install it from the package manager.
```bash ```bash
# for macOS # for macOS
brew install jemalloc brew install jemalloc
@@ -33,17 +23,13 @@ curl https://raw.githubusercontent.com/brendangregg/FlameGraph/master/flamegraph
Start GreptimeDB instance with environment variables: Start GreptimeDB instance with environment variables:
```bash ```bash
# for Linux MALLOC_CONF=prof:true,lg_prof_interval:28 ./target/debug/greptime standalone start
MALLOC_CONF=prof:true ./target/debug/greptime standalone start
# for macOS
_RJEM_MALLOC_CONF=prof:true ./target/debug/greptime standalone start
``` ```
Dump memory profiling data through HTTP API: Dump memory profiling data through HTTP API:
```bash ```bash
curl -X POST localhost:4000/debug/prof/mem > greptime.hprof curl localhost:4000/debug/prof/mem > greptime.hprof
``` ```
You can periodically dump profiling data and compare them to find the delta memory usage. You can periodically dump profiling data and compare them to find the delta memory usage.
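A hedged sketch of turning such a dump into a flamegraph with `jeprof` and `flamegraph.pl` (the binary and profile paths are illustrative; as noted above, the `--collapsed` option needs a sufficiently recent jemalloc):

```bash
# Collapse the heap profile and render it as an SVG flamegraph.
jeprof ./target/debug/greptime greptime.hprof --collapsed | flamegraph.pl > mem-prof.svg
```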

[Binary image files changed; contents not shown. One image grew from 25 KiB to 36 KiB; docs/logo-text-padding.png grew from 21 KiB to 25 KiB and its file mode changed from Normal to Executable.]

flake.lock (generated, 100 lines):

@@ -1,100 +0,0 @@
{
"nodes": {
"fenix": {
"inputs": {
"nixpkgs": [
"nixpkgs"
],
"rust-analyzer-src": "rust-analyzer-src"
},
"locked": {
"lastModified": 1737613896,
"narHash": "sha256-ldqXIglq74C7yKMFUzrS9xMT/EVs26vZpOD68Sh7OcU=",
"owner": "nix-community",
"repo": "fenix",
"rev": "303a062fdd8e89f233db05868468975d17855d80",
"type": "github"
},
"original": {
"owner": "nix-community",
"repo": "fenix",
"type": "github"
}
},
"flake-utils": {
"inputs": {
"systems": "systems"
},
"locked": {
"lastModified": 1731533236,
"narHash": "sha256-l0KFg5HjrsfsO/JpG+r7fRrqm12kzFHyUHqHCVpMMbI=",
"owner": "numtide",
"repo": "flake-utils",
"rev": "11707dc2f618dd54ca8739b309ec4fc024de578b",
"type": "github"
},
"original": {
"owner": "numtide",
"repo": "flake-utils",
"type": "github"
}
},
"nixpkgs": {
"locked": {
"lastModified": 1737569578,
"narHash": "sha256-6qY0pk2QmUtBT9Mywdvif0i/CLVgpCjMUn6g9vB+f3M=",
"owner": "NixOS",
"repo": "nixpkgs",
"rev": "47addd76727f42d351590c905d9d1905ca895b82",
"type": "github"
},
"original": {
"owner": "NixOS",
"ref": "nixos-24.11",
"repo": "nixpkgs",
"type": "github"
}
},
"root": {
"inputs": {
"fenix": "fenix",
"flake-utils": "flake-utils",
"nixpkgs": "nixpkgs"
}
},
"rust-analyzer-src": {
"flake": false,
"locked": {
"lastModified": 1737581772,
"narHash": "sha256-t1P2Pe3FAX9TlJsCZbmJ3wn+C4qr6aSMypAOu8WNsN0=",
"owner": "rust-lang",
"repo": "rust-analyzer",
"rev": "582af7ee9c8d84f5d534272fc7de9f292bd849be",
"type": "github"
},
"original": {
"owner": "rust-lang",
"ref": "nightly",
"repo": "rust-analyzer",
"type": "github"
}
},
"systems": {
"locked": {
"lastModified": 1681028828,
"narHash": "sha256-Vy1rq5AaRuLzOxct8nz4T6wlgyUR7zLU309k9mBC768=",
"owner": "nix-systems",
"repo": "default",
"rev": "da67096a3b9bf56a91d16901293e51ba5b49a27e",
"type": "github"
},
"original": {
"owner": "nix-systems",
"repo": "default",
"type": "github"
}
}
},
"root": "root",
"version": 7
}


@@ -1,56 +0,0 @@
{
description = "Development environment flake";
inputs = {
nixpkgs.url = "github:NixOS/nixpkgs/nixos-24.11";
fenix = {
url = "github:nix-community/fenix";
inputs.nixpkgs.follows = "nixpkgs";
};
flake-utils.url = "github:numtide/flake-utils";
};
outputs = { self, nixpkgs, fenix, flake-utils }:
flake-utils.lib.eachDefaultSystem (system:
let
pkgs = nixpkgs.legacyPackages.${system};
buildInputs = with pkgs; [
libgit2
libz
];
lib = nixpkgs.lib;
rustToolchain = fenix.packages.${system}.fromToolchainName {
name = (lib.importTOML ./rust-toolchain.toml).toolchain.channel;
sha256 = "sha256-f/CVA1EC61EWbh0SjaRNhLL0Ypx2ObupbzigZp8NmL4=";
};
in
{
devShells.default = pkgs.mkShell {
nativeBuildInputs = with pkgs; [
pkg-config
git
clang
gcc
protobuf
gnumake
mold
(rustToolchain.withComponents [
"cargo"
"clippy"
"rust-src"
"rustc"
"rustfmt"
"rust-analyzer"
"llvm-tools"
])
cargo-nextest
cargo-llvm-cov
taplo
curl
gnuplot ## for cargo bench
];
LD_LIBRARY_PATH = pkgs.lib.makeLibraryPath buildInputs;
};
});
}


@@ -5,13 +5,6 @@ GreptimeDB's official Grafana dashboard.
Status notify: we are still working on this config. It's expected to change frequently in the recent days. Please feel free to submit your feedback and/or contribution to this dashboard 🤗
If you use Helm [chart](https://github.com/GreptimeTeam/helm-charts) to deploy GreptimeDB cluster, you can enable self-monitoring by setting the following values in your Helm chart:
- `monitoring.enabled=true`: Deploys a standalone GreptimeDB instance dedicated to monitoring the cluster;
- `grafana.enabled=true`: Deploys Grafana and automatically imports the monitoring dashboard;
The standalone GreptimeDB instance will collect metrics from your cluster and the dashboard will be available in the Grafana UI. For detailed deployment instructions, please refer to our [Kubernetes deployment guide](https://docs.greptime.com/nightly/user-guide/deployments/deploy-on-kubernetes/getting-started).
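As an illustration of supplying those two values (the Helm repository URL, release name, and namespace below are assumptions for this sketch, not taken from the diff):
```bash
# Illustrative only: add the GreptimeDB Helm repo and enable self-monitoring plus Grafana.
helm repo add greptime https://greptimeteam.github.io/helm-charts/
helm repo update
helm upgrade --install mycluster greptime/greptimedb-cluster \
  --set monitoring.enabled=true \
  --set grafana.enabled=true \
  --namespace greptimedb --create-namespace
```
With both values set, the monitoring dashboard is imported into the bundled Grafana instance automatically, as described above.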
# How to use
## `greptimedb.json`

File diff suppressed because it is too large.

File diff suppressed because it is too large.


@@ -1,2 +1,2 @@
[toolchain]
-channel = "nightly-2024-12-25"
+channel = "nightly-2024-10-19"


@@ -14,7 +14,6 @@
import os import os
import re import re
from multiprocessing import Pool
def find_rust_files(directory): def find_rust_files(directory):
@@ -34,11 +33,13 @@ def extract_branch_names(file_content):
return pattern.findall(file_content) return pattern.findall(file_content)
def check_snafu_in_files(branch_name, rust_files_content): def check_snafu_in_files(branch_name, rust_files):
branch_name_snafu = f"{branch_name}Snafu" branch_name_snafu = f"{branch_name}Snafu"
for content in rust_files_content.values(): for rust_file in rust_files:
if branch_name_snafu in content: with open(rust_file, "r") as file:
return True content = file.read()
if branch_name_snafu in content:
return True
return False return False
@@ -48,24 +49,19 @@ def main():
for error_file in error_files: for error_file in error_files:
with open(error_file, "r") as file: with open(error_file, "r") as file:
branch_names.extend(extract_branch_names(file.read())) content = file.read()
branch_names.extend(extract_branch_names(content))
# Read all rust files into memory once unused_snafu = [
rust_files_content = {} branch_name
for rust_file in other_rust_files: for branch_name in branch_names
with open(rust_file, "r") as file: if not check_snafu_in_files(branch_name, other_rust_files)
rust_files_content[rust_file] = file.read() ]
with Pool() as pool: for name in unused_snafu:
results = pool.starmap( print(name)
check_snafu_in_files, [(bn, rust_files_content) for bn in branch_names]
)
unused_snafu = [bn for bn, found in zip(branch_names, results) if not found]
if unused_snafu: if unused_snafu:
print("Unused error variants:")
for name in unused_snafu:
print(name)
raise SystemExit(1) raise SystemExit(1)


@@ -1,4 +1,4 @@
#!/bin/sh #!/usr/bin/env bash
set -ue set -ue
@@ -15,7 +15,7 @@ GITHUB_ORG=GreptimeTeam
GITHUB_REPO=greptimedb GITHUB_REPO=greptimedb
BIN=greptime BIN=greptime
get_os_type() { function get_os_type() {
os_type="$(uname -s)" os_type="$(uname -s)"
case "$os_type" in case "$os_type" in
@@ -31,7 +31,7 @@ get_os_type() {
esac esac
} }
get_arch_type() { function get_arch_type() {
arch_type="$(uname -m)" arch_type="$(uname -m)"
case "$arch_type" in case "$arch_type" in
@@ -53,55 +53,7 @@ get_arch_type() {
esac esac
} }
# Verify SHA256 checksum function download_artifact() {
verify_sha256() {
file="$1"
expected_sha256="$2"
if command -v sha256sum >/dev/null 2>&1; then
actual_sha256=$(sha256sum "$file" | cut -d' ' -f1)
elif command -v shasum >/dev/null 2>&1; then
actual_sha256=$(shasum -a 256 "$file" | cut -d' ' -f1)
else
echo "Warning: No SHA256 verification tool found (sha256sum or shasum). Skipping checksum verification."
return 0
fi
if [ "$actual_sha256" = "$expected_sha256" ]; then
echo "SHA256 checksum verified successfully."
return 0
else
echo "Error: SHA256 checksum verification failed!"
echo "Expected: $expected_sha256"
echo "Actual: $actual_sha256"
return 1
fi
}
# Prompt for user confirmation (compatible with different shells)
prompt_confirmation() {
message="$1"
printf "%s (y/N): " "$message"
# Try to read user input, fallback if read fails
answer=""
if read answer </dev/tty 2>/dev/null; then
case "$answer" in
[Yy]|[Yy][Ee][Ss])
return 0
;;
*)
return 1
;;
esac
else
echo ""
echo "Cannot read user input. Defaulting to No."
return 1
fi
}
download_artifact() {
if [ -n "${OS_TYPE}" ] && [ -n "${ARCH_TYPE}" ]; then if [ -n "${OS_TYPE}" ] && [ -n "${ARCH_TYPE}" ]; then
# Use the latest stable released version. # Use the latest stable released version.
# GitHub API reference: https://docs.github.com/en/rest/releases/releases?apiVersion=2022-11-28#get-the-latest-release. # GitHub API reference: https://docs.github.com/en/rest/releases/releases?apiVersion=2022-11-28#get-the-latest-release.
@@ -119,104 +71,17 @@ download_artifact() {
fi fi
echo "Downloading ${BIN}, OS: ${OS_TYPE}, Arch: ${ARCH_TYPE}, Version: ${VERSION}" echo "Downloading ${BIN}, OS: ${OS_TYPE}, Arch: ${ARCH_TYPE}, Version: ${VERSION}"
PKG_NAME="${BIN}-${OS_TYPE}-${ARCH_TYPE}-${VERSION}" PACKAGE_NAME="${BIN}-${OS_TYPE}-${ARCH_TYPE}-${VERSION}.tar.gz"
PACKAGE_NAME="${PKG_NAME}.tar.gz"
SHA256_FILE="${PKG_NAME}.sha256sum"
if [ -n "${PACKAGE_NAME}" ]; then if [ -n "${PACKAGE_NAME}" ]; then
# Check if files already exist and prompt for override wget "https://github.com/${GITHUB_ORG}/${GITHUB_REPO}/releases/download/${VERSION}/${PACKAGE_NAME}"
if [ -f "${PACKAGE_NAME}" ]; then
echo "File ${PACKAGE_NAME} already exists."
if prompt_confirmation "Do you want to override it?"; then
echo "Overriding existing file..."
rm -f "${PACKAGE_NAME}"
else
echo "Skipping download. Using existing file."
fi
fi
if [ -f "${BIN}" ]; then
echo "Binary ${BIN} already exists."
if prompt_confirmation "Do you want to override it?"; then
echo "Will override existing binary..."
rm -f "${BIN}"
else
echo "Installation cancelled."
exit 0
fi
fi
# Download package if not exists
if [ ! -f "${PACKAGE_NAME}" ]; then
echo "Downloading ${PACKAGE_NAME}..."
# Use curl instead of wget for better compatibility
if command -v curl >/dev/null 2>&1; then
if ! curl -L -o "${PACKAGE_NAME}" "https://github.com/${GITHUB_ORG}/${GITHUB_REPO}/releases/download/${VERSION}/${PACKAGE_NAME}"; then
echo "Error: Failed to download ${PACKAGE_NAME}"
exit 1
fi
elif command -v wget >/dev/null 2>&1; then
if ! wget -O "${PACKAGE_NAME}" "https://github.com/${GITHUB_ORG}/${GITHUB_REPO}/releases/download/${VERSION}/${PACKAGE_NAME}"; then
echo "Error: Failed to download ${PACKAGE_NAME}"
exit 1
fi
else
echo "Error: Neither curl nor wget is available for downloading."
exit 1
fi
fi
# Download and verify SHA256 checksum
echo "Downloading SHA256 checksum..."
sha256_download_success=0
if command -v curl >/dev/null 2>&1; then
if curl -L -s -o "${SHA256_FILE}" "https://github.com/${GITHUB_ORG}/${GITHUB_REPO}/releases/download/${VERSION}/${SHA256_FILE}" 2>/dev/null; then
sha256_download_success=1
fi
elif command -v wget >/dev/null 2>&1; then
if wget -q -O "${SHA256_FILE}" "https://github.com/${GITHUB_ORG}/${GITHUB_REPO}/releases/download/${VERSION}/${SHA256_FILE}" 2>/dev/null; then
sha256_download_success=1
fi
fi
if [ $sha256_download_success -eq 1 ] && [ -f "${SHA256_FILE}" ]; then
expected_sha256=$(cat "${SHA256_FILE}" | cut -d' ' -f1)
if [ -n "$expected_sha256" ]; then
if ! verify_sha256 "${PACKAGE_NAME}" "${expected_sha256}"; then
echo "SHA256 verification failed. Removing downloaded file."
rm -f "${PACKAGE_NAME}" "${SHA256_FILE}"
exit 1
fi
else
echo "Warning: Could not parse SHA256 checksum from file."
fi
rm -f "${SHA256_FILE}"
else
echo "Warning: Could not download SHA256 checksum file. Skipping verification."
fi
# Extract the binary and clean the rest. # Extract the binary and clean the rest.
echo "Extracting ${PACKAGE_NAME}..." tar xvf "${PACKAGE_NAME}" && \
if ! tar xf "${PACKAGE_NAME}"; then mv "${PACKAGE_NAME%.tar.gz}/${BIN}" "${PWD}" && \
echo "Error: Failed to extract ${PACKAGE_NAME}" rm -r "${PACKAGE_NAME}" && \
exit 1 rm -r "${PACKAGE_NAME%.tar.gz}" && \
fi echo "Run './${BIN} --help' to get started"
# Find the binary in the extracted directory
extracted_dir="${PACKAGE_NAME%.tar.gz}"
if [ -f "${extracted_dir}/${BIN}" ]; then
mv "${extracted_dir}/${BIN}" "${PWD}/"
rm -f "${PACKAGE_NAME}"
rm -rf "${extracted_dir}"
chmod +x "${BIN}"
echo "Installation completed successfully!"
echo "Run './${BIN} --help' to get started"
else
echo "Error: Binary ${BIN} not found in extracted archive"
rm -f "${PACKAGE_NAME}"
rm -rf "${extracted_dir}"
exit 1
fi
fi fi
fi fi
} }


@@ -15,10 +15,13 @@ common-macro.workspace = true
common-time.workspace = true
datatypes.workspace = true
greptime-proto.workspace = true
-paste.workspace = true
+paste = "1.0"
prost.workspace = true
serde_json.workspace = true
snafu.workspace = true
[build-dependencies]
tonic-build = "0.11"
+[dev-dependencies]
+paste = "1.0"


@@ -33,7 +33,7 @@ pub enum Error {
#[snafu(implicit)]
location: Location,
#[snafu(source)]
-error: prost::UnknownEnumValue,
+error: prost::DecodeError,
},
#[snafu(display("Failed to create column datatype from {:?}", from))]


@@ -36,14 +36,15 @@ use datatypes::vectors::{
TimestampMillisecondVector, TimestampNanosecondVector, TimestampSecondVector, UInt32Vector, TimestampMillisecondVector, TimestampNanosecondVector, TimestampSecondVector, UInt32Vector,
UInt64Vector, VectorRef, UInt64Vector, VectorRef,
}; };
use greptime_proto::v1;
use greptime_proto::v1::column_data_type_extension::TypeExt; use greptime_proto::v1::column_data_type_extension::TypeExt;
use greptime_proto::v1::ddl_request::Expr; use greptime_proto::v1::ddl_request::Expr;
use greptime_proto::v1::greptime_request::Request; use greptime_proto::v1::greptime_request::Request;
use greptime_proto::v1::query_request::Query; use greptime_proto::v1::query_request::Query;
use greptime_proto::v1::value::ValueData; use greptime_proto::v1::value::ValueData;
use greptime_proto::v1::{ use greptime_proto::v1::{
self, ColumnDataTypeExtension, DdlRequest, DecimalTypeExtension, JsonTypeExtension, ColumnDataTypeExtension, DdlRequest, DecimalTypeExtension, JsonTypeExtension, QueryRequest,
QueryRequest, Row, SemanticType, VectorTypeExtension, Row, SemanticType,
}; };
use paste::paste; use paste::paste;
use snafu::prelude::*; use snafu::prelude::*;
@@ -86,7 +87,7 @@ impl ColumnDataTypeWrapper {
/// Get a tuple of ColumnDataType and ColumnDataTypeExtension. /// Get a tuple of ColumnDataType and ColumnDataTypeExtension.
pub fn to_parts(&self) -> (ColumnDataType, Option<ColumnDataTypeExtension>) { pub fn to_parts(&self) -> (ColumnDataType, Option<ColumnDataTypeExtension>) {
(self.datatype, self.datatype_ext) (self.datatype, self.datatype_ext.clone())
} }
} }
@@ -149,17 +150,6 @@ impl From<ColumnDataTypeWrapper> for ConcreteDataType {
ConcreteDataType::decimal128_default_datatype() ConcreteDataType::decimal128_default_datatype()
} }
} }
ColumnDataType::Vector => {
if let Some(TypeExt::VectorType(d)) = datatype_wrapper
.datatype_ext
.as_ref()
.and_then(|datatype_ext| datatype_ext.type_ext.as_ref())
{
ConcreteDataType::vector_datatype(d.dim)
} else {
ConcreteDataType::vector_default_datatype()
}
}
} }
} }
} }
@@ -241,15 +231,6 @@ impl ColumnDataTypeWrapper {
}), }),
} }
} }
pub fn vector_datatype(dim: u32) -> Self {
ColumnDataTypeWrapper {
datatype: ColumnDataType::Vector,
datatype_ext: Some(ColumnDataTypeExtension {
type_ext: Some(TypeExt::VectorType(VectorTypeExtension { dim })),
}),
}
}
} }
impl TryFrom<ConcreteDataType> for ColumnDataTypeWrapper { impl TryFrom<ConcreteDataType> for ColumnDataTypeWrapper {
@@ -268,7 +249,7 @@ impl TryFrom<ConcreteDataType> for ColumnDataTypeWrapper {
ConcreteDataType::UInt64(_) => ColumnDataType::Uint64, ConcreteDataType::UInt64(_) => ColumnDataType::Uint64,
ConcreteDataType::Float32(_) => ColumnDataType::Float32, ConcreteDataType::Float32(_) => ColumnDataType::Float32,
ConcreteDataType::Float64(_) => ColumnDataType::Float64, ConcreteDataType::Float64(_) => ColumnDataType::Float64,
ConcreteDataType::Binary(_) => ColumnDataType::Binary, ConcreteDataType::Binary(_) | ConcreteDataType::Json(_) => ColumnDataType::Binary,
ConcreteDataType::String(_) => ColumnDataType::String, ConcreteDataType::String(_) => ColumnDataType::String,
ConcreteDataType::Date(_) => ColumnDataType::Date, ConcreteDataType::Date(_) => ColumnDataType::Date,
ConcreteDataType::DateTime(_) => ColumnDataType::Datetime, ConcreteDataType::DateTime(_) => ColumnDataType::Datetime,
@@ -290,8 +271,6 @@ impl TryFrom<ConcreteDataType> for ColumnDataTypeWrapper {
IntervalType::MonthDayNano(_) => ColumnDataType::IntervalMonthDayNano, IntervalType::MonthDayNano(_) => ColumnDataType::IntervalMonthDayNano,
}, },
ConcreteDataType::Decimal128(_) => ColumnDataType::Decimal128, ConcreteDataType::Decimal128(_) => ColumnDataType::Decimal128,
ConcreteDataType::Json(_) => ColumnDataType::Json,
ConcreteDataType::Vector(_) => ColumnDataType::Vector,
ConcreteDataType::Null(_) ConcreteDataType::Null(_)
| ConcreteDataType::List(_) | ConcreteDataType::List(_)
| ConcreteDataType::Dictionary(_) | ConcreteDataType::Dictionary(_)
@@ -310,17 +289,15 @@ impl TryFrom<ConcreteDataType> for ColumnDataTypeWrapper {
})), })),
}) })
} }
ColumnDataType::Json => datatype.as_json().map(|_| ColumnDataTypeExtension { ColumnDataType::Binary => {
type_ext: Some(TypeExt::JsonType(JsonTypeExtension::JsonBinary.into())), if datatype == ConcreteDataType::json_datatype() {
}), // Json is the same as binary in proto. The extension marks the binary in proto is actually a json.
ColumnDataType::Vector => { Some(ColumnDataTypeExtension {
datatype type_ext: Some(TypeExt::JsonType(JsonTypeExtension::JsonBinary.into())),
.as_vector()
.map(|vector_type| ColumnDataTypeExtension {
type_ext: Some(TypeExt::VectorType(VectorTypeExtension {
dim: vector_type.dim as _,
})),
}) })
} else {
None
}
} }
_ => None, _ => None,
}; };
@@ -445,10 +422,6 @@ pub fn values_with_capacity(datatype: ColumnDataType, capacity: usize) -> Values
string_values: Vec::with_capacity(capacity), string_values: Vec::with_capacity(capacity),
..Default::default() ..Default::default()
}, },
ColumnDataType::Vector => Values {
binary_values: Vec::with_capacity(capacity),
..Default::default()
},
} }
} }
@@ -527,14 +500,13 @@ fn ddl_request_type(request: &DdlRequest) -> &'static str {
match request.expr { match request.expr {
Some(Expr::CreateDatabase(_)) => "ddl.create_database", Some(Expr::CreateDatabase(_)) => "ddl.create_database",
Some(Expr::CreateTable(_)) => "ddl.create_table", Some(Expr::CreateTable(_)) => "ddl.create_table",
Some(Expr::AlterTable(_)) => "ddl.alter_table", Some(Expr::Alter(_)) => "ddl.alter",
Some(Expr::DropTable(_)) => "ddl.drop_table", Some(Expr::DropTable(_)) => "ddl.drop_table",
Some(Expr::TruncateTable(_)) => "ddl.truncate_table", Some(Expr::TruncateTable(_)) => "ddl.truncate_table",
Some(Expr::CreateFlow(_)) => "ddl.create_flow", Some(Expr::CreateFlow(_)) => "ddl.create_flow",
Some(Expr::DropFlow(_)) => "ddl.drop_flow", Some(Expr::DropFlow(_)) => "ddl.drop_flow",
Some(Expr::CreateView(_)) => "ddl.create_view", Some(Expr::CreateView(_)) => "ddl.create_view",
Some(Expr::DropView(_)) => "ddl.drop_view", Some(Expr::DropView(_)) => "ddl.drop_view",
Some(Expr::AlterDatabase(_)) => "ddl.alter_database",
None => "ddl.empty", None => "ddl.empty",
} }
} }
@@ -685,18 +657,14 @@ pub fn pb_values_to_vector_ref(data_type: &ConcreteDataType, values: Values) ->
IntervalType::YearMonth(_) => Arc::new(IntervalYearMonthVector::from_vec( IntervalType::YearMonth(_) => Arc::new(IntervalYearMonthVector::from_vec(
values.interval_year_month_values, values.interval_year_month_values,
)), )),
IntervalType::DayTime(_) => Arc::new(IntervalDayTimeVector::from_iter_values( IntervalType::DayTime(_) => Arc::new(IntervalDayTimeVector::from_vec(
values values.interval_day_time_values,
.interval_day_time_values
.iter()
.map(|x| IntervalDayTime::from_i64(*x).into()),
)), )),
IntervalType::MonthDayNano(_) => { IntervalType::MonthDayNano(_) => {
Arc::new(IntervalMonthDayNanoVector::from_iter_values( Arc::new(IntervalMonthDayNanoVector::from_iter_values(
values values.interval_month_day_nano_values.iter().map(|x| {
.interval_month_day_nano_values IntervalMonthDayNano::new(x.months, x.days, x.nanoseconds).to_i128()
.iter() }),
.map(|x| IntervalMonthDayNano::new(x.months, x.days, x.nanoseconds).into()),
)) ))
} }
}, },
@@ -705,7 +673,6 @@ pub fn pb_values_to_vector_ref(data_type: &ConcreteDataType, values: Values) ->
Decimal128::from_value_precision_scale(x.hi, x.lo, d.precision(), d.scale()).into() Decimal128::from_value_precision_scale(x.hi, x.lo, d.precision(), d.scale()).into()
}), }),
)), )),
ConcreteDataType::Vector(_) => Arc::new(BinaryVector::from_vec(values.binary_values)),
ConcreteDataType::Null(_) ConcreteDataType::Null(_)
| ConcreteDataType::List(_) | ConcreteDataType::List(_)
| ConcreteDataType::Dictionary(_) | ConcreteDataType::Dictionary(_)
@@ -871,7 +838,6 @@ pub fn pb_values_to_values(data_type: &ConcreteDataType, values: Values) -> Vec<
)) ))
}) })
.collect(), .collect(),
ConcreteDataType::Vector(_) => values.binary_values.into_iter().map(|v| v.into()).collect(),
ConcreteDataType::Null(_) ConcreteDataType::Null(_)
| ConcreteDataType::List(_) | ConcreteDataType::List(_)
| ConcreteDataType::Dictionary(_) | ConcreteDataType::Dictionary(_)
@@ -896,7 +862,10 @@ pub fn is_column_type_value_eq(
ColumnDataTypeWrapper::try_new(type_value, type_extension) ColumnDataTypeWrapper::try_new(type_value, type_extension)
.map(|wrapper| { .map(|wrapper| {
let datatype = ConcreteDataType::from(wrapper); let datatype = ConcreteDataType::from(wrapper);
expect_type == &datatype (datatype == *expect_type)
// Json type leverage binary type in pb, so this is valid.
|| (datatype == ConcreteDataType::binary_datatype()
&& *expect_type == ConcreteDataType::json_datatype())
}) })
.unwrap_or(false) .unwrap_or(false)
} }
@@ -1183,10 +1152,6 @@ mod tests {
let values = values_with_capacity(ColumnDataType::Decimal128, 2); let values = values_with_capacity(ColumnDataType::Decimal128, 2);
let values = values.decimal128_values; let values = values.decimal128_values;
assert_eq!(2, values.capacity()); assert_eq!(2, values.capacity());
let values = values_with_capacity(ColumnDataType::Vector, 2);
let values = values.binary_values;
assert_eq!(2, values.capacity());
} }
#[test] #[test]
@@ -1274,11 +1239,7 @@ mod tests {
assert_eq!( assert_eq!(
ConcreteDataType::decimal128_datatype(10, 2), ConcreteDataType::decimal128_datatype(10, 2),
ColumnDataTypeWrapper::decimal128_datatype(10, 2).into() ColumnDataTypeWrapper::decimal128_datatype(10, 2).into()
); )
assert_eq!(
ConcreteDataType::vector_datatype(3),
ColumnDataTypeWrapper::vector_datatype(3).into()
);
} }
#[test] #[test]
@@ -1374,10 +1335,6 @@ mod tests {
.try_into() .try_into()
.unwrap() .unwrap()
); );
assert_eq!(
ColumnDataTypeWrapper::vector_datatype(3),
ConcreteDataType::vector_datatype(3).try_into().unwrap()
);
let result: Result<ColumnDataTypeWrapper> = ConcreteDataType::null_datatype().try_into(); let result: Result<ColumnDataTypeWrapper> = ConcreteDataType::null_datatype().try_into();
assert!(result.is_err()); assert!(result.is_err());
@@ -1499,22 +1456,14 @@ mod tests {
column.values.as_ref().unwrap().interval_year_month_values column.values.as_ref().unwrap().interval_year_month_values
); );
let vector = Arc::new(IntervalDayTimeVector::from_vec(vec![ let vector = Arc::new(IntervalDayTimeVector::from_vec(vec![4, 5, 6]));
IntervalDayTime::new(0, 4).into(),
IntervalDayTime::new(0, 5).into(),
IntervalDayTime::new(0, 6).into(),
]));
push_vals(&mut column, 3, vector); push_vals(&mut column, 3, vector);
assert_eq!( assert_eq!(
vec![4, 5, 6], vec![4, 5, 6],
column.values.as_ref().unwrap().interval_day_time_values column.values.as_ref().unwrap().interval_day_time_values
); );
let vector = Arc::new(IntervalMonthDayNanoVector::from_vec(vec![ let vector = Arc::new(IntervalMonthDayNanoVector::from_vec(vec![7, 8, 9]));
IntervalMonthDayNano::new(0, 0, 7).into(),
IntervalMonthDayNano::new(0, 0, 8).into(),
IntervalMonthDayNano::new(0, 0, 9).into(),
]));
let len = vector.len(); let len = vector.len();
push_vals(&mut column, 3, vector); push_vals(&mut column, 3, vector);
(0..len).for_each(|i| { (0..len).for_each(|i| {


@@ -15,10 +15,8 @@
use std::collections::HashMap; use std::collections::HashMap;
use datatypes::schema::{ use datatypes::schema::{
ColumnDefaultConstraint, ColumnSchema, FulltextAnalyzer, FulltextOptions, SkippingIndexType, ColumnDefaultConstraint, ColumnSchema, FulltextOptions, COMMENT_KEY, FULLTEXT_KEY,
COMMENT_KEY, FULLTEXT_KEY, INVERTED_INDEX_KEY, SKIPPING_INDEX_KEY,
}; };
use greptime_proto::v1::{Analyzer, SkippingIndexType as PbSkippingIndexType};
use snafu::ResultExt; use snafu::ResultExt;
use crate::error::{self, Result}; use crate::error::{self, Result};
@@ -27,15 +25,13 @@ use crate::v1::{ColumnDef, ColumnOptions, SemanticType};
/// Key used to store fulltext options in gRPC column options. /// Key used to store fulltext options in gRPC column options.
const FULLTEXT_GRPC_KEY: &str = "fulltext"; const FULLTEXT_GRPC_KEY: &str = "fulltext";
/// Key used to store inverted index options in gRPC column options.
const INVERTED_INDEX_GRPC_KEY: &str = "inverted_index";
/// Key used to store skip index options in gRPC column options.
const SKIPPING_INDEX_GRPC_KEY: &str = "skipping_index";
/// Tries to construct a `ColumnSchema` from the given `ColumnDef`. /// Tries to construct a `ColumnSchema` from the given `ColumnDef`.
pub fn try_as_column_schema(column_def: &ColumnDef) -> Result<ColumnSchema> { pub fn try_as_column_schema(column_def: &ColumnDef) -> Result<ColumnSchema> {
let data_type = let data_type = ColumnDataTypeWrapper::try_new(
ColumnDataTypeWrapper::try_new(column_def.data_type, column_def.datatype_extension)?; column_def.data_type,
column_def.datatype_extension.clone(),
)?;
let constraint = if column_def.default_constraint.is_empty() { let constraint = if column_def.default_constraint.is_empty() {
None None
@@ -53,16 +49,10 @@ pub fn try_as_column_schema(column_def: &ColumnDef) -> Result<ColumnSchema> {
if !column_def.comment.is_empty() { if !column_def.comment.is_empty() {
metadata.insert(COMMENT_KEY.to_string(), column_def.comment.clone()); metadata.insert(COMMENT_KEY.to_string(), column_def.comment.clone());
} }
if let Some(options) = column_def.options.as_ref() { if let Some(options) = column_def.options.as_ref()
if let Some(fulltext) = options.options.get(FULLTEXT_GRPC_KEY) { && let Some(fulltext) = options.options.get(FULLTEXT_GRPC_KEY)
metadata.insert(FULLTEXT_KEY.to_string(), fulltext.to_owned()); {
} metadata.insert(FULLTEXT_KEY.to_string(), fulltext.to_string());
if let Some(inverted_index) = options.options.get(INVERTED_INDEX_GRPC_KEY) {
metadata.insert(INVERTED_INDEX_KEY.to_string(), inverted_index.to_owned());
}
if let Some(skipping_index) = options.options.get(SKIPPING_INDEX_GRPC_KEY) {
metadata.insert(SKIPPING_INDEX_KEY.to_string(), skipping_index.to_owned());
}
} }
ColumnSchema::new(&column_def.name, data_type.into(), column_def.is_nullable) ColumnSchema::new(&column_def.name, data_type.into(), column_def.is_nullable)
@@ -80,17 +70,7 @@ pub fn options_from_column_schema(column_schema: &ColumnSchema) -> Option<Column
if let Some(fulltext) = column_schema.metadata().get(FULLTEXT_KEY) { if let Some(fulltext) = column_schema.metadata().get(FULLTEXT_KEY) {
options options
.options .options
.insert(FULLTEXT_GRPC_KEY.to_string(), fulltext.to_owned()); .insert(FULLTEXT_GRPC_KEY.to_string(), fulltext.to_string());
}
if let Some(inverted_index) = column_schema.metadata().get(INVERTED_INDEX_KEY) {
options
.options
.insert(INVERTED_INDEX_GRPC_KEY.to_string(), inverted_index.clone());
}
if let Some(skipping_index) = column_schema.metadata().get(SKIPPING_INDEX_KEY) {
options
.options
.insert(SKIPPING_INDEX_GRPC_KEY.to_string(), skipping_index.clone());
} }
(!options.options.is_empty()).then_some(options) (!options.options.is_empty()).then_some(options)
@@ -100,7 +80,7 @@ pub fn options_from_column_schema(column_schema: &ColumnSchema) -> Option<Column
pub fn contains_fulltext(options: &Option<ColumnOptions>) -> bool { pub fn contains_fulltext(options: &Option<ColumnOptions>) -> bool {
options options
.as_ref() .as_ref()
.is_some_and(|o| o.options.contains_key(FULLTEXT_GRPC_KEY)) .map_or(false, |o| o.options.contains_key(FULLTEXT_GRPC_KEY))
} }
/// Tries to construct a `ColumnOptions` from the given `FulltextOptions`. /// Tries to construct a `ColumnOptions` from the given `FulltextOptions`.
@@ -113,21 +93,6 @@ pub fn options_from_fulltext(fulltext: &FulltextOptions) -> Result<Option<Column
Ok((!options.options.is_empty()).then_some(options)) Ok((!options.options.is_empty()).then_some(options))
} }
/// Tries to construct a `FulltextAnalyzer` from the given analyzer.
pub fn as_fulltext_option(analyzer: Analyzer) -> FulltextAnalyzer {
match analyzer {
Analyzer::English => FulltextAnalyzer::English,
Analyzer::Chinese => FulltextAnalyzer::Chinese,
}
}
/// Tries to construct a `SkippingIndexType` from the given skipping index type.
pub fn as_skipping_index_type(skipping_index_type: PbSkippingIndexType) -> SkippingIndexType {
match skipping_index_type {
PbSkippingIndexType::BloomFilter => SkippingIndexType::BloomFilter,
}
}
#[cfg(test)] #[cfg(test)]
mod tests { mod tests {
@@ -150,13 +115,10 @@ mod tests {
comment: "test_comment".to_string(), comment: "test_comment".to_string(),
datatype_extension: None, datatype_extension: None,
options: Some(ColumnOptions { options: Some(ColumnOptions {
options: HashMap::from([ options: HashMap::from([(
( FULLTEXT_GRPC_KEY.to_string(),
FULLTEXT_GRPC_KEY.to_string(), "{\"enable\":true}".to_string(),
"{\"enable\":true}".to_string(), )]),
),
(INVERTED_INDEX_GRPC_KEY.to_string(), "true".to_string()),
]),
}), }),
}; };
@@ -177,7 +139,6 @@ mod tests {
..Default::default() ..Default::default()
} }
); );
assert!(schema.is_inverted_indexed());
} }
#[test] #[test]
@@ -186,23 +147,18 @@ mod tests {
let options = options_from_column_schema(&schema); let options = options_from_column_schema(&schema);
assert!(options.is_none()); assert!(options.is_none());
let mut schema = ColumnSchema::new("test", ConcreteDataType::string_datatype(), true) let schema = ColumnSchema::new("test", ConcreteDataType::string_datatype(), true)
.with_fulltext_options(FulltextOptions { .with_fulltext_options(FulltextOptions {
enable: true, enable: true,
analyzer: FulltextAnalyzer::English, analyzer: FulltextAnalyzer::English,
case_sensitive: false, case_sensitive: false,
}) })
.unwrap(); .unwrap();
schema.set_inverted_index(true);
let options = options_from_column_schema(&schema).unwrap(); let options = options_from_column_schema(&schema).unwrap();
assert_eq!( assert_eq!(
options.options.get(FULLTEXT_GRPC_KEY).unwrap(), options.options.get(FULLTEXT_GRPC_KEY).unwrap(),
"{\"enable\":true,\"analyzer\":\"English\",\"case-sensitive\":false}" "{\"enable\":true,\"analyzer\":\"English\",\"case-sensitive\":false}"
); );
assert_eq!(
options.options.get(INVERTED_INDEX_GRPC_KEY).unwrap(),
"true"
);
} }
#[test] #[test]


@@ -25,7 +25,6 @@ pub enum PermissionReq<'a> {
GrpcRequest(&'a Request),
SqlStatement(&'a Statement),
PromQuery,
-LogQuery,
Opentsdb,
LineProtocol,
PromStoreWrite,


@@ -11,3 +11,4 @@ common-macro.workspace = true
common-meta.workspace = true
moka.workspace = true
snafu.workspace = true
+substrait.workspace = true

src/cache/src/lib.rs (vendored)

@@ -19,9 +19,9 @@ use std::time::Duration;
use catalog::kvbackend::new_table_cache; use catalog::kvbackend::new_table_cache;
use common_meta::cache::{ use common_meta::cache::{
new_schema_cache, new_table_flownode_set_cache, new_table_info_cache, new_table_name_cache, new_table_flownode_set_cache, new_table_info_cache, new_table_name_cache,
new_table_route_cache, new_table_schema_cache, new_view_info_cache, CacheRegistry, new_table_route_cache, new_view_info_cache, CacheRegistry, CacheRegistryBuilder,
CacheRegistryBuilder, LayeredCacheRegistryBuilder, LayeredCacheRegistryBuilder,
}; };
use common_meta::kv_backend::KvBackendRef; use common_meta::kv_backend::KvBackendRef;
use moka::future::CacheBuilder; use moka::future::CacheBuilder;
@@ -37,47 +37,9 @@ pub const TABLE_INFO_CACHE_NAME: &str = "table_info_cache";
pub const VIEW_INFO_CACHE_NAME: &str = "view_info_cache"; pub const VIEW_INFO_CACHE_NAME: &str = "view_info_cache";
pub const TABLE_NAME_CACHE_NAME: &str = "table_name_cache"; pub const TABLE_NAME_CACHE_NAME: &str = "table_name_cache";
pub const TABLE_CACHE_NAME: &str = "table_cache"; pub const TABLE_CACHE_NAME: &str = "table_cache";
pub const SCHEMA_CACHE_NAME: &str = "schema_cache";
pub const TABLE_SCHEMA_NAME_CACHE_NAME: &str = "table_schema_name_cache";
pub const TABLE_FLOWNODE_SET_CACHE_NAME: &str = "table_flownode_set_cache"; pub const TABLE_FLOWNODE_SET_CACHE_NAME: &str = "table_flownode_set_cache";
pub const TABLE_ROUTE_CACHE_NAME: &str = "table_route_cache"; pub const TABLE_ROUTE_CACHE_NAME: &str = "table_route_cache";
/// Builds cache registry for datanode, including:
/// - Schema cache.
/// - Table id to schema name cache.
pub fn build_datanode_cache_registry(kv_backend: KvBackendRef) -> CacheRegistry {
// Builds table id schema name cache that never expires.
let cache = CacheBuilder::new(DEFAULT_CACHE_MAX_CAPACITY).build();
let table_id_schema_cache = Arc::new(new_table_schema_cache(
TABLE_SCHEMA_NAME_CACHE_NAME.to_string(),
cache,
kv_backend.clone(),
));
// Builds schema cache
let cache = CacheBuilder::new(DEFAULT_CACHE_MAX_CAPACITY)
.time_to_live(DEFAULT_CACHE_TTL)
.time_to_idle(DEFAULT_CACHE_TTI)
.build();
let schema_cache = Arc::new(new_schema_cache(
SCHEMA_CACHE_NAME.to_string(),
cache,
kv_backend.clone(),
));
CacheRegistryBuilder::default()
.add_cache(table_id_schema_cache)
.add_cache(schema_cache)
.build()
}
/// Builds cache registry for frontend and datanode, including:
/// - Table info cache
/// - Table name cache
/// - Table route cache
/// - Table flow node cache
/// - View cache
/// - Schema cache
pub fn build_fundamental_cache_registry(kv_backend: KvBackendRef) -> CacheRegistry { pub fn build_fundamental_cache_registry(kv_backend: KvBackendRef) -> CacheRegistry {
// Builds table info cache // Builds table info cache
let cache = CacheBuilder::new(DEFAULT_CACHE_MAX_CAPACITY) let cache = CacheBuilder::new(DEFAULT_CACHE_MAX_CAPACITY)
@@ -133,30 +95,12 @@ pub fn build_fundamental_cache_registry(kv_backend: KvBackendRef) -> CacheRegist
kv_backend.clone(), kv_backend.clone(),
)); ));
// Builds schema cache
let cache = CacheBuilder::new(DEFAULT_CACHE_MAX_CAPACITY)
.time_to_live(DEFAULT_CACHE_TTL)
.time_to_idle(DEFAULT_CACHE_TTI)
.build();
let schema_cache = Arc::new(new_schema_cache(
SCHEMA_CACHE_NAME.to_string(),
cache,
kv_backend.clone(),
));
let table_id_schema_cache = Arc::new(new_table_schema_cache(
TABLE_SCHEMA_NAME_CACHE_NAME.to_string(),
CacheBuilder::new(DEFAULT_CACHE_MAX_CAPACITY).build(),
kv_backend,
));
CacheRegistryBuilder::default() CacheRegistryBuilder::default()
.add_cache(table_info_cache) .add_cache(table_info_cache)
.add_cache(table_name_cache) .add_cache(table_name_cache)
.add_cache(table_route_cache) .add_cache(table_route_cache)
.add_cache(view_info_cache) .add_cache(view_info_cache)
.add_cache(table_flownode_set_cache) .add_cache(table_flownode_set_cache)
.add_cache(schema_cache)
.add_cache(table_id_schema_cache)
.build() .build()
} }


@@ -15,9 +15,10 @@ api.workspace = true
arrow.workspace = true arrow.workspace = true
arrow-schema.workspace = true arrow-schema.workspace = true
async-stream.workspace = true async-stream.workspace = true
async-trait.workspace = true async-trait = "0.1"
bytes.workspace = true bytes.workspace = true
common-catalog.workspace = true common-catalog.workspace = true
common-config.workspace = true
common-error.workspace = true common-error.workspace = true
common-macro.workspace = true common-macro.workspace = true
common-meta.workspace = true common-meta.workspace = true
@@ -31,7 +32,7 @@ common-version.workspace = true
dashmap.workspace = true dashmap.workspace = true
datafusion.workspace = true datafusion.workspace = true
datatypes.workspace = true datatypes.workspace = true
futures.workspace = true futures = "0.3"
futures-util.workspace = true futures-util.workspace = true
humantime.workspace = true humantime.workspace = true
itertools.workspace = true itertools.workspace = true
@@ -39,7 +40,7 @@ lazy_static.workspace = true
meta-client.workspace = true meta-client.workspace = true
moka = { workspace = true, features = ["future", "sync"] } moka = { workspace = true, features = ["future", "sync"] }
partition.workspace = true partition.workspace = true
paste.workspace = true paste = "1.0"
prometheus.workspace = true prometheus.workspace = true
rustc-hash.workspace = true rustc-hash.workspace = true
serde_json.workspace = true serde_json.workspace = true
@@ -49,7 +50,7 @@ sql.workspace = true
store-api.workspace = true store-api.workspace = true
table.workspace = true table.workspace = true
tokio.workspace = true tokio.workspace = true
tokio-stream.workspace = true tokio-stream = "0.1"
[dev-dependencies] [dev-dependencies]
cache.workspace = true cache.workspace = true
@@ -57,5 +58,7 @@ catalog = { workspace = true, features = ["testing"] }
chrono.workspace = true chrono.workspace = true
common-meta = { workspace = true, features = ["testing"] } common-meta = { workspace = true, features = ["testing"] }
common-query = { workspace = true, features = ["testing"] } common-query = { workspace = true, features = ["testing"] }
common-test-util.workspace = true
log-store.workspace = true
object-store.workspace = true object-store.workspace = true
tokio.workspace = true tokio.workspace = true


@@ -64,13 +64,6 @@ pub enum Error {
source: BoxedError, source: BoxedError,
}, },
#[snafu(display("Failed to list flow stats"))]
ListFlowStats {
#[snafu(implicit)]
location: Location,
source: BoxedError,
},
#[snafu(display("Failed to list flows in catalog {catalog}"))] #[snafu(display("Failed to list flows in catalog {catalog}"))]
ListFlows { ListFlows {
#[snafu(implicit)] #[snafu(implicit)]
@@ -122,6 +115,13 @@ pub enum Error {
source: BoxedError, source: BoxedError,
}, },
#[snafu(display("Failed to re-compile script due to internal error"))]
CompileScriptInternal {
#[snafu(implicit)]
location: Location,
source: BoxedError,
},
#[snafu(display("Failed to create table, table info: {}", table_info))] #[snafu(display("Failed to create table, table info: {}", table_info))]
CreateTable { CreateTable {
table_info: String, table_info: String,
@@ -178,12 +178,6 @@ pub enum Error {
location: Location, location: Location,
}, },
#[snafu(display("Partition manager not found, it's not expected."))]
PartitionManagerNotFound {
#[snafu(implicit)]
location: Location,
},
#[snafu(display("Failed to find table partitions"))] #[snafu(display("Failed to find table partitions"))]
FindPartitions { source: partition::error::Error }, FindPartitions { source: partition::error::Error },
@@ -307,7 +301,6 @@ impl ErrorExt for Error {
| Error::CastManager { .. } | Error::CastManager { .. }
| Error::Json { .. } | Error::Json { .. }
| Error::GetInformationExtension { .. } | Error::GetInformationExtension { .. }
| Error::PartitionManagerNotFound { .. }
| Error::ProcedureIdNotFound { .. } => StatusCode::Unexpected, | Error::ProcedureIdNotFound { .. } => StatusCode::Unexpected,
Error::ViewPlanColumnsChanged { .. } => StatusCode::InvalidArguments, Error::ViewPlanColumnsChanged { .. } => StatusCode::InvalidArguments,
@@ -326,7 +319,6 @@ impl ErrorExt for Error {
| Error::ListSchemas { source, .. } | Error::ListSchemas { source, .. }
| Error::ListTables { source, .. } | Error::ListTables { source, .. }
| Error::ListFlows { source, .. } | Error::ListFlows { source, .. }
| Error::ListFlowStats { source, .. }
| Error::ListProcedures { source, .. } | Error::ListProcedures { source, .. }
| Error::ListRegionStats { source, .. } | Error::ListRegionStats { source, .. }
| Error::ConvertProtoData { source, .. } => source.status_code(), | Error::ConvertProtoData { source, .. } => source.status_code(),
@@ -336,7 +328,9 @@ impl ErrorExt for Error {
Error::DecodePlan { source, .. } => source.status_code(), Error::DecodePlan { source, .. } => source.status_code(),
Error::InvalidTableInfoInCatalog { source, .. } => source.status_code(), Error::InvalidTableInfoInCatalog { source, .. } => source.status_code(),
Error::Internal { source, .. } => source.status_code(), Error::CompileScriptInternal { source, .. } | Error::Internal { source, .. } => {
source.status_code()
}
Error::QueryAccessDenied { .. } => StatusCode::AccessDenied, Error::QueryAccessDenied { .. } => StatusCode::AccessDenied,
Error::Datafusion { error, .. } => datafusion_status_code::<Self>(error, None), Error::Datafusion { error, .. } => datafusion_status_code::<Self>(error, None),


@@ -1,101 +0,0 @@
// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use api::v1::meta::ProcedureStatus;
use common_error::ext::BoxedError;
use common_meta::cluster::{ClusterInfo, NodeInfo};
use common_meta::datanode::RegionStat;
use common_meta::ddl::{ExecutorContext, ProcedureExecutor};
use common_meta::key::flow::flow_state::FlowStat;
use common_meta::rpc::procedure;
use common_procedure::{ProcedureInfo, ProcedureState};
use meta_client::MetaClientRef;
use snafu::ResultExt;
use crate::error;
use crate::information_schema::InformationExtension;
pub struct DistributedInformationExtension {
meta_client: MetaClientRef,
}
impl DistributedInformationExtension {
pub fn new(meta_client: MetaClientRef) -> Self {
Self { meta_client }
}
}
#[async_trait::async_trait]
impl InformationExtension for DistributedInformationExtension {
type Error = crate::error::Error;
async fn nodes(&self) -> std::result::Result<Vec<NodeInfo>, Self::Error> {
self.meta_client
.list_nodes(None)
.await
.map_err(BoxedError::new)
.context(error::ListNodesSnafu)
}
async fn procedures(&self) -> std::result::Result<Vec<(String, ProcedureInfo)>, Self::Error> {
let procedures = self
.meta_client
.list_procedures(&ExecutorContext::default())
.await
.map_err(BoxedError::new)
.context(error::ListProceduresSnafu)?
.procedures;
let mut result = Vec::with_capacity(procedures.len());
for procedure in procedures {
let pid = match procedure.id {
Some(pid) => pid,
None => return error::ProcedureIdNotFoundSnafu {}.fail(),
};
let pid = procedure::pb_pid_to_pid(&pid)
.map_err(BoxedError::new)
.context(error::ConvertProtoDataSnafu)?;
let status = ProcedureStatus::try_from(procedure.status)
.map(|v| v.as_str_name())
.unwrap_or("Unknown")
.to_string();
let procedure_info = ProcedureInfo {
id: pid,
type_name: procedure.type_name,
start_time_ms: procedure.start_time_ms,
end_time_ms: procedure.end_time_ms,
state: ProcedureState::Running,
lock_keys: procedure.lock_keys,
};
result.push((status, procedure_info));
}
Ok(result)
}
async fn region_stats(&self) -> std::result::Result<Vec<RegionStat>, Self::Error> {
self.meta_client
.list_region_stats()
.await
.map_err(BoxedError::new)
.context(error::ListRegionStatsSnafu)
}
async fn flow_stats(&self) -> std::result::Result<Option<FlowStat>, Self::Error> {
self.meta_client
.list_flow_stats()
.await
.map_err(BoxedError::new)
.context(crate::error::ListFlowStatsSnafu)
}
}


@@ -12,7 +12,7 @@
// See the License for the specific language governing permissions and // See the License for the specific language governing permissions and
// limitations under the License. // limitations under the License.
pub use client::{CachedKvBackend, CachedKvBackendBuilder, MetaKvBackend}; pub use client::{CachedMetaKvBackend, CachedMetaKvBackendBuilder, MetaKvBackend};
mod client; mod client;
mod manager; mod manager;


@@ -22,7 +22,6 @@ use common_error::ext::BoxedError;
use common_meta::cache_invalidator::KvCacheInvalidator; use common_meta::cache_invalidator::KvCacheInvalidator;
use common_meta::error::Error::CacheNotGet; use common_meta::error::Error::CacheNotGet;
use common_meta::error::{CacheNotGetSnafu, Error, ExternalSnafu, GetKvCacheSnafu, Result}; use common_meta::error::{CacheNotGetSnafu, Error, ExternalSnafu, GetKvCacheSnafu, Result};
use common_meta::kv_backend::txn::{Txn, TxnResponse};
use common_meta::kv_backend::{KvBackend, KvBackendRef, TxnService}; use common_meta::kv_backend::{KvBackend, KvBackendRef, TxnService};
use common_meta::rpc::store::{ use common_meta::rpc::store::{
BatchDeleteRequest, BatchDeleteResponse, BatchGetRequest, BatchGetResponse, BatchPutRequest, BatchDeleteRequest, BatchDeleteResponse, BatchGetRequest, BatchGetResponse, BatchPutRequest,
@@ -43,20 +42,20 @@ const DEFAULT_CACHE_MAX_CAPACITY: u64 = 10000;
const DEFAULT_CACHE_TTL: Duration = Duration::from_secs(10 * 60); const DEFAULT_CACHE_TTL: Duration = Duration::from_secs(10 * 60);
const DEFAULT_CACHE_TTI: Duration = Duration::from_secs(5 * 60); const DEFAULT_CACHE_TTI: Duration = Duration::from_secs(5 * 60);
pub struct CachedKvBackendBuilder { pub struct CachedMetaKvBackendBuilder {
cache_max_capacity: Option<u64>, cache_max_capacity: Option<u64>,
cache_ttl: Option<Duration>, cache_ttl: Option<Duration>,
cache_tti: Option<Duration>, cache_tti: Option<Duration>,
inner: KvBackendRef, meta_client: Arc<MetaClient>,
} }
impl CachedKvBackendBuilder { impl CachedMetaKvBackendBuilder {
pub fn new(inner: KvBackendRef) -> Self { pub fn new(meta_client: Arc<MetaClient>) -> Self {
Self { Self {
cache_max_capacity: None, cache_max_capacity: None,
cache_ttl: None, cache_ttl: None,
cache_tti: None, cache_tti: None,
inner, meta_client,
} }
} }
@@ -75,7 +74,7 @@ impl CachedKvBackendBuilder {
self self
} }
pub fn build(self) -> CachedKvBackend { pub fn build(self) -> CachedMetaKvBackend {
let cache_max_capacity = self let cache_max_capacity = self
.cache_max_capacity .cache_max_capacity
.unwrap_or(DEFAULT_CACHE_MAX_CAPACITY); .unwrap_or(DEFAULT_CACHE_MAX_CAPACITY);
@@ -86,11 +85,14 @@ impl CachedKvBackendBuilder {
.time_to_live(cache_ttl) .time_to_live(cache_ttl)
.time_to_idle(cache_tti) .time_to_idle(cache_tti)
.build(); .build();
let kv_backend = self.inner;
let kv_backend = Arc::new(MetaKvBackend {
client: self.meta_client,
});
let name = format!("CachedKvBackend({})", kv_backend.name()); let name = format!("CachedKvBackend({})", kv_backend.name());
let version = AtomicUsize::new(0); let version = AtomicUsize::new(0);
CachedKvBackend { CachedMetaKvBackend {
kv_backend, kv_backend,
cache, cache,
name, name,
@@ -110,29 +112,19 @@ pub type CacheBackend = Cache<Vec<u8>, KeyValue>;
/// Therefore, it is recommended to use CachedMetaKvBackend to only read metadata related /// Therefore, it is recommended to use CachedMetaKvBackend to only read metadata related
/// information. Note: If you read other information, you may read expired data, which depends on /// information. Note: If you read other information, you may read expired data, which depends on
/// TTL and TTI for cache. /// TTL and TTI for cache.
pub struct CachedKvBackend { pub struct CachedMetaKvBackend {
kv_backend: KvBackendRef, kv_backend: KvBackendRef,
cache: CacheBackend, cache: CacheBackend,
name: String, name: String,
version: AtomicUsize, version: AtomicUsize,
} }
#[async_trait::async_trait] impl TxnService for CachedMetaKvBackend {
impl TxnService for CachedKvBackend {
type Error = Error; type Error = Error;
async fn txn(&self, txn: Txn) -> std::result::Result<TxnResponse, Self::Error> {
// TODO(hl): txn of CachedKvBackend simply pass through to inner backend without invalidating caches.
self.kv_backend.txn(txn).await
}
fn max_txn_ops(&self) -> usize {
self.kv_backend.max_txn_ops()
}
} }
#[async_trait::async_trait] #[async_trait::async_trait]
impl KvBackend for CachedKvBackend { impl KvBackend for CachedMetaKvBackend {
fn name(&self) -> &str { fn name(&self) -> &str {
&self.name &self.name
} }
@@ -303,7 +295,7 @@ impl KvBackend for CachedKvBackend {
.lock() .lock()
.unwrap() .unwrap()
.as_ref() .as_ref()
.is_some_and(|v| !self.validate_version(*v)) .map_or(false, |v| !self.validate_version(*v))
{ {
self.cache.invalidate(key).await; self.cache.invalidate(key).await;
} }
@@ -313,7 +305,7 @@ impl KvBackend for CachedKvBackend {
} }
#[async_trait::async_trait] #[async_trait::async_trait]
impl KvCacheInvalidator for CachedKvBackend { impl KvCacheInvalidator for CachedMetaKvBackend {
async fn invalidate_key(&self, key: &[u8]) { async fn invalidate_key(&self, key: &[u8]) {
self.create_new_version(); self.create_new_version();
self.cache.invalidate(key).await; self.cache.invalidate(key).await;
@@ -321,7 +313,7 @@ impl KvCacheInvalidator for CachedKvBackend {
} }
} }
impl CachedKvBackend { impl CachedMetaKvBackend {
// only for test // only for test
#[cfg(test)] #[cfg(test)]
fn wrap(kv_backend: KvBackendRef) -> Self { fn wrap(kv_backend: KvBackendRef) -> Self {
@@ -474,7 +466,7 @@ mod tests {
use common_meta::rpc::KeyValue; use common_meta::rpc::KeyValue;
use dashmap::DashMap; use dashmap::DashMap;
use super::CachedKvBackend; use super::CachedMetaKvBackend;
#[derive(Default)] #[derive(Default)]
pub struct SimpleKvBackend { pub struct SimpleKvBackend {
@@ -548,7 +540,7 @@ mod tests {
async fn test_cached_kv_backend() { async fn test_cached_kv_backend() {
let simple_kv = Arc::new(SimpleKvBackend::default()); let simple_kv = Arc::new(SimpleKvBackend::default());
let get_execute_times = simple_kv.get_execute_times.clone(); let get_execute_times = simple_kv.get_execute_times.clone();
let cached_kv = CachedKvBackend::wrap(simple_kv); let cached_kv = CachedMetaKvBackend::wrap(simple_kv);
add_some_vals(&cached_kv).await; add_some_vals(&cached_kv).await;


@@ -38,7 +38,7 @@ pub fn new_table_cache(
) -> TableCache { ) -> TableCache {
let init = init_factory(table_info_cache, table_name_cache); let init = init_factory(table_info_cache, table_name_cache);
CacheContainer::new(name, cache, Box::new(invalidator), init, filter) CacheContainer::new(name, cache, Box::new(invalidator), init, Box::new(filter))
} }
fn init_factory( fn init_factory(


@@ -30,7 +30,6 @@ use table::TableRef;
use crate::error::Result; use crate::error::Result;
pub mod error; pub mod error;
pub mod information_extension;
pub mod kvbackend; pub mod kvbackend;
pub mod memory; pub mod memory;
mod metrics; mod metrics;
@@ -41,7 +40,6 @@ pub mod information_schema {
} }
pub mod table_source; pub mod table_source;
#[async_trait::async_trait] #[async_trait::async_trait]
pub trait CatalogManager: Send + Sync { pub trait CatalogManager: Send + Sync {
fn as_any(&self) -> &dyn Any; fn as_any(&self) -> &dyn Any;


@@ -35,7 +35,6 @@ use common_catalog::consts::{self, DEFAULT_CATALOG_NAME, INFORMATION_SCHEMA_NAME
use common_error::ext::ErrorExt; use common_error::ext::ErrorExt;
use common_meta::cluster::NodeInfo; use common_meta::cluster::NodeInfo;
use common_meta::datanode::RegionStat; use common_meta::datanode::RegionStat;
use common_meta::key::flow::flow_state::FlowStat;
use common_meta::key::flow::FlowMetadataManager; use common_meta::key::flow::FlowMetadataManager;
use common_procedure::ProcedureInfo; use common_procedure::ProcedureInfo;
use common_recordbatch::SendableRecordBatchStream; use common_recordbatch::SendableRecordBatchStream;
@@ -193,7 +192,6 @@ impl SystemSchemaProviderInner for InformationSchemaProvider {
)) as _), )) as _),
FLOWS => Some(Arc::new(InformationSchemaFlows::new( FLOWS => Some(Arc::new(InformationSchemaFlows::new(
self.catalog_name.clone(), self.catalog_name.clone(),
self.catalog_manager.clone(),
self.flow_metadata_manager.clone(), self.flow_metadata_manager.clone(),
)) as _), )) as _),
PROCEDURE_INFO => Some( PROCEDURE_INFO => Some(
@@ -340,9 +338,6 @@ pub trait InformationExtension {
/// Gets the region statistics. /// Gets the region statistics.
async fn region_stats(&self) -> std::result::Result<Vec<RegionStat>, Self::Error>; async fn region_stats(&self) -> std::result::Result<Vec<RegionStat>, Self::Error>;
/// Get the flow statistics. If no flownode is available, return `None`.
async fn flow_stats(&self) -> std::result::Result<Option<FlowStat>, Self::Error>;
} }
pub struct NoopInformationExtension; pub struct NoopInformationExtension;
@@ -362,8 +357,4 @@ impl InformationExtension for NoopInformationExtension {
async fn region_stats(&self) -> std::result::Result<Vec<RegionStat>, Self::Error> { async fn region_stats(&self) -> std::result::Result<Vec<RegionStat>, Self::Error> {
Ok(vec![]) Ok(vec![])
} }
async fn flow_stats(&self) -> std::result::Result<Option<FlowStat>, Self::Error> {
Ok(None)
}
} }


@@ -64,7 +64,6 @@ const INIT_CAPACITY: usize = 42;
/// - `uptime`: the uptime of the peer. /// - `uptime`: the uptime of the peer.
/// - `active_time`: the time since the last activity of the peer. /// - `active_time`: the time since the last activity of the peer.
/// ///
#[derive(Debug)]
pub(super) struct InformationSchemaClusterInfo { pub(super) struct InformationSchemaClusterInfo {
schema: SchemaRef, schema: SchemaRef,
catalog_manager: Weak<dyn CatalogManager>, catalog_manager: Weak<dyn CatalogManager>,


@@ -45,7 +45,6 @@ use crate::error::{
use crate::information_schema::Predicates; use crate::information_schema::Predicates;
use crate::CatalogManager; use crate::CatalogManager;
#[derive(Debug)]
pub(super) struct InformationSchemaColumns { pub(super) struct InformationSchemaColumns {
schema: SchemaRef, schema: SchemaRef,
catalog_name: String, catalog_name: String,


@@ -12,12 +12,11 @@
// See the License for the specific language governing permissions and // See the License for the specific language governing permissions and
// limitations under the License. // limitations under the License.
use std::sync::{Arc, Weak}; use std::sync::Arc;
use common_catalog::consts::INFORMATION_SCHEMA_FLOW_TABLE_ID; use common_catalog::consts::INFORMATION_SCHEMA_FLOW_TABLE_ID;
use common_error::ext::BoxedError; use common_error::ext::BoxedError;
use common_meta::key::flow::flow_info::FlowInfoValue; use common_meta::key::flow::flow_info::FlowInfoValue;
use common_meta::key::flow::flow_state::FlowStat;
use common_meta::key::flow::FlowMetadataManager; use common_meta::key::flow::FlowMetadataManager;
use common_meta::key::FlowId; use common_meta::key::FlowId;
use common_recordbatch::adapter::RecordBatchStreamAdapter; use common_recordbatch::adapter::RecordBatchStreamAdapter;
@@ -29,9 +28,7 @@ use datatypes::prelude::ConcreteDataType as CDT;
use datatypes::scalars::ScalarVectorBuilder; use datatypes::scalars::ScalarVectorBuilder;
use datatypes::schema::{ColumnSchema, Schema, SchemaRef}; use datatypes::schema::{ColumnSchema, Schema, SchemaRef};
use datatypes::value::Value; use datatypes::value::Value;
use datatypes::vectors::{ use datatypes::vectors::{Int64VectorBuilder, StringVectorBuilder, UInt32VectorBuilder, VectorRef};
Int64VectorBuilder, StringVectorBuilder, UInt32VectorBuilder, UInt64VectorBuilder, VectorRef,
};
use futures::TryStreamExt; use futures::TryStreamExt;
use snafu::{OptionExt, ResultExt}; use snafu::{OptionExt, ResultExt};
use store_api::storage::{ScanRequest, TableId}; use store_api::storage::{ScanRequest, TableId};
@@ -41,8 +38,6 @@ use crate::error::{
}; };
use crate::information_schema::{Predicates, FLOWS}; use crate::information_schema::{Predicates, FLOWS};
use crate::system_schema::information_schema::InformationTable; use crate::system_schema::information_schema::InformationTable;
use crate::system_schema::utils;
use crate::CatalogManager;
const INIT_CAPACITY: usize = 42; const INIT_CAPACITY: usize = 42;
@@ -50,7 +45,6 @@ const INIT_CAPACITY: usize = 42;
// pk is (flow_name, flow_id, table_catalog) // pk is (flow_name, flow_id, table_catalog)
pub const FLOW_NAME: &str = "flow_name"; pub const FLOW_NAME: &str = "flow_name";
pub const FLOW_ID: &str = "flow_id"; pub const FLOW_ID: &str = "flow_id";
pub const STATE_SIZE: &str = "state_size";
pub const TABLE_CATALOG: &str = "table_catalog"; pub const TABLE_CATALOG: &str = "table_catalog";
pub const FLOW_DEFINITION: &str = "flow_definition"; pub const FLOW_DEFINITION: &str = "flow_definition";
pub const COMMENT: &str = "comment"; pub const COMMENT: &str = "comment";
@@ -61,24 +55,20 @@ pub const FLOWNODE_IDS: &str = "flownode_ids";
pub const OPTIONS: &str = "options"; pub const OPTIONS: &str = "options";
/// The `information_schema.flows` to provides information about flows in databases. /// The `information_schema.flows` to provides information about flows in databases.
#[derive(Debug)]
pub(super) struct InformationSchemaFlows { pub(super) struct InformationSchemaFlows {
schema: SchemaRef, schema: SchemaRef,
catalog_name: String, catalog_name: String,
catalog_manager: Weak<dyn CatalogManager>,
flow_metadata_manager: Arc<FlowMetadataManager>, flow_metadata_manager: Arc<FlowMetadataManager>,
} }
impl InformationSchemaFlows { impl InformationSchemaFlows {
pub(super) fn new( pub(super) fn new(
catalog_name: String, catalog_name: String,
catalog_manager: Weak<dyn CatalogManager>,
flow_metadata_manager: Arc<FlowMetadataManager>, flow_metadata_manager: Arc<FlowMetadataManager>,
) -> Self { ) -> Self {
Self { Self {
schema: Self::schema(), schema: Self::schema(),
catalog_name, catalog_name,
catalog_manager,
flow_metadata_manager, flow_metadata_manager,
} }
} }
@@ -90,7 +80,6 @@ impl InformationSchemaFlows {
vec![ vec![
(FLOW_NAME, CDT::string_datatype(), false), (FLOW_NAME, CDT::string_datatype(), false),
(FLOW_ID, CDT::uint32_datatype(), false), (FLOW_ID, CDT::uint32_datatype(), false),
(STATE_SIZE, CDT::uint64_datatype(), true),
(TABLE_CATALOG, CDT::string_datatype(), false), (TABLE_CATALOG, CDT::string_datatype(), false),
(FLOW_DEFINITION, CDT::string_datatype(), false), (FLOW_DEFINITION, CDT::string_datatype(), false),
(COMMENT, CDT::string_datatype(), true), (COMMENT, CDT::string_datatype(), true),
@@ -110,7 +99,6 @@ impl InformationSchemaFlows {
InformationSchemaFlowsBuilder::new( InformationSchemaFlowsBuilder::new(
self.schema.clone(), self.schema.clone(),
self.catalog_name.clone(), self.catalog_name.clone(),
self.catalog_manager.clone(),
&self.flow_metadata_manager, &self.flow_metadata_manager,
) )
} }
@@ -156,12 +144,10 @@ impl InformationTable for InformationSchemaFlows {
 struct InformationSchemaFlowsBuilder {
     schema: SchemaRef,
     catalog_name: String,
-    catalog_manager: Weak<dyn CatalogManager>,
     flow_metadata_manager: Arc<FlowMetadataManager>,

     flow_names: StringVectorBuilder,
     flow_ids: UInt32VectorBuilder,
-    state_sizes: UInt64VectorBuilder,
     table_catalogs: StringVectorBuilder,
     raw_sqls: StringVectorBuilder,
     comments: StringVectorBuilder,
@@ -176,18 +162,15 @@ impl InformationSchemaFlowsBuilder {
     fn new(
         schema: SchemaRef,
         catalog_name: String,
-        catalog_manager: Weak<dyn CatalogManager>,
         flow_metadata_manager: &Arc<FlowMetadataManager>,
     ) -> Self {
         Self {
             schema,
             catalog_name,
-            catalog_manager,
             flow_metadata_manager: flow_metadata_manager.clone(),
             flow_names: StringVectorBuilder::with_capacity(INIT_CAPACITY),
             flow_ids: UInt32VectorBuilder::with_capacity(INIT_CAPACITY),
-            state_sizes: UInt64VectorBuilder::with_capacity(INIT_CAPACITY),
             table_catalogs: StringVectorBuilder::with_capacity(INIT_CAPACITY),
             raw_sqls: StringVectorBuilder::with_capacity(INIT_CAPACITY),
             comments: StringVectorBuilder::with_capacity(INIT_CAPACITY),
@@ -212,11 +195,6 @@ impl InformationSchemaFlowsBuilder {
             .flow_names(&catalog_name)
             .await;

-        let flow_stat = {
-            let information_extension = utils::information_extension(&self.catalog_manager)?;
-            information_extension.flow_stats().await?
-        };
-
         while let Some((flow_name, flow_id)) = stream
             .try_next()
             .await
@@ -235,7 +213,7 @@ impl InformationSchemaFlowsBuilder {
                     catalog_name: catalog_name.to_string(),
                     flow_name: flow_name.to_string(),
                 })?;
-            self.add_flow(&predicates, flow_id.flow_id(), flow_info, &flow_stat)?;
+            self.add_flow(&predicates, flow_id.flow_id(), flow_info)?;
         }

         self.finish()
@@ -246,7 +224,6 @@ impl InformationSchemaFlowsBuilder {
         predicates: &Predicates,
         flow_id: FlowId,
         flow_info: FlowInfoValue,
-        flow_stat: &Option<FlowStat>,
     ) -> Result<()> {
         let row = [
             (FLOW_NAME, &Value::from(flow_info.flow_name().to_string())),
@@ -261,11 +238,6 @@ impl InformationSchemaFlowsBuilder {
         }
         self.flow_names.push(Some(flow_info.flow_name()));
         self.flow_ids.push(Some(flow_id));
-        self.state_sizes.push(
-            flow_stat
-                .as_ref()
-                .and_then(|state| state.state_size.get(&flow_id).map(|v| *v as u64)),
-        );
         self.table_catalogs.push(Some(flow_info.catalog_name()));
         self.raw_sqls.push(Some(flow_info.raw_sql()));
         self.comments.push(Some(flow_info.comment()));
@@ -298,7 +270,6 @@ impl InformationSchemaFlowsBuilder {
         let columns: Vec<VectorRef> = vec![
             Arc::new(self.flow_names.finish()),
             Arc::new(self.flow_ids.finish()),
-            Arc::new(self.state_sizes.finish()),
             Arc::new(self.table_catalogs.finish()),
             Arc::new(self.raw_sqls.finish()),
             Arc::new(self.comments.finish()),
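
The hunks above drop the `state_size` column and the `catalog_manager`/`flow_stat` plumbing, leaving `InformationSchemaFlowsBuilder` to push one value per remaining column for each flow and then finish the per-column builders into output vectors. A minimal, self-contained sketch of that per-column builder pattern, using plain `std` vectors in place of `StringVectorBuilder`/`UInt32VectorBuilder` (the struct and helper names below are illustrative, not GreptimeDB's actual API):

// Standalone sketch (not GreptimeDB code): one Vec per column, rows pushed in
// lockstep, then "finished" into named columns, mirroring the builder in the
// diff once the `state_sizes` column is removed.
#[derive(Default)]
struct FlowsBuilderSketch {
    flow_names: Vec<Option<String>>,
    flow_ids: Vec<Option<u32>>,
    table_catalogs: Vec<Option<String>>,
    raw_sqls: Vec<Option<String>>,
    comments: Vec<Option<String>>,
    // The reverted `state_sizes: Vec<Option<u64>>` column would sit here.
}

impl FlowsBuilderSketch {
    // Push one row; every column receives a value so the columns stay aligned.
    fn add_flow(&mut self, id: u32, name: &str, catalog: &str, sql: &str, comment: &str) {
        self.flow_names.push(Some(name.to_string()));
        self.flow_ids.push(Some(id));
        self.table_catalogs.push(Some(catalog.to_string()));
        self.raw_sqls.push(Some(sql.to_string()));
        self.comments.push(Some(comment.to_string()));
    }

    // Stand-in for building a record batch: pair each column name with its row count.
    fn finish(self) -> Vec<(&'static str, usize)> {
        vec![
            ("flow_name", self.flow_names.len()),
            ("flow_id", self.flow_ids.len()),
            ("table_catalog", self.table_catalogs.len()),
            ("flow_definition", self.raw_sqls.len()),
            ("comment", self.comments.len()),
        ]
    }
}

fn main() {
    let mut builder = FlowsBuilderSketch::default();
    builder.add_flow(1, "my_flow", "greptime", "SELECT 1", "demo flow");
    for (column, rows) in builder.finish() {
        println!("{column}: {rows} row(s)");
    }
}

In the real builder, the finish step wraps each finished builder in `Arc::new(...)` to produce the `Vec<VectorRef>` shown in the last hunk.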

Some files were not shown because too many files have changed in this diff.