Mirror of https://github.com/GreptimeTeam/greptimedb.git (synced 2026-01-06 21:32:58 +00:00)

Compare commits: v0.12.0-ni ... feat/objbe (254 commits)
Commits in this range (short SHA1s):

0d5b423eb7, 26bdb6a413, 2fe21469f8, 3aa67c7af4, e0d3e6ae97, 2ce476dc42, 69a816fa0c, dcf5a62014,
f3aa967aae, 93e8510b2a, 53c58494fd, 741c5e2fb1, d68215dc88, bcd63fdb87, f4c527cddf, 8da5949fc5,
db6a63ef6c, f166b93b02, 904d560175, 765d1277ee, ccf42a9d97, 71e2fb895f, c9671fd669, b5efc75aab,
c1d18d9980, 5d9faaaf39, 538875abee, 5ed09c4584, 3f6a41eac5, ff0dcf12c5, 5b1fca825a, 7bd108e2be,
286f225e50, 4f988b5ba9, 500d0852eb, 8d05fb3503, d7b6718be0, 6f0783e17e, d69e93b91a, 76083892cd,
7981c06989, 97bb1519f8, 1d8c9c1843, 71007e200c, a0ff9e751e, f6f617d667, e8788088a8, 53b25c04a2,
62a8b8b9dc, c8bdeaaa6a, 81da18e5df, 7c65fddb30, 421e38c481, aada5c1706, aa8f119bbb, 19a6d15849,
073aaefe65, 77223a0f3e, 4ef038d098, deb9520970, 6bba5e0afa, f359eeb667, 009dbad581, a2047b096c,
6e8b1ba004, 7fc935c61c, 1e6d2fb1fa, 0d19e8f089, c56106b883, edb040dea3, 7bbc87b3c0, 858dae7b23,
33a2485f54, 8ebf454bc1, f5b9ade6df, 9c1834accd, 918517d221, 92d9e81a9f, 224b1d15cd, b4d5393080,
73c29bb482, 198ee87675, 02af9dd21a, bb97f1bf16, fbd5316fdb, 63d5a69a31, 954310f917, 58c6274bf6,
46947fd1de, 44fffdec8b, 8026b1d72c, e22aa819be, beb9c0a797, 5f6f5e980a, ccfa40dc41, 336b941113,
de3f817596, d094f48822, 342883e922, 5be81abba3, c19ecd7ea2, 15f4b10065, c100a2d1a6, ccb1978c98,
480b05c590, 0de0fd80b0, 059cb6fdc3, 29218b5fe7, 59e6ec0395, 79ee230f2a, 0e4bd59fac, 6eccadbf73,
f29a1c56e9, 88c3d331a1, 79acc9911e, 0a169980b7, c80d2a3222, 116bdaf690, 6341fb86c7, fa09e181be,
ab4663ec2b, fac22575aa, 0e249f69cd, 5d1761f3e5, dba6da4d00, 59b31372aa, d6b8672e63, deaa1f9578,
f378d218e9, 5b6279f191, 698b28c636, c4d10313e6, f165bfb0af, 4111c18d44, 5abe4c141a, adb5c3743c,
7c5ead90ac, d870987a65, dce4ed9f1d, bbfbc9f0f8, b107384cc6, 2802c8bf28, 9b9784a557, 1e61d05211,
d53b9fbd03, d01bc916f1, 8ea463f516, 088317fd3a, 69881e3bc1, 9af4160068, 45e68603a1, 1eb4b8ed4f,
05f21679d6, 35b635f639, 3ed085459c, 51a8d0a726, 965a48656f, 4259975be9, d2f3f2e24d, f74a955504,
6f1b5101a3, 9f626ec776, 0163ce8df9, 2ab235ec9d, 281d9a5920, 385b1bcbb0, 5287d46073, 64ce9d3744,
80790daae0, 5daac5fe3d, 4323c20d18, f53b6777cc, 87c21e2baa, d072801ad6, 0607b38a20, e0384a7d46,
d73815ba84, c78a492863, 859717c309, 52697a9e66, f8d26b433e, 1acfb6ed1c, 7eaabb3ca2, 3a55f5d17c,
8d5d4000e6, a598008ec3, 86bd54194a, ccd2b06b7a, 0db10a33d0, 317fe9eaa5, a4761d6245, 758aef39d8,
4e3dd04f42, 18b77408ae, 725d5a9e68, 4f29e50ef3, 121ec7936f, 0185a65905, f0d30a0f26, 7e61d1ae27,
e56dd20426, b64c075cdb, 57f8afcb70, bd37e086c2, 66f63ae981, 95b20592ac, 1855dccdf1, 5efcb41310,
f5829364a2, 87bd12d6df, c370b4b40d, 3f01f67f94, 6eb746d994, 03a144fa56, f069ea082f, 9ae48010f0,
3a996c2f00, 45d4065fd6, 9e09be7ba6, 50583815de, 24ea9cf215, 78d0fa75c9, 0685ba265c, be22da775a,
d33309be2b, fdbfebf4be, 812a775b3d, 751fa4ede9, 03a2e6d0c1, 815ce59a3a, c19a56c79f, 7f307a4cac,
52eebfce77, e18416a726, d1f8ea7880, 2cd1b08ff7, 0ee41339aa, 369b59c84a, c305b2b406, c89ef85902,
3d9df822ad, bc2f05d949, 05f115e047, 5cf9d7b6ca, a1cd194d0c, a56c430db0, 6a1ec8db25, 04708f10aa,
ddf36c8324, 96b2a5fb28, bbbba29afc, b229c94fba, 2ad50332cb, 513569ed5d
.github/CODEOWNERS (vendored, 2 changes)

@@ -4,7 +4,7 @@
 * @GreptimeTeam/db-approver
-## [Module] Databse Engine
+## [Module] Database Engine
 /src/index @zhongzc
 /src/mito2 @evenyag @v0y4g3r @waynexia
 /src/query @evenyag
@@ -41,7 +41,14 @@ runs:
 username: ${{ inputs.dockerhub-image-registry-username }}
 password: ${{ inputs.dockerhub-image-registry-token }}

-- name: Build and push dev-builder-ubuntu image
+- name: Set up qemu for multi-platform builds
+uses: docker/setup-qemu-action@v3
+with:
+platforms: linux/amd64,linux/arm64
+# The latest version will lead to segmentation fault.
+image: tonistiigi/binfmt:qemu-v7.0.0-28
+
+- name: Build and push dev-builder-ubuntu image # Build image for amd64 and arm64 platform.
 shell: bash
 if: ${{ inputs.build-dev-builder-ubuntu == 'true' }}
 run: |

@@ -52,7 +59,7 @@ runs:
 IMAGE_NAMESPACE=${{ inputs.dockerhub-image-namespace }} \
 DEV_BUILDER_IMAGE_TAG=${{ inputs.version }}

-- name: Build and push dev-builder-centos image
+- name: Build and push dev-builder-centos image # Only build image for amd64 platform.
 shell: bash
 if: ${{ inputs.build-dev-builder-centos == 'true' }}
 run: |

@@ -69,8 +76,7 @@ runs:
 run: |
 make dev-builder \
 BASE_IMAGE=android \
+BUILDX_MULTI_PLATFORM_BUILD=amd64 \
 IMAGE_REGISTRY=${{ inputs.dockerhub-image-registry }} \
 IMAGE_NAMESPACE=${{ inputs.dockerhub-image-namespace }} \
-DEV_BUILDER_IMAGE_TAG=${{ inputs.version }} && \
-
-docker push ${{ inputs.dockerhub-image-registry }}/${{ inputs.dockerhub-image-namespace }}/dev-builder-android:${{ inputs.version }}
+DEV_BUILDER_IMAGE_TAG=${{ inputs.version }}
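The hunks above pin QEMU via `tonistiigi/binfmt:qemu-v7.0.0-28` before multi-platform builds. A minimal local sketch of the same emulator registration, assuming a Docker daemon that allows privileged containers (the platform argument is illustrative):

```bash
# Register the pinned QEMU emulators (same image the workflow pins because the
# latest binfmt release reportedly segfaults).
docker run --privileged --rm tonistiigi/binfmt:qemu-v7.0.0-28 --install arm64

# Verify which platforms the buildx builder can now target.
docker buildx ls
```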
.github/actions/build-greptime-images/action.yml (vendored, 10 changes)

@@ -34,8 +34,8 @@ inputs:
 required: true
 push-latest-tag:
 description: Whether to push the latest tag
-required: false
-default: 'true'
+required: true
+default: 'false'
 runs:
 using: composite
 steps:

@@ -47,7 +47,11 @@ runs:
 password: ${{ inputs.image-registry-password }}

 - name: Set up qemu for multi-platform builds
-uses: docker/setup-qemu-action@v2
+uses: docker/setup-qemu-action@v3
+with:
+platforms: linux/amd64,linux/arm64
+# The latest version will lead to segmentation fault.
+image: tonistiigi/binfmt:qemu-v7.0.0-28

 - name: Set up buildx
 uses: docker/setup-buildx-action@v2
.github/actions/build-images/action.yml (vendored, 4 changes)

@@ -22,8 +22,8 @@ inputs:
 required: true
 push-latest-tag:
 description: Whether to push the latest tag
-required: false
-default: 'true'
+required: true
+default: 'false'
 dev-mode:
 description: Enable dev mode, only build standard greptime
 required: false

@@ -48,8 +48,7 @@ runs:
 path: /tmp/greptime-*.log
 retention-days: 3

-- name: Build greptime
-if: ${{ inputs.dev-mode == 'false' }}
+- name: Build greptime # Builds standard greptime binary
 uses: ./.github/actions/build-greptime-binary
 with:
 base-image: ubuntu

@@ -47,7 +47,6 @@ runs:
 shell: pwsh
 run: make test sqlness-test
 env:
-RUSTUP_WINDOWS_PATH_ADD_BIN: 1 # Workaround for https://github.com/nextest-rs/nextest/issues/1493
 RUST_BACKTRACE: 1
 SQLNESS_OPTS: "--preserve-state"

@@ -9,8 +9,8 @@ runs:
 steps:
 # Download artifacts from previous jobs, the artifacts will be downloaded to:
 # ${WORKING_DIR}
-# |- greptime-darwin-amd64-pyo3-v0.5.0/greptime-darwin-amd64-pyo3-v0.5.0.tar.gz
-# |- greptime-darwin-amd64-pyo3-v0.5.0.sha256sum/greptime-darwin-amd64-pyo3-v0.5.0.sha256sum
+# |- greptime-darwin-amd64-v0.5.0/greptime-darwin-amd64-v0.5.0.tar.gz
+# |- greptime-darwin-amd64-v0.5.0.sha256sum/greptime-darwin-amd64-v0.5.0.sha256sum
 # |- greptime-darwin-amd64-v0.5.0/greptime-darwin-amd64-v0.5.0.tar.gz
 # |- greptime-darwin-amd64-v0.5.0.sha256sum/greptime-darwin-amd64-v0.5.0.sha256sum
 # ...
.github/actions/release-cn-artifacts/action.yaml (vendored, 18 changes)

@@ -51,8 +51,8 @@ inputs:
 required: true
 upload-to-s3:
 description: Upload to S3
-required: false
-default: 'true'
+required: true
+default: 'false'
 artifacts-dir:
 description: Directory to store artifacts
 required: false

@@ -64,11 +64,11 @@ inputs:
 upload-max-retry-times:
 description: Max retry times for uploading artifacts to S3
 required: false
-default: "20"
+default: "30"
 upload-retry-timeout:
 description: Timeout for uploading artifacts to S3
 required: false
-default: "30" # minutes
+default: "120" # minutes
 runs:
 using: composite
 steps:

@@ -77,13 +77,21 @@ runs:
 with:
 path: ${{ inputs.artifacts-dir }}

+- name: Install s5cmd
+shell: bash
+run: |
+wget https://github.com/peak/s5cmd/releases/download/v2.3.0/s5cmd_2.3.0_Linux-64bit.tar.gz
+tar -xzf s5cmd_2.3.0_Linux-64bit.tar.gz
+sudo mv s5cmd /usr/local/bin/
+sudo chmod +x /usr/local/bin/s5cmd
+
 - name: Release artifacts to cn region
 uses: nick-invision/retry@v2
 if: ${{ inputs.upload-to-s3 == 'true' }}
 env:
 AWS_ACCESS_KEY_ID: ${{ inputs.aws-cn-access-key-id }}
 AWS_SECRET_ACCESS_KEY: ${{ inputs.aws-cn-secret-access-key }}
-AWS_DEFAULT_REGION: ${{ inputs.aws-cn-region }}
+AWS_REGION: ${{ inputs.aws-cn-region }}
 UPDATE_VERSION_INFO: ${{ inputs.update-version-info }}
 with:
 max_attempts: ${{ inputs.upload-max-retry-times }}
@@ -8,15 +8,15 @@ inputs:
 default: 2
 description: "Number of Datanode replicas"
 meta-replicas:
-default: 1
+default: 2
 description: "Number of Metasrv replicas"
 image-registry:
 default: "docker.io"
 description: "Image registry"
 image-repository:
 default: "greptime/greptimedb"
 description: "Image repository"
 image-tag:
 default: "latest"
 description: 'Image tag'
 etcd-endpoints:

@@ -32,12 +32,12 @@ runs:
 steps:
 - name: Install GreptimeDB operator
 uses: nick-fields/retry@v3
 with:
 timeout_minutes: 3
 max_attempts: 3
 shell: bash
 command: |
 helm repo add greptime https://greptimeteam.github.io/helm-charts/
 helm repo update
 helm upgrade \
 --install \

@@ -48,10 +48,10 @@ runs:
 --wait-for-jobs
 - name: Install GreptimeDB cluster
 shell: bash
 run: |
 helm upgrade \
 --install my-greptimedb \
---set meta.etcdEndpoints=${{ inputs.etcd-endpoints }} \
+--set meta.backendStorage.etcd.endpoints=${{ inputs.etcd-endpoints }} \
 --set meta.enableRegionFailover=${{ inputs.enable-region-failover }} \
 --set image.registry=${{ inputs.image-registry }} \
 --set image.repository=${{ inputs.image-repository }} \

@@ -59,7 +59,7 @@ runs:
 --set base.podTemplate.main.resources.requests.cpu=50m \
 --set base.podTemplate.main.resources.requests.memory=256Mi \
 --set base.podTemplate.main.resources.limits.cpu=2000m \
---set base.podTemplate.main.resources.limits.memory=2Gi \
+--set base.podTemplate.main.resources.limits.memory=3Gi \
 --set frontend.replicas=${{ inputs.frontend-replicas }} \
 --set datanode.replicas=${{ inputs.datanode-replicas }} \
 --set meta.replicas=${{ inputs.meta-replicas }} \

@@ -72,7 +72,7 @@ runs:
 - name: Wait for GreptimeDB
 shell: bash
 run: |
 while true; do
 PHASE=$(kubectl -n my-greptimedb get gtc my-greptimedb -o jsonpath='{.status.clusterPhase}')
 if [ "$PHASE" == "Running" ]; then
 echo "Cluster is ready"

@@ -86,10 +86,10 @@ runs:
 - name: Print GreptimeDB info
 if: always()
 shell: bash
 run: |
 kubectl get all --show-labels -n my-greptimedb
 - name: Describe Nodes
 if: always()
 shell: bash
 run: |
 kubectl describe nodes
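The hunk above renames the Helm value used for etcd endpoints. A hedged one-liner showing the equivalent manual install with the new value path (release name and namespace follow the action above; the endpoint string is a placeholder):

```bash
# Newer greptimedb-cluster charts read etcd endpoints from
# meta.backendStorage.etcd.endpoints rather than meta.etcdEndpoints.
helm upgrade --install my-greptimedb greptime/greptimedb-cluster \
  --set meta.backendStorage.etcd.endpoints="etcd.etcd-cluster.svc.cluster.local:2379" \
  -n my-greptimedb
```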
@@ -2,13 +2,14 @@ meta:
 configData: |-
 [runtime]
 global_rt_size = 4

 [wal]
 provider = "kafka"
 broker_endpoints = ["kafka.kafka-cluster.svc.cluster.local:9092"]
 num_topics = 3
+auto_prune_interval = "30s"
+trigger_flush_threshold = 100

 [datanode]
 [datanode.client]
 timeout = "120s"

@@ -21,7 +22,7 @@ datanode:
 [wal]
 provider = "kafka"
 broker_endpoints = ["kafka.kafka-cluster.svc.cluster.local:9092"]
-linger = "2ms"
+overwrite_entry_start_id = true
 frontend:
 configData: |-
 [runtime]
.github/actions/start-runner/action.yml (vendored, 2 changes)

@@ -56,7 +56,7 @@ runs:

 - name: Start EC2 runner
 if: startsWith(inputs.runner, 'ec2')
-uses: machulav/ec2-github-runner@v2
+uses: machulav/ec2-github-runner@v2.3.8
 id: start-linux-arm64-ec2-runner
 with:
 mode: start
.github/actions/stop-runner/action.yml (vendored, 2 changes)

@@ -33,7 +33,7 @@ runs:

 - name: Stop EC2 runner
 if: ${{ inputs.label && inputs.ec2-instance-id }}
-uses: machulav/ec2-github-runner@v2
+uses: machulav/ec2-github-runner@v2.3.8
 with:
 mode: stop
 label: ${{ inputs.label }}
.github/actions/upload-artifacts/action.yml (vendored, 6 changes)

@@ -30,9 +30,9 @@ runs:
 done

 # The compressed artifacts will use the following layout:
-# greptime-linux-amd64-pyo3-v0.3.0sha256sum
-# greptime-linux-amd64-pyo3-v0.3.0.tar.gz
-# greptime-linux-amd64-pyo3-v0.3.0
+# greptime-linux-amd64-v0.3.0sha256sum
+# greptime-linux-amd64-v0.3.0.tar.gz
+# greptime-linux-amd64-v0.3.0
 # └── greptime
 - name: Compress artifacts and calculate checksum
 working-directory: ${{ inputs.working-dir }}
.github/labeler.yaml (vendored, new file, 15 lines)

@@ -0,0 +1,15 @@
+ci:
+- changed-files:
+- any-glob-to-any-file: .github/**
+
+docker:
+- changed-files:
+- any-glob-to-any-file: docker/**
+
+documentation:
+- changed-files:
+- any-glob-to-any-file: docs/**
+
+dashboard:
+- changed-files:
+- any-glob-to-any-file: grafana/**
.github/scripts/check-version.sh (vendored, new executable file, 42 lines)

@@ -0,0 +1,42 @@
+#!/bin/bash
+
+# Get current version
+CURRENT_VERSION=$1
+if [ -z "$CURRENT_VERSION" ]; then
+echo "Error: Failed to get current version"
+exit 1
+fi
+
+# Get the latest version from GitHub Releases
+API_RESPONSE=$(curl -s "https://api.github.com/repos/GreptimeTeam/greptimedb/releases/latest")
+
+if [ -z "$API_RESPONSE" ] || [ "$(echo "$API_RESPONSE" | jq -r '.message')" = "Not Found" ]; then
+echo "Error: Failed to fetch latest version from GitHub"
+exit 1
+fi
+
+# Get the latest version
+LATEST_VERSION=$(echo "$API_RESPONSE" | jq -r '.tag_name')
+
+if [ -z "$LATEST_VERSION" ] || [ "$LATEST_VERSION" = "null" ]; then
+echo "Error: No valid version found in GitHub releases"
+exit 1
+fi
+
+# Cleaned up version number format (removed possible 'v' prefix and -nightly suffix)
+CLEAN_CURRENT=$(echo "$CURRENT_VERSION" | sed 's/^v//' | sed 's/-nightly-.*//')
+CLEAN_LATEST=$(echo "$LATEST_VERSION" | sed 's/^v//' | sed 's/-nightly-.*//')
+
+echo "Current version: $CLEAN_CURRENT"
+echo "Latest release version: $CLEAN_LATEST"
+
+# Use sort -V to compare versions
+HIGHER_VERSION=$(printf "%s\n%s" "$CLEAN_CURRENT" "$CLEAN_LATEST" | sort -V | tail -n1)
+
+if [ "$HIGHER_VERSION" = "$CLEAN_CURRENT" ]; then
+echo "Current version ($CLEAN_CURRENT) is NEWER than or EQUAL to latest ($CLEAN_LATEST)"
+echo "should-push-latest-tag=true" >> $GITHUB_OUTPUT
+else
+echo "Current version ($CLEAN_CURRENT) is OLDER than latest ($CLEAN_LATEST)"
+echo "should-push-latest-tag=false" >> $GITHUB_OUTPUT
+fi
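A rough local dry run of the new script, assuming `curl` and `jq` are available; `GITHUB_OUTPUT` is pointed at a scratch file because the script appends to it, and the tag argument is hypothetical:

```bash
# Compare a candidate tag against the latest GitHub release and inspect the
# should-push-latest-tag output that the release workflow would consume.
export GITHUB_OUTPUT=$(mktemp)
bash .github/scripts/check-version.sh v0.12.0
cat "$GITHUB_OUTPUT"   # should-push-latest-tag=true or =false
```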
.github/scripts/create-version.sh (vendored, 29 changes)

@@ -8,24 +8,25 @@ set -e
 # - If it's a nightly build, the version is 'nightly-YYYYMMDD-$(git rev-parse --short HEAD)', like 'nightly-20230712-e5b243c'.
 # create_version ${GIHUB_EVENT_NAME} ${NEXT_RELEASE_VERSION} ${NIGHTLY_RELEASE_PREFIX}
 function create_version() {
-# Read from envrionment variables.
+# Read from environment variables.
 if [ -z "$GITHUB_EVENT_NAME" ]; then
-echo "GITHUB_EVENT_NAME is empty"
+echo "GITHUB_EVENT_NAME is empty" >&2
 exit 1
 fi

 if [ -z "$NEXT_RELEASE_VERSION" ]; then
-echo "NEXT_RELEASE_VERSION is empty"
-exit 1
+echo "NEXT_RELEASE_VERSION is empty, use version from Cargo.toml" >&2
+# NOTE: Need a `v` prefix for the version string.
+export NEXT_RELEASE_VERSION=v$(grep '^version = ' Cargo.toml | cut -d '"' -f 2 | head -n 1)
 fi

 if [ -z "$NIGHTLY_RELEASE_PREFIX" ]; then
-echo "NIGHTLY_RELEASE_PREFIX is empty"
+echo "NIGHTLY_RELEASE_PREFIX is empty" >&2
 exit 1
 fi

 # Reuse $NEXT_RELEASE_VERSION to identify whether it's a nightly build.
-# It will be like 'nigtly-20230808-7d0d8dc6'.
+# It will be like 'nightly-20230808-7d0d8dc6'.
 if [ "$NEXT_RELEASE_VERSION" = nightly ]; then
 echo "$NIGHTLY_RELEASE_PREFIX-$(date "+%Y%m%d")-$(git rev-parse --short HEAD)"
 exit 0

@@ -35,7 +36,7 @@ function create_version() {
 # It will be like 'dev-2023080819-f0e7216c'.
 if [ "$NEXT_RELEASE_VERSION" = dev ]; then
 if [ -z "$COMMIT_SHA" ]; then
-echo "COMMIT_SHA is empty in dev build"
+echo "COMMIT_SHA is empty in dev build" >&2
 exit 1
 fi
 echo "dev-$(date "+%Y%m%d-%s")-$(echo "$COMMIT_SHA" | cut -c1-8)"

@@ -45,7 +46,7 @@ function create_version() {
 # Note: Only output 'version=xxx' to stdout when everything is ok, so that it can be used in GitHub Actions Outputs.
 if [ "$GITHUB_EVENT_NAME" = push ]; then
 if [ -z "$GITHUB_REF_NAME" ]; then
-echo "GITHUB_REF_NAME is empty in push event"
+echo "GITHUB_REF_NAME is empty in push event" >&2
 exit 1
 fi
 echo "$GITHUB_REF_NAME"

@@ -54,15 +55,15 @@ function create_version() {
 elif [ "$GITHUB_EVENT_NAME" = schedule ]; then
 echo "$NEXT_RELEASE_VERSION-$NIGHTLY_RELEASE_PREFIX-$(date "+%Y%m%d")"
 else
-echo "Unsupported GITHUB_EVENT_NAME: $GITHUB_EVENT_NAME"
+echo "Unsupported GITHUB_EVENT_NAME: $GITHUB_EVENT_NAME" >&2
 exit 1
 fi
 }

 # You can run as following examples:
-# GITHUB_EVENT_NAME=push NEXT_RELEASE_VERSION=v0.4.0 NIGHTLY_RELEASE_PREFIX=nigtly GITHUB_REF_NAME=v0.3.0 ./create-version.sh
-# GITHUB_EVENT_NAME=workflow_dispatch NEXT_RELEASE_VERSION=v0.4.0 NIGHTLY_RELEASE_PREFIX=nigtly ./create-version.sh
-# GITHUB_EVENT_NAME=schedule NEXT_RELEASE_VERSION=v0.4.0 NIGHTLY_RELEASE_PREFIX=nigtly ./create-version.sh
-# GITHUB_EVENT_NAME=schedule NEXT_RELEASE_VERSION=nightly NIGHTLY_RELEASE_PREFIX=nigtly ./create-version.sh
-# GITHUB_EVENT_NAME=workflow_dispatch COMMIT_SHA=f0e7216c4bb6acce9b29a21ec2d683be2e3f984a NEXT_RELEASE_VERSION=dev NIGHTLY_RELEASE_PREFIX=nigtly ./create-version.sh
+# GITHUB_EVENT_NAME=push NEXT_RELEASE_VERSION=v0.4.0 NIGHTLY_RELEASE_PREFIX=nightly GITHUB_REF_NAME=v0.3.0 ./create-version.sh
+# GITHUB_EVENT_NAME=workflow_dispatch NEXT_RELEASE_VERSION=v0.4.0 NIGHTLY_RELEASE_PREFIX=nightly ./create-version.sh
+# GITHUB_EVENT_NAME=schedule NEXT_RELEASE_VERSION=v0.4.0 NIGHTLY_RELEASE_PREFIX=nightly ./create-version.sh
+# GITHUB_EVENT_NAME=schedule NEXT_RELEASE_VERSION=nightly NIGHTLY_RELEASE_PREFIX=nightly ./create-version.sh
+# GITHUB_EVENT_NAME=workflow_dispatch COMMIT_SHA=f0e7216c4bb6acce9b29a21ec2d683be2e3f984a NEXT_RELEASE_VERSION=dev NIGHTLY_RELEASE_PREFIX=nightly ./create-version.sh
 create_version
.github/scripts/deploy-greptimedb.sh (vendored, 6 changes)

@@ -10,7 +10,7 @@ GREPTIMEDB_IMAGE_TAG=${GREPTIMEDB_IMAGE_TAG:-latest}
 ETCD_CHART="oci://registry-1.docker.io/bitnamicharts/etcd"
 GREPTIME_CHART="https://greptimeteam.github.io/helm-charts/"

-# Ceate a cluster with 1 control-plane node and 5 workers.
+# Create a cluster with 1 control-plane node and 5 workers.
 function create_kind_cluster() {
 cat <<EOF | kind create cluster --name "${CLUSTER}" --image kindest/node:"$KUBERNETES_VERSION" --config=-
 kind: Cluster

@@ -68,7 +68,7 @@ function deploy_greptimedb_cluster() {
 helm install "$cluster_name" greptime/greptimedb-cluster \
 --set image.tag="$GREPTIMEDB_IMAGE_TAG" \
---set meta.etcdEndpoints="etcd.$install_namespace:2379" \
+--set meta.backendStorage.etcd.endpoints="etcd.$install_namespace:2379" \
 -n "$install_namespace"

 # Wait for greptimedb cluster to be ready.

@@ -103,7 +103,7 @@ function deploy_greptimedb_cluster_with_s3_storage() {
 helm install "$cluster_name" greptime/greptimedb-cluster -n "$install_namespace" \
 --set image.tag="$GREPTIMEDB_IMAGE_TAG" \
---set meta.etcdEndpoints="etcd.$install_namespace:2379" \
+--set meta.backendStorage.etcd.endpoints="etcd.$install_namespace:2379" \
 --set storage.s3.bucket="$AWS_CI_TEST_BUCKET" \
 --set storage.s3.region="$AWS_REGION" \
 --set storage.s3.root="$DATA_ROOT" \
.github/scripts/update-dev-builder-version.sh (vendored, new executable file, 37 lines)

@@ -0,0 +1,37 @@
+#!/bin/bash
+
+DEV_BUILDER_IMAGE_TAG=$1
+
+update_dev_builder_version() {
+if [ -z "$DEV_BUILDER_IMAGE_TAG" ]; then
+echo "Error: Should specify the dev-builder image tag"
+exit 1
+fi
+
+# Configure Git configs.
+git config --global user.email greptimedb-ci@greptime.com
+git config --global user.name greptimedb-ci
+
+# Checkout a new branch.
+BRANCH_NAME="ci/update-dev-builder-$(date +%Y%m%d%H%M%S)"
+git checkout -b $BRANCH_NAME
+
+# Update the dev-builder image tag in the Makefile.
+sed -i "s/DEV_BUILDER_IMAGE_TAG ?=.*/DEV_BUILDER_IMAGE_TAG ?= ${DEV_BUILDER_IMAGE_TAG}/g" Makefile
+
+# Commit the changes.
+git add Makefile
+git commit -m "ci: update dev-builder image tag"
+git push origin $BRANCH_NAME
+
+# Create a Pull Request.
+gh pr create \
+--title "ci: update dev-builder image tag" \
+--body "This PR updates the dev-builder image tag" \
+--base main \
+--head $BRANCH_NAME \
+--reviewer zyy17 \
+--reviewer daviderli614
+}
+
+update_dev_builder_version
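A sketch of a manual invocation, assuming an authenticated `gh` CLI and push access to the repository; the tag argument is a hypothetical dev-builder image tag:

```bash
# Opens a PR that bumps DEV_BUILDER_IMAGE_TAG in the Makefile.
./.github/scripts/update-dev-builder-version.sh 2026-01-01-deadbeef
```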
.github/scripts/update-helm-charts-version.sh (vendored, new executable file, 46 lines)

@@ -0,0 +1,46 @@
+#!/bin/bash
+
+set -e
+
+VERSION=${VERSION}
+GITHUB_TOKEN=${GITHUB_TOKEN}
+
+update_helm_charts_version() {
+# Configure Git configs.
+git config --global user.email update-helm-charts-version@greptime.com
+git config --global user.name update-helm-charts-version
+
+# Clone helm-charts repository.
+git clone "https://x-access-token:${GITHUB_TOKEN}@github.com/GreptimeTeam/helm-charts.git"
+cd helm-charts
+
+# Set default remote for gh CLI
+gh repo set-default GreptimeTeam/helm-charts
+
+# Checkout a new branch.
+BRANCH_NAME="chore/greptimedb-${VERSION}"
+git checkout -b $BRANCH_NAME
+
+# Update version.
+make update-version CHART=greptimedb-cluster VERSION=${VERSION}
+make update-version CHART=greptimedb-standalone VERSION=${VERSION}
+
+# Update docs.
+make docs
+
+# Commit the changes.
+git add .
+git commit -s -m "chore: Update GreptimeDB version to ${VERSION}"
+git push origin $BRANCH_NAME
+
+# Create a Pull Request.
+gh pr create \
+--title "chore: Update GreptimeDB version to ${VERSION}" \
+--body "This PR updates the GreptimeDB version." \
+--base main \
+--head $BRANCH_NAME \
+--reviewer zyy17 \
+--reviewer daviderli614
+}
+
+update_helm_charts_version
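A sketch of a manual run, assuming an authenticated `gh` CLI; both environment values are placeholders:

```bash
# Opens a PR against GreptimeTeam/helm-charts bumping the chart versions.
VERSION=v0.12.0 GITHUB_TOKEN=<token-with-repo-scope> \
  ./.github/scripts/update-helm-charts-version.sh
```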
.github/scripts/update-homebrew-greptme-version.sh (vendored, new executable file, 42 lines)

@@ -0,0 +1,42 @@
+#!/bin/bash
+
+set -e
+
+VERSION=${VERSION}
+GITHUB_TOKEN=${GITHUB_TOKEN}
+
+update_homebrew_greptime_version() {
+# Configure Git configs.
+git config --global user.email update-greptime-version@greptime.com
+git config --global user.name update-greptime-version
+
+# Clone helm-charts repository.
+git clone "https://x-access-token:${GITHUB_TOKEN}@github.com/GreptimeTeam/homebrew-greptime.git"
+cd homebrew-greptime
+
+# Set default remote for gh CLI
+gh repo set-default GreptimeTeam/homebrew-greptime
+
+# Checkout a new branch.
+BRANCH_NAME="chore/greptimedb-${VERSION}"
+git checkout -b $BRANCH_NAME
+
+# Update version.
+make update-greptime-version VERSION=${VERSION}
+
+# Commit the changes.
+git add .
+git commit -s -m "chore: Update GreptimeDB version to ${VERSION}"
+git push origin $BRANCH_NAME
+
+# Create a Pull Request.
+gh pr create \
+--title "chore: Update GreptimeDB version to ${VERSION}" \
+--body "This PR updates the GreptimeDB version." \
+--base main \
+--head $BRANCH_NAME \
+--reviewer zyy17 \
+--reviewer daviderli614
+}
+
+update_homebrew_greptime_version
.github/scripts/upload-artifacts-to-s3.sh (vendored, 16 changes)

@@ -27,13 +27,13 @@ function upload_artifacts() {
 # ├── latest-version.txt
 # ├── latest-nightly-version.txt
 # ├── v0.1.0
-# │ ├── greptime-darwin-amd64-pyo3-v0.1.0.sha256sum
-# │ └── greptime-darwin-amd64-pyo3-v0.1.0.tar.gz
+# │ ├── greptime-darwin-amd64-v0.1.0.sha256sum
+# │ └── greptime-darwin-amd64-v0.1.0.tar.gz
 # └── v0.2.0
-# ├── greptime-darwin-amd64-pyo3-v0.2.0.sha256sum
-# └── greptime-darwin-amd64-pyo3-v0.2.0.tar.gz
+# ├── greptime-darwin-amd64-v0.2.0.sha256sum
+# └── greptime-darwin-amd64-v0.2.0.tar.gz
 find "$ARTIFACTS_DIR" -type f \( -name "*.tar.gz" -o -name "*.sha256sum" \) | while IFS= read -r file; do
-aws s3 cp \
+s5cmd cp \
 "$file" "s3://$AWS_S3_BUCKET/$RELEASE_DIRS/$VERSION/$(basename "$file")"
 done
 }

@@ -41,11 +41,11 @@ function upload_artifacts() {
 # Updates the latest version information in AWS S3 if UPDATE_VERSION_INFO is true.
 function update_version_info() {
 if [ "$UPDATE_VERSION_INFO" == "true" ]; then
-# If it's the officail release(like v1.0.0, v1.0.1, v1.0.2, etc.), update latest-version.txt.
+# If it's the official release(like v1.0.0, v1.0.1, v1.0.2, etc.), update latest-version.txt.
 if [[ "$VERSION" =~ ^v[0-9]+\.[0-9]+\.[0-9]+$ ]]; then
 echo "Updating latest-version.txt"
 echo "$VERSION" > latest-version.txt
-aws s3 cp \
+s5cmd cp \
 latest-version.txt "s3://$AWS_S3_BUCKET/$RELEASE_DIRS/latest-version.txt"
 fi

@@ -53,7 +53,7 @@ function update_version_info() {
 if [[ "$VERSION" == *"nightly"* ]]; then
 echo "Updating latest-nightly-version.txt"
 echo "$VERSION" > latest-nightly-version.txt
-aws s3 cp \
+s5cmd cp \
 latest-nightly-version.txt "s3://$AWS_S3_BUCKET/$RELEASE_DIRS/latest-nightly-version.txt"
 fi
 fi
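The `aws s3 cp` to `s5cmd cp` swap keeps the same source/destination argument shape. A hedged manual equivalent, assuming s5cmd picks up the standard AWS credential environment variables (bucket, prefix, region, and file names below are placeholders):

```bash
export AWS_ACCESS_KEY_ID=...
export AWS_SECRET_ACCESS_KEY=...
export AWS_REGION=us-west-2   # placeholder region

# Same call shape the script now uses for each artifact.
s5cmd cp greptime-linux-amd64-v0.12.0.tar.gz \
  "s3://example-release-bucket/releases/v0.12.0/greptime-linux-amd64-v0.12.0.tar.gz"
```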
.github/workflows/apidoc.yml (vendored, 4 changes)

@@ -14,9 +14,11 @@ name: Build API docs

 jobs:
 apidoc:
-runs-on: ubuntu-20.04
+runs-on: ubuntu-latest
 steps:
 - uses: actions/checkout@v4
+with:
+persist-credentials: false
 - uses: arduino/setup-protoc@v3
 with:
 repo-token: ${{ secrets.GITHUB_TOKEN }}
.github/workflows/dependency-check.yml (vendored, 5 changes)

@@ -1,9 +1,6 @@
 name: Check Dependencies

 on:
-push:
-branches:
-- main
 pull_request:
 branches:
 - main

@@ -15,6 +12,8 @@ jobs:
 steps:
 - name: Checkout code
 uses: actions/checkout@v4
+with:
+persist-credentials: false

 - name: Set up Rust
 uses: actions-rust-lang/setup-rust-toolchain@v1
.github/workflows/dev-build.yml (vendored, 60 changes)

@@ -16,11 +16,11 @@ on:
 description: The runner uses to build linux-amd64 artifacts
 default: ec2-c6i.4xlarge-amd64
 options:
-- ubuntu-20.04
-- ubuntu-20.04-8-cores
-- ubuntu-20.04-16-cores
-- ubuntu-20.04-32-cores
-- ubuntu-20.04-64-cores
+- ubuntu-22.04
+- ubuntu-22.04-8-cores
+- ubuntu-22.04-16-cores
+- ubuntu-22.04-32-cores
+- ubuntu-22.04-64-cores
 - ec2-c6i.xlarge-amd64 # 4C8G
 - ec2-c6i.2xlarge-amd64 # 8C16G
 - ec2-c6i.4xlarge-amd64 # 16C32G

@@ -55,6 +55,11 @@ on:
 description: Build and push images to DockerHub and ACR
 required: false
 default: true
+upload_artifacts_to_s3:
+type: boolean
+description: Whether upload artifacts to s3
+required: false
+default: false
 cargo_profile:
 type: choice
 description: The cargo profile to use in building GreptimeDB.

@@ -76,20 +81,14 @@ env:

 NIGHTLY_RELEASE_PREFIX: nightly

-# Use the different image name to avoid conflict with the release images.
-IMAGE_NAME: greptimedb-dev

 # The source code will check out in the following path: '${WORKING_DIR}/dev/greptime'.
 CHECKOUT_GREPTIMEDB_PATH: dev/greptimedb

-permissions:
-issues: write

 jobs:
 allocate-runners:
 name: Allocate runners
 if: ${{ github.repository == 'GreptimeTeam/greptimedb' }}
-runs-on: ubuntu-20.04
+runs-on: ubuntu-latest
 outputs:
 linux-amd64-runner: ${{ steps.start-linux-amd64-runner.outputs.label }}
 linux-arm64-runner: ${{ steps.start-linux-arm64-runner.outputs.label }}

@@ -107,6 +106,7 @@ jobs:
 uses: actions/checkout@v4
 with:
 fetch-depth: 0
+persist-credentials: false

 - name: Create version
 id: create-version

@@ -161,6 +161,7 @@ jobs:
 uses: actions/checkout@v4
 with:
 fetch-depth: 0
+persist-credentials: false

 - name: Checkout greptimedb
 uses: actions/checkout@v4

@@ -168,6 +169,7 @@ jobs:
 repository: ${{ inputs.repository }}
 ref: ${{ inputs.commit }}
 path: ${{ env.CHECKOUT_GREPTIMEDB_PATH }}
+persist-credentials: true

 - uses: ./.github/actions/build-linux-artifacts
 with:

@@ -192,6 +194,7 @@ jobs:
 uses: actions/checkout@v4
 with:
 fetch-depth: 0
+persist-credentials: false

 - name: Checkout greptimedb
 uses: actions/checkout@v4

@@ -199,6 +202,7 @@ jobs:
 repository: ${{ inputs.repository }}
 ref: ${{ inputs.commit }}
 path: ${{ env.CHECKOUT_GREPTIMEDB_PATH }}
+persist-credentials: true

 - uses: ./.github/actions/build-linux-artifacts
 with:

@@ -219,26 +223,34 @@ jobs:
 build-linux-amd64-artifacts,
 build-linux-arm64-artifacts,
 ]
-runs-on: ubuntu-20.04
+runs-on: ubuntu-latest
 outputs:
 build-result: ${{ steps.set-build-result.outputs.build-result }}
 steps:
 - uses: actions/checkout@v4
 with:
 fetch-depth: 0
+persist-credentials: false

 - name: Build and push images to dockerhub
 uses: ./.github/actions/build-images
 with:
 image-registry: docker.io
 image-namespace: ${{ vars.IMAGE_NAMESPACE }}
-image-name: ${{ env.IMAGE_NAME }}
+image-name: ${{ vars.DEV_BUILD_IMAGE_NAME }}
 image-registry-username: ${{ secrets.DOCKERHUB_USERNAME }}
 image-registry-password: ${{ secrets.DOCKERHUB_TOKEN }}
 version: ${{ needs.allocate-runners.outputs.version }}
 push-latest-tag: false # Don't push the latest tag to registry.
 dev-mode: true # Only build the standard images.

+- name: Echo Docker image tag to step summary
+run: |
+echo "## Docker Image Tag" >> $GITHUB_STEP_SUMMARY
+echo "Image Tag: \`${{ needs.allocate-runners.outputs.version }}\`" >> $GITHUB_STEP_SUMMARY
+echo "Full Image Name: \`docker.io/${{ vars.IMAGE_NAMESPACE }}/${{ vars.DEV_BUILD_IMAGE_NAME }}:${{ needs.allocate-runners.outputs.version }}\`" >> $GITHUB_STEP_SUMMARY
+echo "Pull Command: \`docker pull docker.io/${{ vars.IMAGE_NAMESPACE }}/${{ vars.DEV_BUILD_IMAGE_NAME }}:${{ needs.allocate-runners.outputs.version }}\`" >> $GITHUB_STEP_SUMMARY
+
 - name: Set build result
 id: set-build-result
 run: |

@@ -251,19 +263,20 @@ jobs:
 allocate-runners,
 release-images-to-dockerhub,
 ]
-runs-on: ubuntu-20.04
+runs-on: ubuntu-latest
 continue-on-error: true
 steps:
 - uses: actions/checkout@v4
 with:
 fetch-depth: 0
+persist-credentials: false

 - name: Release artifacts to CN region
 uses: ./.github/actions/release-cn-artifacts
 with:
 src-image-registry: docker.io
 src-image-namespace: ${{ vars.IMAGE_NAMESPACE }}
-src-image-name: ${{ env.IMAGE_NAME }}
+src-image-name: ${{ vars.DEV_BUILD_IMAGE_NAME }}
 dst-image-registry-username: ${{ secrets.ALICLOUD_USERNAME }}
 dst-image-registry-password: ${{ secrets.ALICLOUD_PASSWORD }}
 dst-image-registry: ${{ vars.ACR_IMAGE_REGISTRY }}

@@ -273,6 +286,7 @@ jobs:
 aws-cn-access-key-id: ${{ secrets.AWS_CN_ACCESS_KEY_ID }}
 aws-cn-secret-access-key: ${{ secrets.AWS_CN_SECRET_ACCESS_KEY }}
 aws-cn-region: ${{ vars.AWS_RELEASE_BUCKET_REGION }}
+upload-to-s3: ${{ inputs.upload_artifacts_to_s3 }}
 dev-mode: true # Only build the standard images(exclude centos images).
 push-latest-tag: false # Don't push the latest tag to registry.
 update-version-info: false # Don't update the version info in S3.

@@ -281,7 +295,7 @@ jobs:
 name: Stop linux-amd64 runner
 # Only run this job when the runner is allocated.
 if: ${{ always() }}
-runs-on: ubuntu-20.04
+runs-on: ubuntu-latest
 needs: [
 allocate-runners,
 build-linux-amd64-artifacts,

@@ -291,6 +305,7 @@ jobs:
 uses: actions/checkout@v4
 with:
 fetch-depth: 0
+persist-credentials: false

 - name: Stop EC2 runner
 uses: ./.github/actions/stop-runner

@@ -306,7 +321,7 @@ jobs:
 name: Stop linux-arm64 runner
 # Only run this job when the runner is allocated.
 if: ${{ always() }}
-runs-on: ubuntu-20.04
+runs-on: ubuntu-latest
 needs: [
 allocate-runners,
 build-linux-arm64-artifacts,

@@ -316,6 +331,7 @@ jobs:
 uses: actions/checkout@v4
 with:
 fetch-depth: 0
+persist-credentials: false

 - name: Stop EC2 runner
 uses: ./.github/actions/stop-runner

@@ -333,11 +349,17 @@ jobs:
 needs: [
 release-images-to-dockerhub
 ]
-runs-on: ubuntu-20.04
+runs-on: ubuntu-latest
+permissions:
+issues: write

 env:
 SLACK_WEBHOOK_URL: ${{ secrets.SLACK_WEBHOOK_URL_DEVELOP_CHANNEL }}
 steps:
 - uses: actions/checkout@v4
+with:
+fetch-depth: 0
+persist-credentials: false
 - uses: ./.github/actions/setup-cyborg
 - name: Report CI status
 id: report-ci-status
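Since `upload_artifacts_to_s3` is a `workflow_dispatch` input, the dev build can be kicked off from the CLI. A sketch assuming an authenticated `gh` CLI and that the remaining inputs keep their defaults:

```bash
# Trigger a dev build that also uploads artifacts to S3.
gh workflow run dev-build.yml \
  --repo GreptimeTeam/greptimedb \
  -f upload_artifacts_to_s3=true
```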
.github/workflows/develop.yml (vendored, 246 changes)

@@ -1,4 +1,6 @@
 on:
+schedule:
+- cron: "0 15 * * 1-5"
 merge_group:
 pull_request:
 types: [ opened, synchronize, reopened, ready_for_review ]

@@ -20,10 +22,13 @@ concurrency:

 jobs:
 check-typos-and-docs:
+if: ${{ github.repository == 'GreptimeTeam/greptimedb' }}
 name: Check typos and docs
-runs-on: ubuntu-20.04
+runs-on: ubuntu-latest
 steps:
 - uses: actions/checkout@v4
+with:
+persist-credentials: false
 - uses: crate-ci/typos@master
 - name: Check the config docs
 run: |

@@ -32,21 +37,27 @@ jobs:
 || (echo "'config/config.md' is not up-to-date, please run 'make config-docs'." && exit 1)

 license-header-check:
-runs-on: ubuntu-20.04
+if: ${{ github.repository == 'GreptimeTeam/greptimedb' }}
+runs-on: ubuntu-latest
 name: Check License Header
 steps:
 - uses: actions/checkout@v4
+with:
+persist-credentials: false
 - uses: korandoru/hawkeye@v5

 check:
+if: ${{ github.repository == 'GreptimeTeam/greptimedb' }}
 name: Check
 runs-on: ${{ matrix.os }}
 strategy:
 matrix:
-os: [ windows-2022, ubuntu-20.04 ]
+os: [ ubuntu-latest ]
 timeout-minutes: 60
 steps:
 - uses: actions/checkout@v4
+with:
+persist-credentials: false
 - uses: arduino/setup-protoc@v3
 with:
 repo-token: ${{ secrets.GITHUB_TOKEN }}

@@ -57,35 +68,38 @@ jobs:
 # Shares across multiple jobs
 # Shares with `Clippy` job
 shared-key: "check-lint"
+cache-all-crates: "true"
+save-if: ${{ github.ref == 'refs/heads/main' }}
 - name: Run cargo check
 run: cargo check --locked --workspace --all-targets

 toml:
+if: ${{ github.repository == 'GreptimeTeam/greptimedb' }}
 name: Toml Check
-runs-on: ubuntu-20.04
+runs-on: ubuntu-latest
 timeout-minutes: 60
 steps:
 - uses: actions/checkout@v4
-- uses: actions-rust-lang/setup-rust-toolchain@v1
-- name: Rust Cache
-uses: Swatinem/rust-cache@v2
 with:
-# Shares across multiple jobs
-shared-key: "check-toml"
+persist-credentials: false
+- uses: actions-rust-lang/setup-rust-toolchain@v1
 - name: Install taplo
 run: cargo +stable install taplo-cli --version ^0.9 --locked --force
 - name: Run taplo
 run: taplo format --check

 build:
+if: ${{ github.repository == 'GreptimeTeam/greptimedb' }}
 name: Build GreptimeDB binaries
 runs-on: ${{ matrix.os }}
 strategy:
 matrix:
-os: [ ubuntu-20.04 ]
+os: [ ubuntu-latest ]
 timeout-minutes: 60
 steps:
 - uses: actions/checkout@v4
+with:
+persist-credentials: false
 - uses: arduino/setup-protoc@v3
 with:
 repo-token: ${{ secrets.GITHUB_TOKEN }}

@@ -94,13 +108,15 @@ jobs:
 with:
 # Shares across multiple jobs
 shared-key: "build-binaries"
+cache-all-crates: "true"
+save-if: ${{ github.ref == 'refs/heads/main' }}
 - name: Install cargo-gc-bin
 shell: bash
 run: cargo install cargo-gc-bin --force
 - name: Build greptime binaries
 shell: bash
 # `cargo gc` will invoke `cargo build` with specified args
-run: cargo gc -- --bin greptime --bin sqlness-runner
+run: cargo gc -- --bin greptime --bin sqlness-runner --features pg_kvbackend
 - name: Pack greptime binaries
 shell: bash
 run: |

@@ -116,6 +132,7 @@ jobs:
 version: current

 fuzztest:
+if: ${{ github.repository == 'GreptimeTeam/greptimedb' }}
 name: Fuzz Test
 needs: build
 runs-on: ubuntu-latest

@@ -138,15 +155,12 @@ jobs:
 echo "Disk space after:"
 df -h
 - uses: actions/checkout@v4
+with:
+persist-credentials: false
 - uses: arduino/setup-protoc@v3
 with:
 repo-token: ${{ secrets.GITHUB_TOKEN }}
 - uses: actions-rust-lang/setup-rust-toolchain@v1
-- name: Rust Cache
-uses: Swatinem/rust-cache@v2
-with:
-# Shares across multiple jobs
-shared-key: "fuzz-test-targets"
 - name: Set Rust Fuzz
 shell: bash
 run: |

@@ -175,11 +189,13 @@ jobs:
 max-total-time: 120

 unstable-fuzztest:
+if: ${{ github.repository == 'GreptimeTeam/greptimedb' }}
 name: Unstable Fuzz Test
 needs: build-greptime-ci
 runs-on: ubuntu-latest
 timeout-minutes: 60
 strategy:
+fail-fast: false
 matrix:
target: [ "unstable_fuzz_create_table_standalone" ]
|
target: [ "unstable_fuzz_create_table_standalone" ]
|
||||||
steps:
|
steps:
|
||||||
@@ -196,26 +212,23 @@ jobs:
|
|||||||
echo "Disk space after:"
|
echo "Disk space after:"
|
||||||
df -h
|
df -h
|
||||||
- uses: actions/checkout@v4
|
- uses: actions/checkout@v4
|
||||||
|
with:
|
||||||
|
persist-credentials: false
|
||||||
- uses: arduino/setup-protoc@v3
|
- uses: arduino/setup-protoc@v3
|
||||||
with:
|
with:
|
||||||
repo-token: ${{ secrets.GITHUB_TOKEN }}
|
repo-token: ${{ secrets.GITHUB_TOKEN }}
|
||||||
- uses: actions-rust-lang/setup-rust-toolchain@v1
|
- uses: actions-rust-lang/setup-rust-toolchain@v1
|
||||||
- name: Rust Cache
|
|
||||||
uses: Swatinem/rust-cache@v2
|
|
||||||
with:
|
|
||||||
# Shares across multiple jobs
|
|
||||||
shared-key: "fuzz-test-targets"
|
|
||||||
- name: Set Rust Fuzz
|
- name: Set Rust Fuzz
|
||||||
shell: bash
|
shell: bash
|
||||||
run: |
|
run: |
|
||||||
sudo apt update && sudo apt install -y libfuzzer-14-dev
|
sudo apt update && sudo apt install -y libfuzzer-14-dev
|
||||||
cargo install cargo-fuzz cargo-gc-bin --force
|
cargo install cargo-fuzz cargo-gc-bin --force
|
||||||
- name: Download pre-built binariy
|
- name: Download pre-built binary
|
||||||
uses: actions/download-artifact@v4
|
uses: actions/download-artifact@v4
|
||||||
with:
|
with:
|
||||||
name: bin
|
name: bin
|
||||||
path: .
|
path: .
|
||||||
- name: Unzip bianry
|
- name: Unzip binary
|
||||||
run: |
|
run: |
|
||||||
tar -xvf ./bin.tar.gz
|
tar -xvf ./bin.tar.gz
|
||||||
rm ./bin.tar.gz
|
rm ./bin.tar.gz
|
||||||
@@ -237,16 +250,24 @@ jobs:
|
|||||||
name: unstable-fuzz-logs
|
name: unstable-fuzz-logs
|
||||||
path: /tmp/unstable-greptime/
|
path: /tmp/unstable-greptime/
|
||||||
retention-days: 3
|
retention-days: 3
|
||||||
|
- name: Describe pods
|
||||||
|
if: failure()
|
||||||
|
shell: bash
|
||||||
|
run: |
|
||||||
|
kubectl describe pod -n my-greptimedb
|
||||||
|
|
||||||
build-greptime-ci:
|
build-greptime-ci:
|
||||||
|
if: ${{ github.repository == 'GreptimeTeam/greptimedb' }}
|
||||||
name: Build GreptimeDB binary (profile-CI)
|
name: Build GreptimeDB binary (profile-CI)
|
||||||
runs-on: ${{ matrix.os }}
|
runs-on: ${{ matrix.os }}
|
||||||
strategy:
|
strategy:
|
||||||
matrix:
|
matrix:
|
||||||
os: [ ubuntu-20.04 ]
|
os: [ ubuntu-latest ]
|
||||||
timeout-minutes: 60
|
timeout-minutes: 60
|
||||||
steps:
|
steps:
|
||||||
- uses: actions/checkout@v4
|
- uses: actions/checkout@v4
|
||||||
|
with:
|
||||||
|
persist-credentials: false
|
||||||
- uses: arduino/setup-protoc@v3
|
- uses: arduino/setup-protoc@v3
|
||||||
with:
|
with:
|
||||||
repo-token: ${{ secrets.GITHUB_TOKEN }}
|
repo-token: ${{ secrets.GITHUB_TOKEN }}
|
||||||
@@ -255,13 +276,15 @@ jobs:
|
|||||||
with:
|
with:
|
||||||
# Shares across multiple jobs
|
# Shares across multiple jobs
|
||||||
shared-key: "build-greptime-ci"
|
shared-key: "build-greptime-ci"
|
||||||
|
cache-all-crates: "true"
|
||||||
|
save-if: ${{ github.ref == 'refs/heads/main' }}
|
||||||
- name: Install cargo-gc-bin
|
- name: Install cargo-gc-bin
|
||||||
shell: bash
|
shell: bash
|
||||||
run: cargo install cargo-gc-bin --force
|
run: cargo install cargo-gc-bin --force
|
||||||
- name: Build greptime bianry
|
- name: Build greptime binary
|
||||||
shell: bash
|
shell: bash
|
||||||
# `cargo gc` will invoke `cargo build` with specified args
|
# `cargo gc` will invoke `cargo build` with specified args
|
||||||
run: cargo gc --profile ci -- --bin greptime
|
run: cargo gc --profile ci -- --bin greptime --features pg_kvbackend
|
||||||
- name: Pack greptime binary
|
- name: Pack greptime binary
|
||||||
shell: bash
|
shell: bash
|
||||||
run: |
|
run: |
|
||||||
@@ -276,11 +299,13 @@ jobs:
|
|||||||
version: current
|
version: current
|
||||||
|
|
||||||
distributed-fuzztest:
|
distributed-fuzztest:
|
||||||
|
if: ${{ github.repository == 'GreptimeTeam/greptimedb' }}
|
||||||
name: Fuzz Test (Distributed, ${{ matrix.mode.name }}, ${{ matrix.target }})
|
name: Fuzz Test (Distributed, ${{ matrix.mode.name }}, ${{ matrix.target }})
|
||||||
runs-on: ubuntu-latest
|
runs-on: ubuntu-latest
|
||||||
needs: build-greptime-ci
|
needs: build-greptime-ci
|
||||||
timeout-minutes: 60
|
timeout-minutes: 60
|
||||||
strategy:
|
strategy:
|
||||||
|
fail-fast: false
|
||||||
matrix:
|
matrix:
|
||||||
target: [ "fuzz_create_table", "fuzz_alter_table", "fuzz_create_database", "fuzz_create_logical_table", "fuzz_alter_logical_table", "fuzz_insert", "fuzz_insert_logical_table" ]
|
target: [ "fuzz_create_table", "fuzz_alter_table", "fuzz_create_database", "fuzz_create_logical_table", "fuzz_alter_logical_table", "fuzz_insert", "fuzz_insert_logical_table" ]
|
||||||
mode:
|
mode:
|
||||||
@@ -302,26 +327,23 @@ jobs:
|
|||||||
echo "Disk space after:"
|
echo "Disk space after:"
|
||||||
df -h
|
df -h
|
||||||
- uses: actions/checkout@v4
|
- uses: actions/checkout@v4
|
||||||
|
with:
|
||||||
|
persist-credentials: false
|
||||||
- name: Setup Kind
|
- name: Setup Kind
|
||||||
uses: ./.github/actions/setup-kind
|
uses: ./.github/actions/setup-kind
|
||||||
- if: matrix.mode.minio
|
- if: matrix.mode.minio
|
||||||
name: Setup Minio
|
name: Setup Minio
|
||||||
uses: ./.github/actions/setup-minio
|
uses: ./.github/actions/setup-minio
|
||||||
- if: matrix.mode.kafka
|
- if: matrix.mode.kafka
|
||||||
name: Setup Kafka cluser
|
name: Setup Kafka cluster
|
||||||
uses: ./.github/actions/setup-kafka-cluster
|
uses: ./.github/actions/setup-kafka-cluster
|
||||||
- name: Setup Etcd cluser
|
- name: Setup Etcd cluster
|
||||||
uses: ./.github/actions/setup-etcd-cluster
|
uses: ./.github/actions/setup-etcd-cluster
|
||||||
# Prepares for fuzz tests
|
# Prepares for fuzz tests
|
||||||
- uses: arduino/setup-protoc@v3
|
- uses: arduino/setup-protoc@v3
|
||||||
with:
|
with:
|
||||||
repo-token: ${{ secrets.GITHUB_TOKEN }}
|
repo-token: ${{ secrets.GITHUB_TOKEN }}
|
||||||
- uses: actions-rust-lang/setup-rust-toolchain@v1
|
- uses: actions-rust-lang/setup-rust-toolchain@v1
|
||||||
- name: Rust Cache
|
|
||||||
uses: Swatinem/rust-cache@v2
|
|
||||||
with:
|
|
||||||
# Shares across multiple jobs
|
|
||||||
shared-key: "fuzz-test-targets"
|
|
||||||
- name: Set Rust Fuzz
|
- name: Set Rust Fuzz
|
||||||
shell: bash
|
shell: bash
|
||||||
run: |
|
run: |
|
||||||
@@ -388,6 +410,11 @@ jobs:
|
|||||||
shell: bash
|
shell: bash
|
||||||
run: |
|
run: |
|
||||||
kubectl describe nodes
|
kubectl describe nodes
|
||||||
|
- name: Describe pod
|
||||||
|
if: failure()
|
||||||
|
shell: bash
|
||||||
|
run: |
|
||||||
|
kubectl describe pod -n my-greptimedb
|
||||||
- name: Export kind logs
|
- name: Export kind logs
|
||||||
if: failure()
|
if: failure()
|
||||||
shell: bash
|
shell: bash
|
||||||
@@ -410,11 +437,13 @@ jobs:
|
|||||||
docker system prune -f
|
docker system prune -f
|
||||||
|
|
||||||
distributed-fuzztest-with-chaos:
|
distributed-fuzztest-with-chaos:
|
||||||
|
if: ${{ github.repository == 'GreptimeTeam/greptimedb' }}
|
||||||
name: Fuzz Test with Chaos (Distributed, ${{ matrix.mode.name }}, ${{ matrix.target }})
|
name: Fuzz Test with Chaos (Distributed, ${{ matrix.mode.name }}, ${{ matrix.target }})
|
||||||
runs-on: ubuntu-latest
|
runs-on: ubuntu-latest
|
||||||
needs: build-greptime-ci
|
needs: build-greptime-ci
|
||||||
timeout-minutes: 60
|
timeout-minutes: 60
|
||||||
strategy:
|
strategy:
|
||||||
|
fail-fast: false
|
||||||
matrix:
|
matrix:
|
||||||
target: ["fuzz_migrate_mito_regions", "fuzz_migrate_metric_regions", "fuzz_failover_mito_regions", "fuzz_failover_metric_regions"]
|
target: ["fuzz_migrate_mito_regions", "fuzz_migrate_metric_regions", "fuzz_failover_mito_regions", "fuzz_failover_metric_regions"]
|
||||||
mode:
|
mode:
|
||||||
@@ -449,6 +478,8 @@ jobs:
|
|||||||
echo "Disk space after:"
|
echo "Disk space after:"
|
||||||
df -h
|
df -h
|
||||||
- uses: actions/checkout@v4
|
- uses: actions/checkout@v4
|
||||||
|
with:
|
||||||
|
persist-credentials: false
|
||||||
- name: Setup Kind
|
- name: Setup Kind
|
||||||
uses: ./.github/actions/setup-kind
|
uses: ./.github/actions/setup-kind
|
||||||
- name: Setup Chaos Mesh
|
- name: Setup Chaos Mesh
|
||||||
@@ -457,20 +488,15 @@ jobs:
|
|||||||
name: Setup Minio
|
name: Setup Minio
|
||||||
uses: ./.github/actions/setup-minio
|
uses: ./.github/actions/setup-minio
|
||||||
- if: matrix.mode.kafka
|
- if: matrix.mode.kafka
|
||||||
name: Setup Kafka cluser
|
name: Setup Kafka cluster
|
||||||
uses: ./.github/actions/setup-kafka-cluster
|
uses: ./.github/actions/setup-kafka-cluster
|
||||||
- name: Setup Etcd cluser
|
- name: Setup Etcd cluster
|
||||||
uses: ./.github/actions/setup-etcd-cluster
|
uses: ./.github/actions/setup-etcd-cluster
|
||||||
# Prepares for fuzz tests
|
# Prepares for fuzz tests
|
||||||
- uses: arduino/setup-protoc@v3
|
- uses: arduino/setup-protoc@v3
|
||||||
with:
|
with:
|
||||||
repo-token: ${{ secrets.GITHUB_TOKEN }}
|
repo-token: ${{ secrets.GITHUB_TOKEN }}
|
||||||
- uses: actions-rust-lang/setup-rust-toolchain@v1
|
- uses: actions-rust-lang/setup-rust-toolchain@v1
|
||||||
- name: Rust Cache
|
|
||||||
uses: Swatinem/rust-cache@v2
|
|
||||||
with:
|
|
||||||
# Shares across multiple jobs
|
|
||||||
shared-key: "fuzz-test-targets"
|
|
||||||
- name: Set Rust Fuzz
|
- name: Set Rust Fuzz
|
||||||
shell: bash
|
shell: bash
|
||||||
run: |
|
run: |
|
||||||
@@ -538,6 +564,11 @@ jobs:
|
|||||||
shell: bash
|
shell: bash
|
||||||
run: |
|
run: |
|
||||||
kubectl describe nodes
|
kubectl describe nodes
|
||||||
|
- name: Describe pods
|
||||||
|
if: failure()
|
||||||
|
shell: bash
|
||||||
|
run: |
|
||||||
|
kubectl describe pod -n my-greptimedb
|
||||||
- name: Export kind logs
|
- name: Export kind logs
|
||||||
if: failure()
|
if: failure()
|
||||||
shell: bash
|
shell: bash
|
||||||
@@ -560,12 +591,14 @@ jobs:
|
|||||||
docker system prune -f
|
docker system prune -f
|
||||||
|
|
||||||
sqlness:
|
sqlness:
|
||||||
|
if: ${{ github.repository == 'GreptimeTeam/greptimedb' }}
|
||||||
name: Sqlness Test (${{ matrix.mode.name }})
|
name: Sqlness Test (${{ matrix.mode.name }})
|
||||||
needs: build
|
needs: build
|
||||||
runs-on: ${{ matrix.os }}
|
runs-on: ${{ matrix.os }}
|
||||||
strategy:
|
strategy:
|
||||||
|
fail-fast: false
|
||||||
matrix:
|
matrix:
|
||||||
os: [ ubuntu-20.04 ]
|
os: [ ubuntu-latest ]
|
||||||
mode:
|
mode:
|
||||||
- name: "Basic"
|
- name: "Basic"
|
||||||
opts: ""
|
opts: ""
|
||||||
@@ -573,13 +606,18 @@ jobs:
|
|||||||
- name: "Remote WAL"
|
- name: "Remote WAL"
|
||||||
opts: "-w kafka -k 127.0.0.1:9092"
|
opts: "-w kafka -k 127.0.0.1:9092"
|
||||||
kafka: true
|
kafka: true
|
||||||
|
- name: "PostgreSQL KvBackend"
|
||||||
|
opts: "--setup-pg"
|
||||||
|
kafka: false
|
||||||
timeout-minutes: 60
|
timeout-minutes: 60
|
||||||
steps:
|
steps:
|
||||||
- uses: actions/checkout@v4
|
- uses: actions/checkout@v4
|
||||||
|
with:
|
||||||
|
persist-credentials: false
|
||||||
- if: matrix.mode.kafka
|
- if: matrix.mode.kafka
|
||||||
name: Setup kafka server
|
name: Setup kafka server
|
||||||
working-directory: tests-integration/fixtures/kafka
|
working-directory: tests-integration/fixtures
|
||||||
run: docker compose -f docker-compose-standalone.yml up -d --wait
|
run: docker compose up -d --wait kafka
|
||||||
- name: Download pre-built binaries
|
- name: Download pre-built binaries
|
||||||
uses: actions/download-artifact@v4
|
uses: actions/download-artifact@v4
|
||||||
with:
|
with:
|
||||||
@@ -598,31 +636,32 @@ jobs:
|
|||||||
retention-days: 3
|
retention-days: 3
|
||||||
|
|
||||||
fmt:
|
fmt:
|
||||||
|
if: ${{ github.repository == 'GreptimeTeam/greptimedb' }}
|
||||||
name: Rustfmt
|
name: Rustfmt
|
||||||
runs-on: ubuntu-20.04
|
runs-on: ubuntu-latest
|
||||||
timeout-minutes: 60
|
timeout-minutes: 60
|
||||||
steps:
|
steps:
|
||||||
- uses: actions/checkout@v4
|
- uses: actions/checkout@v4
|
||||||
|
with:
|
||||||
|
persist-credentials: false
|
||||||
- uses: arduino/setup-protoc@v3
|
- uses: arduino/setup-protoc@v3
|
||||||
with:
|
with:
|
||||||
repo-token: ${{ secrets.GITHUB_TOKEN }}
|
repo-token: ${{ secrets.GITHUB_TOKEN }}
|
||||||
- uses: actions-rust-lang/setup-rust-toolchain@v1
|
- uses: actions-rust-lang/setup-rust-toolchain@v1
|
||||||
with:
|
with:
|
||||||
components: rustfmt
|
components: rustfmt
|
||||||
- name: Rust Cache
|
|
||||||
uses: Swatinem/rust-cache@v2
|
|
||||||
with:
|
|
||||||
# Shares across multiple jobs
|
|
||||||
shared-key: "check-rust-fmt"
|
|
||||||
- name: Check format
|
- name: Check format
|
||||||
run: make fmt-check
|
run: make fmt-check
|
||||||
|
|
||||||
clippy:
|
clippy:
|
||||||
|
if: ${{ github.repository == 'GreptimeTeam/greptimedb' }}
|
||||||
name: Clippy
|
name: Clippy
|
||||||
runs-on: ubuntu-20.04
|
runs-on: ubuntu-latest
|
||||||
timeout-minutes: 60
|
timeout-minutes: 60
|
||||||
steps:
|
steps:
|
||||||
- uses: actions/checkout@v4
|
- uses: actions/checkout@v4
|
||||||
|
with:
|
||||||
|
persist-credentials: false
|
||||||
- uses: arduino/setup-protoc@v3
|
- uses: arduino/setup-protoc@v3
|
||||||
with:
|
with:
|
||||||
repo-token: ${{ secrets.GITHUB_TOKEN }}
|
repo-token: ${{ secrets.GITHUB_TOKEN }}
|
||||||
@@ -635,55 +674,108 @@ jobs:
|
|||||||
# Shares across multiple jobs
|
# Shares across multiple jobs
|
||||||
# Shares with `Check` job
|
# Shares with `Check` job
|
||||||
shared-key: "check-lint"
|
shared-key: "check-lint"
|
||||||
|
cache-all-crates: "true"
|
||||||
|
save-if: ${{ github.ref == 'refs/heads/main' }}
|
||||||
- name: Run cargo clippy
|
- name: Run cargo clippy
|
||||||
run: make clippy
|
run: make clippy
|
||||||
|
|
||||||
coverage:
|
conflict-check:
|
||||||
if: github.event.pull_request.draft == false
|
if: ${{ github.repository == 'GreptimeTeam/greptimedb' }}
|
||||||
runs-on: ubuntu-20.04-8-cores
|
name: Check for conflict
|
||||||
timeout-minutes: 60
|
runs-on: ubuntu-latest
|
||||||
needs: [clippy, fmt]
|
|
||||||
steps:
|
steps:
|
||||||
- uses: actions/checkout@v4
|
- uses: actions/checkout@v4
|
||||||
|
with:
|
||||||
|
persist-credentials: false
|
||||||
|
- name: Merge Conflict Finder
|
||||||
|
uses: olivernybroe/action-conflict-finder@v4.0
|
||||||
|
|
||||||
|
test:
|
||||||
|
if: ${{ github.repository == 'GreptimeTeam/greptimedb' && github.event_name != 'merge_group' }}
|
||||||
|
runs-on: ubuntu-22.04-arm
|
||||||
|
timeout-minutes: 60
|
||||||
|
needs: [conflict-check, clippy, fmt]
|
||||||
|
steps:
|
||||||
|
- uses: actions/checkout@v4
|
||||||
|
with:
|
||||||
|
persist-credentials: false
|
||||||
- uses: arduino/setup-protoc@v3
|
- uses: arduino/setup-protoc@v3
|
||||||
with:
|
with:
|
||||||
repo-token: ${{ secrets.GITHUB_TOKEN }}
|
repo-token: ${{ secrets.GITHUB_TOKEN }}
|
||||||
- uses: KyleMayes/install-llvm-action@v1
|
- uses: rui314/setup-mold@v1
|
||||||
with:
|
|
||||||
version: "14.0"
|
|
||||||
- name: Install toolchain
|
- name: Install toolchain
|
||||||
uses: actions-rust-lang/setup-rust-toolchain@v1
|
uses: actions-rust-lang/setup-rust-toolchain@v1
|
||||||
with:
|
with:
|
||||||
components: llvm-tools-preview
|
cache: false
|
||||||
- name: Rust Cache
|
- name: Rust Cache
|
||||||
uses: Swatinem/rust-cache@v2
|
uses: Swatinem/rust-cache@v2
|
||||||
with:
|
with:
|
||||||
# Shares cross multiple jobs
|
# Shares cross multiple jobs
|
||||||
shared-key: "coverage-test"
|
shared-key: "coverage-test"
|
||||||
- name: Docker Cache
|
cache-all-crates: "true"
|
||||||
uses: ScribeMD/docker-cache@0.3.7
|
save-if: ${{ github.ref == 'refs/heads/main' }}
|
||||||
|
- name: Install latest nextest release
|
||||||
|
uses: taiki-e/install-action@nextest
|
||||||
|
- name: Setup external services
|
||||||
|
working-directory: tests-integration/fixtures
|
||||||
|
run: docker compose up -d --wait
|
||||||
|
- name: Run nextest cases
|
||||||
|
run: cargo nextest run --workspace -F dashboard -F pg_kvbackend
|
||||||
|
env:
|
||||||
|
CARGO_BUILD_RUSTFLAGS: "-C link-arg=-fuse-ld=mold"
|
||||||
|
RUST_BACKTRACE: 1
|
||||||
|
RUST_MIN_STACK: 8388608 # 8MB
|
||||||
|
CARGO_INCREMENTAL: 0
|
||||||
|
GT_S3_BUCKET: ${{ vars.AWS_CI_TEST_BUCKET }}
|
||||||
|
GT_S3_ACCESS_KEY_ID: ${{ secrets.AWS_CI_TEST_ACCESS_KEY_ID }}
|
||||||
|
GT_S3_ACCESS_KEY: ${{ secrets.AWS_CI_TEST_SECRET_ACCESS_KEY }}
|
||||||
|
GT_S3_REGION: ${{ vars.AWS_CI_TEST_BUCKET_REGION }}
|
||||||
|
GT_MINIO_BUCKET: greptime
|
||||||
|
GT_MINIO_ACCESS_KEY_ID: superpower_ci_user
|
||||||
|
GT_MINIO_ACCESS_KEY: superpower_password
|
||||||
|
GT_MINIO_REGION: us-west-2
|
||||||
|
GT_MINIO_ENDPOINT_URL: http://127.0.0.1:9000
|
||||||
|
GT_ETCD_ENDPOINTS: http://127.0.0.1:2379
|
||||||
|
GT_POSTGRES_ENDPOINTS: postgres://greptimedb:admin@127.0.0.1:5432/postgres
|
||||||
|
GT_MYSQL_ENDPOINTS: mysql://greptimedb:admin@127.0.0.1:3306/mysql
|
||||||
|
GT_KAFKA_ENDPOINTS: 127.0.0.1:9092
|
||||||
|
GT_KAFKA_SASL_ENDPOINTS: 127.0.0.1:9093
|
||||||
|
UNITTEST_LOG_DIR: "__unittest_logs"
|
||||||
|
|
||||||
|
coverage:
|
||||||
|
if: ${{ github.repository == 'GreptimeTeam/greptimedb' && github.event_name == 'merge_group' }}
|
||||||
|
runs-on: ubuntu-22.04-8-cores
|
||||||
|
timeout-minutes: 60
|
||||||
|
steps:
|
||||||
|
- uses: actions/checkout@v4
|
||||||
with:
|
with:
|
||||||
key: docker-${{ runner.os }}-coverage
|
persist-credentials: false
|
||||||
|
- uses: arduino/setup-protoc@v3
|
||||||
|
with:
|
||||||
|
repo-token: ${{ secrets.GITHUB_TOKEN }}
|
||||||
|
- uses: rui314/setup-mold@v1
|
||||||
|
- name: Install toolchain
|
||||||
|
uses: actions-rust-lang/setup-rust-toolchain@v1
|
||||||
|
with:
|
||||||
|
components: llvm-tools
|
||||||
|
cache: false
|
||||||
|
- name: Rust Cache
|
||||||
|
uses: Swatinem/rust-cache@v2
|
||||||
|
with:
|
||||||
|
# Shares cross multiple jobs
|
||||||
|
shared-key: "coverage-test"
|
||||||
|
save-if: ${{ github.ref == 'refs/heads/main' }}
|
||||||
- name: Install latest nextest release
|
- name: Install latest nextest release
|
||||||
uses: taiki-e/install-action@nextest
|
uses: taiki-e/install-action@nextest
|
||||||
- name: Install cargo-llvm-cov
|
- name: Install cargo-llvm-cov
|
||||||
uses: taiki-e/install-action@cargo-llvm-cov
|
uses: taiki-e/install-action@cargo-llvm-cov
|
||||||
- name: Setup etcd server
|
- name: Setup external services
|
||||||
working-directory: tests-integration/fixtures/etcd
|
working-directory: tests-integration/fixtures
|
||||||
run: docker compose -f docker-compose-standalone.yml up -d --wait
|
run: docker compose up -d --wait
|
||||||
- name: Setup kafka server
|
|
||||||
working-directory: tests-integration/fixtures/kafka
|
|
||||||
run: docker compose -f docker-compose-standalone.yml up -d --wait
|
|
||||||
- name: Setup minio
|
|
||||||
working-directory: tests-integration/fixtures/minio
|
|
||||||
run: docker compose -f docker-compose-standalone.yml up -d --wait
|
|
||||||
- name: Setup postgres server
|
|
||||||
working-directory: tests-integration/fixtures/postgres
|
|
||||||
run: docker compose -f docker-compose-standalone.yml up -d --wait
|
|
||||||
- name: Run nextest cases
|
- name: Run nextest cases
|
||||||
run: cargo llvm-cov nextest --workspace --lcov --output-path lcov.info -F dashboard -F pg_kvbackend
|
run: cargo llvm-cov nextest --workspace --lcov --output-path lcov.info -F dashboard -F pg_kvbackend
|
||||||
env:
|
env:
|
||||||
CARGO_BUILD_RUSTFLAGS: "-C link-arg=-fuse-ld=lld"
|
CARGO_BUILD_RUSTFLAGS: "-C link-arg=-fuse-ld=mold"
|
||||||
RUST_BACKTRACE: 1
|
RUST_BACKTRACE: 1
|
||||||
CARGO_INCREMENTAL: 0
|
CARGO_INCREMENTAL: 0
|
||||||
GT_S3_BUCKET: ${{ vars.AWS_CI_TEST_BUCKET }}
|
GT_S3_BUCKET: ${{ vars.AWS_CI_TEST_BUCKET }}
|
||||||
@@ -697,6 +789,7 @@ jobs:
|
|||||||
GT_MINIO_ENDPOINT_URL: http://127.0.0.1:9000
|
GT_MINIO_ENDPOINT_URL: http://127.0.0.1:9000
|
||||||
GT_ETCD_ENDPOINTS: http://127.0.0.1:2379
|
GT_ETCD_ENDPOINTS: http://127.0.0.1:2379
|
||||||
GT_POSTGRES_ENDPOINTS: postgres://greptimedb:admin@127.0.0.1:5432/postgres
|
GT_POSTGRES_ENDPOINTS: postgres://greptimedb:admin@127.0.0.1:5432/postgres
|
||||||
|
GT_MYSQL_ENDPOINTS: mysql://greptimedb:admin@127.0.0.1:3306/mysql
|
||||||
GT_KAFKA_ENDPOINTS: 127.0.0.1:9092
|
GT_KAFKA_ENDPOINTS: 127.0.0.1:9092
|
||||||
GT_KAFKA_SASL_ENDPOINTS: 127.0.0.1:9093
|
GT_KAFKA_SASL_ENDPOINTS: 127.0.0.1:9093
|
||||||
UNITTEST_LOG_DIR: "__unittest_logs"
|
UNITTEST_LOG_DIR: "__unittest_logs"
|
||||||
@@ -710,9 +803,10 @@ jobs:
|
|||||||
verbose: true
|
verbose: true
|
||||||
|
|
||||||
# compat:
|
# compat:
|
||||||
|
# if: ${{ github.repository == 'GreptimeTeam/greptimedb' }}
|
||||||
# name: Compatibility Test
|
# name: Compatibility Test
|
||||||
# needs: build
|
# needs: build
|
||||||
# runs-on: ubuntu-20.04
|
# runs-on: ubuntu-22.04
|
||||||
# timeout-minutes: 60
|
# timeout-minutes: 60
|
||||||
# steps:
|
# steps:
|
||||||
# - uses: actions/checkout@v4
|
# - uses: actions/checkout@v4
|
||||||
|
|||||||
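The ci.yml changes above add the same three cache options to every Swatinem/rust-cache step they touch. A minimal sketch of that pattern, assuming a job that has already installed the Rust toolchain (the shared-key value is just one of the keys used in this workflow):

    - name: Rust Cache
      uses: Swatinem/rust-cache@v2
      with:
        # Jobs that use the same key share one cache entry instead of
        # each keeping their own copy of the compiled dependencies.
        shared-key: "check-lint"
        # Cache all crates, not only the ones that are direct dependencies.
        cache-all-crates: "true"
        # Only runs on the main branch may write the cache back; PR runs only read it.
        save-if: ${{ github.ref == 'refs/heads/main' }}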
13  .github/workflows/docbot.yml  (vendored)
@@ -3,16 +3,21 @@ on:
 pull_request_target:
 types: [opened, edited]

-permissions:
+concurrency:
-pull-requests: write
+group: ${{ github.workflow }}-${{ github.head_ref || github.run_id }}
-contents: read
+cancel-in-progress: true

 jobs:
 docbot:
-runs-on: ubuntu-20.04
+runs-on: ubuntu-latest
+permissions:
+pull-requests: write
+contents: read
 timeout-minutes: 10
 steps:
 - uses: actions/checkout@v4
+with:
+persist-credentials: false
 - uses: ./.github/actions/setup-cyborg
 - name: Maybe Follow Up Docs Issue
 working-directory: cyborg
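The docbot.yml change drops the workflow-level permissions block and scopes the token to the single job instead, next to a concurrency group that cancels superseded runs. A condensed sketch of the resulting shape, with the step list shortened to the checkout shown above:

    concurrency:
      group: ${{ github.workflow }}-${{ github.head_ref || github.run_id }}
      cancel-in-progress: true

    jobs:
      docbot:
        runs-on: ubuntu-latest
        permissions:
          pull-requests: write  # the job comments on / labels the pull request
          contents: read        # checkout only needs read access
        timeout-minutes: 10
        steps:
          - uses: actions/checkout@v4
            with:
              persist-credentials: false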
23  .github/workflows/docs.yml  (vendored)
@@ -31,38 +31,47 @@ name: CI
 jobs:
 typos:
 name: Spell Check with Typos
-runs-on: ubuntu-20.04
+runs-on: ubuntu-latest
 steps:
 - uses: actions/checkout@v4
+with:
+persist-credentials: false
 - uses: crate-ci/typos@master

 license-header-check:
-runs-on: ubuntu-20.04
+runs-on: ubuntu-latest
 name: Check License Header
 steps:
 - uses: actions/checkout@v4
+with:
+persist-credentials: false
 - uses: korandoru/hawkeye@v5

 check:
 name: Check
-runs-on: ubuntu-20.04
+runs-on: ubuntu-latest
 steps:
 - run: 'echo "No action required"'

 fmt:
 name: Rustfmt
-runs-on: ubuntu-20.04
+runs-on: ubuntu-latest
 steps:
 - run: 'echo "No action required"'

 clippy:
 name: Clippy
-runs-on: ubuntu-20.04
+runs-on: ubuntu-latest
 steps:
 - run: 'echo "No action required"'

 coverage:
-runs-on: ubuntu-20.04
+runs-on: ubuntu-latest
+steps:
+- run: 'echo "No action required"'
+
+test:
+runs-on: ubuntu-latest
 steps:
 - run: 'echo "No action required"'

@@ -71,7 +80,7 @@ jobs:
 runs-on: ${{ matrix.os }}
 strategy:
 matrix:
-os: [ ubuntu-20.04 ]
+os: [ ubuntu-latest ]
 mode:
 - name: "Basic"
 - name: "Remote WAL"
49  .github/workflows/nightly-build.yml  (vendored)
@@ -14,11 +14,11 @@ on:
 description: The runner uses to build linux-amd64 artifacts
 default: ec2-c6i.4xlarge-amd64
 options:
-- ubuntu-20.04
+- ubuntu-22.04
-- ubuntu-20.04-8-cores
+- ubuntu-22.04-8-cores
-- ubuntu-20.04-16-cores
+- ubuntu-22.04-16-cores
-- ubuntu-20.04-32-cores
+- ubuntu-22.04-32-cores
-- ubuntu-20.04-64-cores
+- ubuntu-22.04-64-cores
 - ec2-c6i.xlarge-amd64 # 4C8G
 - ec2-c6i.2xlarge-amd64 # 8C16G
 - ec2-c6i.4xlarge-amd64 # 16C32G
@@ -66,18 +66,11 @@ env:

 NIGHTLY_RELEASE_PREFIX: nightly

-# Use the different image name to avoid conflict with the release images.
-# The DockerHub image will be greptime/greptimedb-nightly.
-IMAGE_NAME: greptimedb-nightly
-
-permissions:
-issues: write

 jobs:
 allocate-runners:
 name: Allocate runners
 if: ${{ github.repository == 'GreptimeTeam/greptimedb' }}
-runs-on: ubuntu-20.04
+runs-on: ubuntu-latest
 outputs:
 linux-amd64-runner: ${{ steps.start-linux-amd64-runner.outputs.label }}
 linux-arm64-runner: ${{ steps.start-linux-arm64-runner.outputs.label }}
@@ -95,6 +88,7 @@ jobs:
 uses: actions/checkout@v4
 with:
 fetch-depth: 0
+persist-credentials: false

 - name: Create version
 id: create-version
@@ -147,6 +141,7 @@ jobs:
 - uses: actions/checkout@v4
 with:
 fetch-depth: 0
+persist-credentials: false

 - uses: ./.github/actions/build-linux-artifacts
 with:
@@ -168,6 +163,7 @@ jobs:
 - uses: actions/checkout@v4
 with:
 fetch-depth: 0
+persist-credentials: false

 - uses: ./.github/actions/build-linux-artifacts
 with:
@@ -186,24 +182,25 @@ jobs:
 build-linux-amd64-artifacts,
 build-linux-arm64-artifacts,
 ]
-runs-on: ubuntu-20.04
+runs-on: ubuntu-latest
 outputs:
 nightly-build-result: ${{ steps.set-nightly-build-result.outputs.nightly-build-result }}
 steps:
 - uses: actions/checkout@v4
 with:
 fetch-depth: 0
+persist-credentials: false

 - name: Build and push images to dockerhub
 uses: ./.github/actions/build-images
 with:
 image-registry: docker.io
 image-namespace: ${{ vars.IMAGE_NAMESPACE }}
-image-name: ${{ env.IMAGE_NAME }}
+image-name: ${{ vars.NIGHTLY_BUILD_IMAGE_NAME }}
 image-registry-username: ${{ secrets.DOCKERHUB_USERNAME }}
 image-registry-password: ${{ secrets.DOCKERHUB_TOKEN }}
 version: ${{ needs.allocate-runners.outputs.version }}
-push-latest-tag: true
+push-latest-tag: false

 - name: Set nightly build result
 id: set-nightly-build-result
@@ -217,7 +214,7 @@ jobs:
 allocate-runners,
 release-images-to-dockerhub,
 ]
-runs-on: ubuntu-20.04
+runs-on: ubuntu-latest
 # When we push to ACR, it's easy to fail due to some unknown network issues.
 # However, we don't want to fail the whole workflow because of this.
 # The ACR have daily sync with DockerHub, so don't worry about the image not being updated.
@@ -226,13 +223,14 @@ jobs:
 - uses: actions/checkout@v4
 with:
 fetch-depth: 0
+persist-credentials: false

 - name: Release artifacts to CN region
 uses: ./.github/actions/release-cn-artifacts
 with:
 src-image-registry: docker.io
 src-image-namespace: ${{ vars.IMAGE_NAMESPACE }}
-src-image-name: ${{ env.IMAGE_NAME }}
+src-image-name: ${{ vars.NIGHTLY_BUILD_IMAGE_NAME }}
 dst-image-registry-username: ${{ secrets.ALICLOUD_USERNAME }}
 dst-image-registry-password: ${{ secrets.ALICLOUD_PASSWORD }}
 dst-image-registry: ${{ vars.ACR_IMAGE_REGISTRY }}
@@ -242,15 +240,16 @@ jobs:
 aws-cn-access-key-id: ${{ secrets.AWS_CN_ACCESS_KEY_ID }}
 aws-cn-secret-access-key: ${{ secrets.AWS_CN_SECRET_ACCESS_KEY }}
 aws-cn-region: ${{ vars.AWS_RELEASE_BUCKET_REGION }}
+upload-to-s3: false
 dev-mode: false
 update-version-info: false # Don't update version info in S3.
-push-latest-tag: true
+push-latest-tag: false

 stop-linux-amd64-runner: # It's always run as the last job in the workflow to make sure that the runner is released.
 name: Stop linux-amd64 runner
 # Only run this job when the runner is allocated.
 if: ${{ always() }}
-runs-on: ubuntu-20.04
+runs-on: ubuntu-latest
 needs: [
 allocate-runners,
 build-linux-amd64-artifacts,
@@ -260,6 +259,7 @@ jobs:
 uses: actions/checkout@v4
 with:
 fetch-depth: 0
+persist-credentials: false

 - name: Stop EC2 runner
 uses: ./.github/actions/stop-runner
@@ -275,7 +275,7 @@ jobs:
 name: Stop linux-arm64 runner
 # Only run this job when the runner is allocated.
 if: ${{ always() }}
-runs-on: ubuntu-20.04
+runs-on: ubuntu-latest
 needs: [
 allocate-runners,
 build-linux-arm64-artifacts,
@@ -285,6 +285,7 @@ jobs:
 uses: actions/checkout@v4
 with:
 fetch-depth: 0
+persist-credentials: false

 - name: Stop EC2 runner
 uses: ./.github/actions/stop-runner
@@ -302,11 +303,15 @@ jobs:
 needs: [
 release-images-to-dockerhub
 ]
-runs-on: ubuntu-20.04
+runs-on: ubuntu-latest
+permissions:
+issues: write
 env:
 SLACK_WEBHOOK_URL: ${{ secrets.SLACK_WEBHOOK_URL_DEVELOP_CHANNEL }}
 steps:
 - uses: actions/checkout@v4
+with:
+persist-credentials: false
 - uses: ./.github/actions/setup-cyborg
 - name: Report CI status
 id: report-ci-status
37  .github/workflows/nightly-ci.yml  (vendored)
@@ -9,19 +9,17 @@ concurrency:
 group: ${{ github.workflow }}-${{ github.head_ref || github.run_id }}
 cancel-in-progress: true

-permissions:
-issues: write

 jobs:
 sqlness-test:
 name: Run sqlness test
 if: ${{ github.repository == 'GreptimeTeam/greptimedb' }}
-runs-on: ubuntu-22.04
+runs-on: ubuntu-latest
 steps:
 - name: Checkout
 uses: actions/checkout@v4
 with:
 fetch-depth: 0
+persist-credentials: false

 - name: Check install.sh
 run: ./.github/scripts/check-install-script.sh
@@ -46,9 +44,14 @@ jobs:
 name: Sqlness tests on Windows
 if: ${{ github.repository == 'GreptimeTeam/greptimedb' }}
 runs-on: windows-2022-8-cores
+permissions:
+issues: write
 timeout-minutes: 60
 steps:
 - uses: actions/checkout@v4
+with:
+fetch-depth: 0
+persist-credentials: false
 - uses: ./.github/actions/setup-cyborg
 - uses: arduino/setup-protoc@v3
 with:
@@ -76,6 +79,9 @@ jobs:
 steps:
 - run: git config --global core.autocrlf false
 - uses: actions/checkout@v4
+with:
+fetch-depth: 0
+persist-credentials: false
 - uses: ./.github/actions/setup-cyborg
 - uses: arduino/setup-protoc@v3
 with:
@@ -101,7 +107,6 @@ jobs:
 CARGO_BUILD_RUSTFLAGS: "-C linker=lld-link"
 RUST_BACKTRACE: 1
 CARGO_INCREMENTAL: 0
-RUSTUP_WINDOWS_PATH_ADD_BIN: 1 # Workaround for https://github.com/nextest-rs/nextest/issues/1493
 GT_S3_BUCKET: ${{ vars.AWS_CI_TEST_BUCKET }}
 GT_S3_ACCESS_KEY_ID: ${{ secrets.AWS_CI_TEST_ACCESS_KEY_ID }}
 GT_S3_ACCESS_KEY: ${{ secrets.AWS_CI_TEST_SECRET_ACCESS_KEY }}
@@ -109,20 +114,25 @@ jobs:
 UNITTEST_LOG_DIR: "__unittest_logs"

 cleanbuild-linux-nix:
-runs-on: ubuntu-latest-8-cores
+name: Run clean build on Linux
-timeout-minutes: 60
+runs-on: ubuntu-latest
+if: ${{ github.repository == 'GreptimeTeam/greptimedb' }}
+timeout-minutes: 45
 steps:
 - uses: actions/checkout@v4
-- uses: cachix/install-nix-action@v27
 with:
-nix_path: nixpkgs=channel:nixos-unstable
+fetch-depth: 0
-- run: nix-shell --pure --run "cargo build"
+persist-credentials: false
+- uses: cachix/install-nix-action@v31
+- run: nix develop --command cargo check --bin greptime
+env:
+CARGO_BUILD_RUSTFLAGS: "-C link-arg=-fuse-ld=mold"

 check-status:
 name: Check status
 needs: [sqlness-test, sqlness-windows, test-on-windows]
 if: ${{ github.repository == 'GreptimeTeam/greptimedb' }}
-runs-on: ubuntu-20.04
+runs-on: ubuntu-latest
 outputs:
 check-result: ${{ steps.set-check-result.outputs.check-result }}
 steps:
@@ -135,11 +145,14 @@ jobs:
 if: ${{ github.repository == 'GreptimeTeam/greptimedb' && always() }} # Not requiring successful dependent jobs, always run.
 name: Send notification to Greptime team
 needs: [check-status]
-runs-on: ubuntu-20.04
+runs-on: ubuntu-latest
 env:
 SLACK_WEBHOOK_URL: ${{ secrets.SLACK_WEBHOOK_URL_DEVELOP_CHANNEL }}
 steps:
 - uses: actions/checkout@v4
+with:
+fetch-depth: 0
+persist-credentials: false
 - uses: ./.github/actions/setup-cyborg
 - name: Report CI status
 id: report-ci-status
42  .github/workflows/pr-labeling.yaml  (vendored, new file)
@@ -0,0 +1,42 @@
+name: 'PR Labeling'
+
+on:
+pull_request_target:
+types:
+- opened
+- synchronize
+- reopened
+
+permissions:
+contents: read
+pull-requests: write
+issues: write
+
+jobs:
+labeler:
+runs-on: ubuntu-latest
+steps:
+- name: Checkout sources
+uses: actions/checkout@v4
+
+- uses: actions/labeler@v5
+with:
+configuration-path: ".github/labeler.yaml"
+repo-token: "${{ secrets.GITHUB_TOKEN }}"
+
+size-label:
+runs-on: ubuntu-latest
+steps:
+- uses: pascalgn/size-label-action@v0.5.5
+env:
+GITHUB_TOKEN: "${{ secrets.GITHUB_TOKEN }}"
+with:
+sizes: >
+{
+"0": "XS",
+"100": "S",
+"300": "M",
+"1000": "L",
+"1500": "XL",
+"2000": "XXL"
+}
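The new pr-labeling.yaml workflow points actions/labeler@v5 at .github/labeler.yaml, which is not part of this diff. A hypothetical example of what such a configuration can look like in the v5 syntax; the label names and globs below are illustrative and are not the repository's actual rules:

    # .github/labeler.yaml (illustrative only)
    docs:
      - changed-files:
          - any-glob-to-any-file: 'docs/**'
    ci:
      - changed-files:
          - any-glob-to-any-file: '.github/**'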
122
.github/workflows/release-dev-builder-images.yaml
vendored
122
.github/workflows/release-dev-builder-images.yaml
vendored
@@ -24,12 +24,20 @@ on:
|
|||||||
description: Release dev-builder-android image
|
description: Release dev-builder-android image
|
||||||
required: false
|
required: false
|
||||||
default: false
|
default: false
|
||||||
|
update_dev_builder_image_tag:
|
||||||
|
type: boolean
|
||||||
|
description: Update the DEV_BUILDER_IMAGE_TAG in Makefile and create a PR
|
||||||
|
required: false
|
||||||
|
default: false
|
||||||
|
|
||||||
jobs:
|
jobs:
|
||||||
release-dev-builder-images:
|
release-dev-builder-images:
|
||||||
name: Release dev builder images
|
name: Release dev builder images
|
||||||
if: ${{ inputs.release_dev_builder_ubuntu_image || inputs.release_dev_builder_centos_image || inputs.release_dev_builder_android_image }} # Only manually trigger this job.
|
# The jobs are triggered by the following events:
|
||||||
runs-on: ubuntu-20.04-16-cores
|
# 1. Manually triggered workflow_dispatch event
|
||||||
|
# 2. Push event when the PR that modifies the `rust-toolchain.toml` or `docker/dev-builder/**` is merged to main
|
||||||
|
if: ${{ github.event_name == 'push' || inputs.release_dev_builder_ubuntu_image || inputs.release_dev_builder_centos_image || inputs.release_dev_builder_android_image }}
|
||||||
|
runs-on: ubuntu-latest
|
||||||
outputs:
|
outputs:
|
||||||
version: ${{ steps.set-version.outputs.version }}
|
version: ${{ steps.set-version.outputs.version }}
|
||||||
steps:
|
steps:
|
||||||
@@ -37,6 +45,7 @@ jobs:
|
|||||||
uses: actions/checkout@v4
|
uses: actions/checkout@v4
|
||||||
with:
|
with:
|
||||||
fetch-depth: 0
|
fetch-depth: 0
|
||||||
|
persist-credentials: false
|
||||||
|
|
||||||
- name: Configure build image version
|
- name: Configure build image version
|
||||||
id: set-version
|
id: set-version
|
||||||
@@ -56,13 +65,13 @@ jobs:
|
|||||||
version: ${{ env.VERSION }}
|
version: ${{ env.VERSION }}
|
||||||
dockerhub-image-registry-username: ${{ secrets.DOCKERHUB_USERNAME }}
|
dockerhub-image-registry-username: ${{ secrets.DOCKERHUB_USERNAME }}
|
||||||
dockerhub-image-registry-token: ${{ secrets.DOCKERHUB_TOKEN }}
|
dockerhub-image-registry-token: ${{ secrets.DOCKERHUB_TOKEN }}
|
||||||
build-dev-builder-ubuntu: ${{ inputs.release_dev_builder_ubuntu_image }}
|
build-dev-builder-ubuntu: ${{ inputs.release_dev_builder_ubuntu_image || github.event_name == 'push' }}
|
||||||
build-dev-builder-centos: ${{ inputs.release_dev_builder_centos_image }}
|
build-dev-builder-centos: ${{ inputs.release_dev_builder_centos_image || github.event_name == 'push' }}
|
||||||
build-dev-builder-android: ${{ inputs.release_dev_builder_android_image }}
|
build-dev-builder-android: ${{ inputs.release_dev_builder_android_image || github.event_name == 'push' }}
|
||||||
|
|
||||||
release-dev-builder-images-ecr:
|
release-dev-builder-images-ecr:
|
||||||
name: Release dev builder images to AWS ECR
|
name: Release dev builder images to AWS ECR
|
||||||
runs-on: ubuntu-20.04
|
runs-on: ubuntu-latest
|
||||||
needs: [
|
needs: [
|
||||||
release-dev-builder-images
|
release-dev-builder-images
|
||||||
]
|
]
|
||||||
@@ -84,52 +93,70 @@ jobs:
|
|||||||
|
|
||||||
- name: Push dev-builder-ubuntu image
|
- name: Push dev-builder-ubuntu image
|
||||||
shell: bash
|
shell: bash
|
||||||
if: ${{ inputs.release_dev_builder_ubuntu_image }}
|
if: ${{ inputs.release_dev_builder_ubuntu_image || github.event_name == 'push' }}
|
||||||
|
env:
|
||||||
|
IMAGE_VERSION: ${{ needs.release-dev-builder-images.outputs.version }}
|
||||||
|
IMAGE_NAMESPACE: ${{ vars.IMAGE_NAMESPACE }}
|
||||||
|
ECR_IMAGE_REGISTRY: ${{ vars.ECR_IMAGE_REGISTRY }}
|
||||||
|
ECR_IMAGE_NAMESPACE: ${{ vars.ECR_IMAGE_NAMESPACE }}
|
||||||
run: |
|
run: |
|
||||||
docker run -v "${DOCKER_CONFIG:-$HOME/.docker}:/root/.docker:ro" \
|
docker run -v "${DOCKER_CONFIG:-$HOME/.docker}:/root/.docker:ro" \
|
||||||
-e "REGISTRY_AUTH_FILE=/root/.docker/config.json" \
|
-e "REGISTRY_AUTH_FILE=/root/.docker/config.json" \
|
||||||
quay.io/skopeo/stable:latest \
|
quay.io/skopeo/stable:latest \
|
||||||
copy -a docker://docker.io/${{ vars.IMAGE_NAMESPACE }}/dev-builder-ubuntu:${{ needs.release-dev-builder-images.outputs.version }} \
|
copy -a docker://docker.io/$IMAGE_NAMESPACE/dev-builder-ubuntu:$IMAGE_VERSION \
|
||||||
docker://${{ vars.ECR_IMAGE_REGISTRY }}/${{ vars.ECR_IMAGE_NAMESPACE }}/dev-builder-ubuntu:${{ needs.release-dev-builder-images.outputs.version }}
|
docker://$ECR_IMAGE_REGISTRY/$ECR_IMAGE_NAMESPACE/dev-builder-ubuntu:$IMAGE_VERSION
|
||||||
|
|
||||||
docker run -v "${DOCKER_CONFIG:-$HOME/.docker}:/root/.docker:ro" \
|
docker run -v "${DOCKER_CONFIG:-$HOME/.docker}:/root/.docker:ro" \
|
||||||
-e "REGISTRY_AUTH_FILE=/root/.docker/config.json" \
|
-e "REGISTRY_AUTH_FILE=/root/.docker/config.json" \
|
||||||
quay.io/skopeo/stable:latest \
|
quay.io/skopeo/stable:latest \
|
||||||
copy -a docker://docker.io/${{ vars.IMAGE_NAMESPACE }}/dev-builder-ubuntu:latest \
|
copy -a docker://docker.io/$IMAGE_NAMESPACE/dev-builder-ubuntu:latest \
|
||||||
docker://${{ vars.ECR_IMAGE_REGISTRY }}/${{ vars.ECR_IMAGE_NAMESPACE }}/dev-builder-ubuntu:latest
|
docker://$ECR_IMAGE_REGISTRY/$ECR_IMAGE_NAMESPACE/dev-builder-ubuntu:latest
|
||||||
|
|
||||||
- name: Push dev-builder-centos image
|
- name: Push dev-builder-centos image
|
||||||
shell: bash
|
shell: bash
|
||||||
if: ${{ inputs.release_dev_builder_centos_image }}
|
if: ${{ inputs.release_dev_builder_centos_image || github.event_name == 'push' }}
|
||||||
|
env:
|
||||||
|
IMAGE_VERSION: ${{ needs.release-dev-builder-images.outputs.version }}
|
||||||
|
IMAGE_NAMESPACE: ${{ vars.IMAGE_NAMESPACE }}
|
||||||
|
ECR_IMAGE_REGISTRY: ${{ vars.ECR_IMAGE_REGISTRY }}
|
||||||
|
ECR_IMAGE_NAMESPACE: ${{ vars.ECR_IMAGE_NAMESPACE }}
|
||||||
run: |
|
run: |
|
||||||
docker run -v "${DOCKER_CONFIG:-$HOME/.docker}:/root/.docker:ro" \
|
docker run -v "${DOCKER_CONFIG:-$HOME/.docker}:/root/.docker:ro" \
|
||||||
-e "REGISTRY_AUTH_FILE=/root/.docker/config.json" \
|
-e "REGISTRY_AUTH_FILE=/root/.docker/config.json" \
|
||||||
quay.io/skopeo/stable:latest \
|
quay.io/skopeo/stable:latest \
|
||||||
copy -a docker://docker.io/${{ vars.IMAGE_NAMESPACE }}/dev-builder-centos:${{ needs.release-dev-builder-images.outputs.version }} \
|
copy -a docker://docker.io/$IMAGE_NAMESPACE/dev-builder-centos:$IMAGE_VERSION \
|
||||||
docker://${{ vars.ECR_IMAGE_REGISTRY }}/${{ vars.ECR_IMAGE_NAMESPACE }}/dev-builder-centos:${{ needs.release-dev-builder-images.outputs.version }}
|
docker://$ECR_IMAGE_REGISTRY/$ECR_IMAGE_NAMESPACE/dev-builder-centos:$IMAGE_VERSION
|
||||||
|
|
||||||
docker run -v "${DOCKER_CONFIG:-$HOME/.docker}:/root/.docker:ro" \
|
docker run -v "${DOCKER_CONFIG:-$HOME/.docker}:/root/.docker:ro" \
|
||||||
-e "REGISTRY_AUTH_FILE=/root/.docker/config.json" \
|
-e "REGISTRY_AUTH_FILE=/root/.docker/config.json" \
|
||||||
quay.io/skopeo/stable:latest \
|
quay.io/skopeo/stable:latest \
|
||||||
copy -a docker://docker.io/${{ vars.IMAGE_NAMESPACE }}/dev-builder-centos:latest \
|
copy -a docker://docker.io/$IMAGE_NAMESPACE/dev-builder-centos:latest \
|
||||||
docker://${{ vars.ECR_IMAGE_REGISTRY }}/${{ vars.ECR_IMAGE_NAMESPACE }}/dev-builder-centos:latest
|
docker://$ECR_IMAGE_REGISTRY/$ECR_IMAGE_NAMESPACE/dev-builder-centos:latest
|
||||||
|
|
||||||
- name: Push dev-builder-android image
|
- name: Push dev-builder-android image
|
||||||
shell: bash
|
shell: bash
|
||||||
if: ${{ inputs.release_dev_builder_android_image }}
|
if: ${{ inputs.release_dev_builder_android_image || github.event_name == 'push' }}
|
||||||
|
env:
|
||||||
|
IMAGE_VERSION: ${{ needs.release-dev-builder-images.outputs.version }}
|
||||||
|
IMAGE_NAMESPACE: ${{ vars.IMAGE_NAMESPACE }}
|
||||||
|
ECR_IMAGE_REGISTRY: ${{ vars.ECR_IMAGE_REGISTRY }}
|
||||||
|
ECR_IMAGE_NAMESPACE: ${{ vars.ECR_IMAGE_NAMESPACE }}
|
||||||
run: |
|
run: |
|
||||||
docker run -v "${DOCKER_CONFIG:-$HOME/.docker}:/root/.docker:ro" \
|
docker run -v "${DOCKER_CONFIG:-$HOME/.docker}:/root/.docker:ro" \
|
||||||
-e "REGISTRY_AUTH_FILE=/root/.docker/config.json" \
|
-e "REGISTRY_AUTH_FILE=/root/.docker/config.json" \
|
||||||
quay.io/skopeo/stable:latest \
|
quay.io/skopeo/stable:latest \
|
||||||
copy -a docker://docker.io/${{ vars.IMAGE_NAMESPACE }}/dev-builder-android:${{ needs.release-dev-builder-images.outputs.version }} \
|
copy -a docker://docker.io/$IMAGE_NAMESPACE/dev-builder-android:$IMAGE_VERSION \
|
||||||
docker://${{ vars.ECR_IMAGE_REGISTRY }}/${{ vars.ECR_IMAGE_NAMESPACE }}/dev-builder-android:${{ needs.release-dev-builder-images.outputs.version }}
|
docker://$ECR_IMAGE_REGISTRY/$ECR_IMAGE_NAMESPACE/dev-builder-android:$IMAGE_VERSION
|
||||||
|
|
||||||
docker run -v "${DOCKER_CONFIG:-$HOME/.docker}:/root/.docker:ro" \
|
docker run -v "${DOCKER_CONFIG:-$HOME/.docker}:/root/.docker:ro" \
|
||||||
-e "REGISTRY_AUTH_FILE=/root/.docker/config.json" \
|
-e "REGISTRY_AUTH_FILE=/root/.docker/config.json" \
|
||||||
quay.io/skopeo/stable:latest \
|
quay.io/skopeo/stable:latest \
|
||||||
copy -a docker://docker.io/${{ vars.IMAGE_NAMESPACE }}/dev-builder-android:latest \
|
copy -a docker://docker.io/$IMAGE_NAMESPACE/dev-builder-android:latest \
|
||||||
docker://${{ vars.ECR_IMAGE_REGISTRY }}/${{ vars.ECR_IMAGE_NAMESPACE }}/dev-builder-android:latest
|
docker://$ECR_IMAGE_REGISTRY/$ECR_IMAGE_NAMESPACE/dev-builder-android:latest
|
||||||
|
|
||||||
release-dev-builder-images-cn: # Note: Be careful issue: https://github.com/containers/skopeo/issues/1874 and we decide to use the latest stable skopeo container.
|
release-dev-builder-images-cn: # Note: Be careful issue: https://github.com/containers/skopeo/issues/1874 and we decide to use the latest stable skopeo container.
|
||||||
name: Release dev builder images to CN region
|
name: Release dev builder images to CN region
|
||||||
runs-on: ubuntu-20.04
|
runs-on: ubuntu-latest
|
||||||
needs: [
|
needs: [
|
||||||
release-dev-builder-images
|
release-dev-builder-images
|
||||||
]
|
]
|
||||||
@@ -143,30 +170,63 @@ jobs:

      - name: Push dev-builder-ubuntu image
        shell: bash
-       if: ${{ inputs.release_dev_builder_ubuntu_image }}
+       if: ${{ inputs.release_dev_builder_ubuntu_image || github.event_name == 'push' }}
+       env:
+         IMAGE_VERSION: ${{ needs.release-dev-builder-images.outputs.version }}
+         IMAGE_NAMESPACE: ${{ vars.IMAGE_NAMESPACE }}
+         ACR_IMAGE_REGISTRY: ${{ vars.ACR_IMAGE_REGISTRY }}
        run: |
          docker run -v "${DOCKER_CONFIG:-$HOME/.docker}:/root/.docker:ro" \
            -e "REGISTRY_AUTH_FILE=/root/.docker/config.json" \
            quay.io/skopeo/stable:latest \
-           copy -a docker://docker.io/${{ vars.IMAGE_NAMESPACE }}/dev-builder-ubuntu:${{ needs.release-dev-builder-images.outputs.version }} \
+           copy -a docker://docker.io/$IMAGE_NAMESPACE/dev-builder-ubuntu:$IMAGE_VERSION \
-           docker://${{ vars.ACR_IMAGE_REGISTRY }}/${{ vars.IMAGE_NAMESPACE }}/dev-builder-ubuntu:${{ needs.release-dev-builder-images.outputs.version }}
+           docker://$ACR_IMAGE_REGISTRY/$IMAGE_NAMESPACE/dev-builder-ubuntu:$IMAGE_VERSION

      - name: Push dev-builder-centos image
        shell: bash
-       if: ${{ inputs.release_dev_builder_centos_image }}
+       if: ${{ inputs.release_dev_builder_centos_image || github.event_name == 'push' }}
+       env:
+         IMAGE_VERSION: ${{ needs.release-dev-builder-images.outputs.version }}
+         IMAGE_NAMESPACE: ${{ vars.IMAGE_NAMESPACE }}
+         ACR_IMAGE_REGISTRY: ${{ vars.ACR_IMAGE_REGISTRY }}
        run: |
          docker run -v "${DOCKER_CONFIG:-$HOME/.docker}:/root/.docker:ro" \
            -e "REGISTRY_AUTH_FILE=/root/.docker/config.json" \
            quay.io/skopeo/stable:latest \
-           copy -a docker://docker.io/${{ vars.IMAGE_NAMESPACE }}/dev-builder-centos:${{ needs.release-dev-builder-images.outputs.version }} \
+           copy -a docker://docker.io/$IMAGE_NAMESPACE/dev-builder-centos:$IMAGE_VERSION \
-           docker://${{ vars.ACR_IMAGE_REGISTRY }}/${{ vars.IMAGE_NAMESPACE }}/dev-builder-centos:${{ needs.release-dev-builder-images.outputs.version }}
+           docker://$ACR_IMAGE_REGISTRY/$IMAGE_NAMESPACE/dev-builder-centos:$IMAGE_VERSION

      - name: Push dev-builder-android image
        shell: bash
-       if: ${{ inputs.release_dev_builder_android_image }}
+       if: ${{ inputs.release_dev_builder_android_image || github.event_name == 'push' }}
+       env:
+         IMAGE_VERSION: ${{ needs.release-dev-builder-images.outputs.version }}
+         IMAGE_NAMESPACE: ${{ vars.IMAGE_NAMESPACE }}
+         ACR_IMAGE_REGISTRY: ${{ vars.ACR_IMAGE_REGISTRY }}
        run: |
          docker run -v "${DOCKER_CONFIG:-$HOME/.docker}:/root/.docker:ro" \
            -e "REGISTRY_AUTH_FILE=/root/.docker/config.json" \
            quay.io/skopeo/stable:latest \
-           copy -a docker://docker.io/${{ vars.IMAGE_NAMESPACE }}/dev-builder-android:${{ needs.release-dev-builder-images.outputs.version }} \
+           copy -a docker://docker.io/$IMAGE_NAMESPACE/dev-builder-android:$IMAGE_VERSION \
-           docker://${{ vars.ACR_IMAGE_REGISTRY }}/${{ vars.IMAGE_NAMESPACE }}/dev-builder-android:${{ needs.release-dev-builder-images.outputs.version }}
+           docker://$ACR_IMAGE_REGISTRY/$IMAGE_NAMESPACE/dev-builder-android:$IMAGE_VERSION

+ update-dev-builder-image-tag:
+   name: Update dev-builder image tag
+   runs-on: ubuntu-latest
+   permissions:
+     contents: write
+     pull-requests: write
+   if: ${{ github.event_name == 'push' || inputs.update_dev_builder_image_tag }}
+   needs: [
+     release-dev-builder-images
+   ]
+   steps:
+     - name: Checkout repository
+       uses: actions/checkout@v4
+
+     - name: Update dev-builder image tag
+       shell: bash
+       env:
+         GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
+       run: |
+         ./.github/scripts/update-dev-builder-version.sh ${{ needs.release-dev-builder-images.outputs.version }}
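All of the push steps above follow the same pattern: run the official skopeo container with the local Docker credentials mounted read-only and copy every architecture of an image from Docker Hub to the mirror registry. A minimal standalone sketch of that pattern, with placeholder image names and registry host rather than the workflow's variables:

```bash
#!/usr/bin/env bash
# Sketch: mirror a multi-arch image between registries the same way the
# workflow steps above do. SRC_IMAGE and DST_IMAGE are placeholder values.
set -euo pipefail

SRC_IMAGE="docker.io/example-namespace/dev-builder-ubuntu:example-tag"
DST_IMAGE="registry.example.com/example-namespace/dev-builder-ubuntu:example-tag"

docker run --rm \
  -v "${DOCKER_CONFIG:-$HOME/.docker}:/root/.docker:ro" \
  -e "REGISTRY_AUTH_FILE=/root/.docker/config.json" \
  quay.io/skopeo/stable:latest \
  copy -a "docker://${SRC_IMAGE}" "docker://${DST_IMAGE}"
```

`copy -a` carries every architecture in the manifest list, which is why the multi-arch dev-builder images can be mirrored in a single call.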
.github/workflows/release.yml (137 lines changed, vendored)
@@ -18,11 +18,11 @@ on:
        description: The runner uses to build linux-amd64 artifacts
        default: ec2-c6i.4xlarge-amd64
        options:
-         - ubuntu-20.04
+         - ubuntu-22.04
-         - ubuntu-20.04-8-cores
+         - ubuntu-22.04-8-cores
-         - ubuntu-20.04-16-cores
+         - ubuntu-22.04-16-cores
-         - ubuntu-20.04-32-cores
+         - ubuntu-22.04-32-cores
-         - ubuntu-20.04-64-cores
+         - ubuntu-22.04-64-cores
          - ec2-c6i.xlarge-amd64 # 4C8G
          - ec2-c6i.2xlarge-amd64 # 8C16G
          - ec2-c6i.4xlarge-amd64 # 16C32G
@@ -88,21 +88,14 @@ env:
  # Controls whether to run tests, include unit-test, integration-test and sqlness.
  DISABLE_RUN_TESTS: ${{ inputs.skip_test || vars.DEFAULT_SKIP_TEST }}

- # The scheduled version is '${{ env.NEXT_RELEASE_VERSION }}-nightly-YYYYMMDD', like v0.2.0-nigthly-20230313;
+ # The scheduled version is '${{ env.NEXT_RELEASE_VERSION }}-nightly-YYYYMMDD', like v0.2.0-nightly-20230313;
  NIGHTLY_RELEASE_PREFIX: nightly
- # Note: The NEXT_RELEASE_VERSION should be modified manually by every formal release.
- NEXT_RELEASE_VERSION: v0.12.0

- # Permission reference: https://docs.github.com/en/actions/using-jobs/assigning-permissions-to-jobs
- permissions:
-   issues: write # Allows the action to create issues for cyborg.
-   contents: write # Allows the action to create a release.

jobs:
  allocate-runners:
    name: Allocate runners
    if: ${{ github.repository == 'GreptimeTeam/greptimedb' }}
-   runs-on: ubuntu-20.04
+   runs-on: ubuntu-latest
    outputs:
      linux-amd64-runner: ${{ steps.start-linux-amd64-runner.outputs.label }}
      linux-arm64-runner: ${{ steps.start-linux-arm64-runner.outputs.label }}
@@ -117,11 +110,14 @@ jobs:

      # The 'version' use as the global tag name of the release workflow.
      version: ${{ steps.create-version.outputs.version }}
+     should-push-latest-tag: ${{ steps.check-version.outputs.should-push-latest-tag }}
    steps:
      - name: Checkout
        uses: actions/checkout@v4
        with:
          fetch-depth: 0
+         persist-credentials: false

      - name: Check Rust toolchain version
        shell: bash
@@ -130,7 +126,7 @@ jobs:

      # The create-version will create a global variable named 'version' in the global workflows.
      # - If it's a tag push release, the version is the tag name(${{ github.ref_name }});
-     # - If it's a scheduled release, the version is '${{ env.NEXT_RELEASE_VERSION }}-nightly-$buildTime', like v0.2.0-nigthly-20230313;
+     # - If it's a scheduled release, the version is '${{ env.NEXT_RELEASE_VERSION }}-nightly-$buildTime', like v0.2.0-nightly-20230313;
      # - If it's a manual release, the version is '${{ env.NEXT_RELEASE_VERSION }}-<short-git-sha>-YYYYMMDDSS', like v0.2.0-e5b243c-2023071245;
      - name: Create version
        id: create-version
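For illustration, the three naming rules in those comments can be sketched as a small shell snippet; the variable names and date formats here are assumptions for the sketch, not the contents of the actual create-version step:

```bash
#!/usr/bin/env bash
# Sketch of the version naming rules described in the comments above.
set -euo pipefail

NEXT_RELEASE_VERSION="v0.12.0"   # example value only

case "${GITHUB_EVENT_NAME:-workflow_dispatch}" in
  push)
    # Tag push release: the version is simply the pushed tag name.
    version="${GITHUB_REF_NAME:?}"
    ;;
  schedule)
    # Scheduled release: <next-version>-nightly-YYYYMMDD.
    version="${NEXT_RELEASE_VERSION}-nightly-$(date +%Y%m%d)"
    ;;
  *)
    # Manual release: <next-version>-<short-git-sha>-<timestamp>.
    version="${NEXT_RELEASE_VERSION}-$(git rev-parse --short HEAD)-$(date +%Y%m%d%H)"
    ;;
esac

echo "version=${version}"
```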
@@ -139,9 +135,13 @@ jobs:
        env:
          GITHUB_EVENT_NAME: ${{ github.event_name }}
          GITHUB_REF_NAME: ${{ github.ref_name }}
-         NEXT_RELEASE_VERSION: ${{ env.NEXT_RELEASE_VERSION }}
          NIGHTLY_RELEASE_PREFIX: ${{ env.NIGHTLY_RELEASE_PREFIX }}

+     - name: Check version
+       id: check-version
+       run: |
+         ./.github/scripts/check-version.sh "${{ steps.create-version.outputs.version }}"

      - name: Allocate linux-amd64 runner
        if: ${{ inputs.build_linux_amd64_artifacts || github.event_name == 'push' || github.event_name == 'schedule' }}
        uses: ./.github/actions/start-runner
@@ -181,6 +181,7 @@ jobs:
      - uses: actions/checkout@v4
        with:
          fetch-depth: 0
+         persist-credentials: false

      - uses: ./.github/actions/build-linux-artifacts
        with:
@@ -202,6 +203,7 @@ jobs:
      - uses: actions/checkout@v4
        with:
          fetch-depth: 0
+         persist-credentials: false

      - uses: ./.github/actions/build-linux-artifacts
        with:
@@ -237,6 +239,7 @@ jobs:
      - uses: actions/checkout@v4
        with:
          fetch-depth: 0
+         persist-credentials: false

      - uses: ./.github/actions/build-macos-artifacts
        with:
@@ -276,6 +279,7 @@ jobs:
      - uses: actions/checkout@v4
        with:
          fetch-depth: 0
+         persist-credentials: false

      - uses: ./.github/actions/build-windows-artifacts
        with:
@@ -299,22 +303,25 @@ jobs:
      build-linux-amd64-artifacts,
      build-linux-arm64-artifacts,
    ]
-   runs-on: ubuntu-2004-16-cores
+   runs-on: ubuntu-latest
    outputs:
      build-image-result: ${{ steps.set-build-image-result.outputs.build-image-result }}
    steps:
      - uses: actions/checkout@v4
        with:
          fetch-depth: 0
+         persist-credentials: false

      - name: Build and push images to dockerhub
        uses: ./.github/actions/build-images
        with:
          image-registry: docker.io
          image-namespace: ${{ vars.IMAGE_NAMESPACE }}
+         image-name: ${{ vars.GREPTIMEDB_IMAGE_NAME }}
          image-registry-username: ${{ secrets.DOCKERHUB_USERNAME }}
          image-registry-password: ${{ secrets.DOCKERHUB_TOKEN }}
          version: ${{ needs.allocate-runners.outputs.version }}
+         push-latest-tag: ${{ needs.allocate-runners.outputs.should-push-latest-tag == 'true' && github.ref_type == 'tag' && !contains(github.ref_name, 'nightly') && github.event_name != 'schedule' }}

      - name: Set build image result
        id: set-build-image-result
@@ -332,7 +339,7 @@ jobs:
      build-windows-artifacts,
      release-images-to-dockerhub,
    ]
-   runs-on: ubuntu-20.04
+   runs-on: ubuntu-latest-16-cores
    # When we push to ACR, it's easy to fail due to some unknown network issues.
    # However, we don't want to fail the whole workflow because of this.
    # The ACR have daily sync with DockerHub, so don't worry about the image not being updated.
@@ -341,13 +348,14 @@ jobs:
      - uses: actions/checkout@v4
        with:
          fetch-depth: 0
+         persist-credentials: false

      - name: Release artifacts to CN region
        uses: ./.github/actions/release-cn-artifacts
        with:
          src-image-registry: docker.io
          src-image-namespace: ${{ vars.IMAGE_NAMESPACE }}
-         src-image-name: greptimedb
+         src-image-name: ${{ vars.GREPTIMEDB_IMAGE_NAME }}
          dst-image-registry-username: ${{ secrets.ALICLOUD_USERNAME }}
          dst-image-registry-password: ${{ secrets.ALICLOUD_PASSWORD }}
          dst-image-registry: ${{ vars.ACR_IMAGE_REGISTRY }}
@@ -358,8 +366,9 @@ jobs:
          aws-cn-secret-access-key: ${{ secrets.AWS_CN_SECRET_ACCESS_KEY }}
          aws-cn-region: ${{ vars.AWS_RELEASE_BUCKET_REGION }}
          dev-mode: false
+         upload-to-s3: true
          update-version-info: true
-         push-latest-tag: true
+         push-latest-tag: ${{ needs.allocate-runners.outputs.should-push-latest-tag == 'true' && github.ref_type == 'tag' && !contains(github.ref_name, 'nightly') && github.event_name != 'schedule' }}

  publish-github-release:
    name: Create GitHub release and upload artifacts
@@ -372,11 +381,12 @@ jobs:
      build-windows-artifacts,
      release-images-to-dockerhub,
    ]
-   runs-on: ubuntu-20.04
+   runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@v4
        with:
          fetch-depth: 0
+         persist-credentials: false

      - name: Publish GitHub release
        uses: ./.github/actions/publish-github-release
@@ -385,12 +395,12 @@ jobs:

  ### Stop runners ###
  # It's very necessary to split the job of releasing runners into 'stop-linux-amd64-runner' and 'stop-linux-arm64-runner'.
- # Because we can terminate the specified EC2 instance immediately after the job is finished without uncessary waiting.
+ # Because we can terminate the specified EC2 instance immediately after the job is finished without unnecessary waiting.
  stop-linux-amd64-runner: # It's always run as the last job in the workflow to make sure that the runner is released.
    name: Stop linux-amd64 runner
    # Only run this job when the runner is allocated.
    if: ${{ always() }}
-   runs-on: ubuntu-20.04
+   runs-on: ubuntu-latest
    needs: [
      allocate-runners,
      build-linux-amd64-artifacts,
@@ -400,6 +410,7 @@ jobs:
        uses: actions/checkout@v4
        with:
          fetch-depth: 0
+         persist-credentials: false

      - name: Stop EC2 runner
        uses: ./.github/actions/stop-runner
@@ -415,7 +426,7 @@ jobs:
    name: Stop linux-arm64 runner
    # Only run this job when the runner is allocated.
    if: ${{ always() }}
-   runs-on: ubuntu-20.04
+   runs-on: ubuntu-latest
    needs: [
      allocate-runners,
      build-linux-arm64-artifacts,
@@ -425,6 +436,7 @@ jobs:
        uses: actions/checkout@v4
        with:
          fetch-depth: 0
+         persist-credentials: false

      - name: Stop EC2 runner
        uses: ./.github/actions/stop-runner
@@ -436,6 +448,74 @@ jobs:
          aws-region: ${{ vars.EC2_RUNNER_REGION }}
          github-token: ${{ secrets.GH_PERSONAL_ACCESS_TOKEN }}

+ bump-downstream-repo-versions:
+   name: Bump downstream repo versions
+   if: ${{ github.event_name == 'push' || github.event_name == 'schedule' }}
+   needs: [allocate-runners, publish-github-release]
+   runs-on: ubuntu-latest
+   # Permission reference: https://docs.github.com/en/actions/using-jobs/assigning-permissions-to-jobs
+   permissions:
+     issues: write # Allows the action to create issues for cyborg.
+     contents: write # Allows the action to create a release.
+   steps:
+     - uses: actions/checkout@v4
+       with:
+         fetch-depth: 0
+         persist-credentials: false
+     - uses: ./.github/actions/setup-cyborg
+     - name: Bump downstream repo versions
+       working-directory: cyborg
+       run: pnpm tsx bin/bump-versions.ts
+       env:
+         TARGET_REPOS: website,docs,demo
+         VERSION: ${{ needs.allocate-runners.outputs.version }}
+         GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
+         WEBSITE_REPO_TOKEN: ${{ secrets.WEBSITE_REPO_TOKEN }}
+         DOCS_REPO_TOKEN: ${{ secrets.DOCS_REPO_TOKEN }}
+         DEMO_REPO_TOKEN: ${{ secrets.DEMO_REPO_TOKEN }}
+
+ bump-helm-charts-version:
+   name: Bump helm charts version
+   if: ${{ github.ref_type == 'tag' && !contains(github.ref_name, 'nightly') && github.event_name != 'schedule' }}
+   needs: [allocate-runners, publish-github-release]
+   runs-on: ubuntu-latest
+   permissions:
+     contents: write
+     pull-requests: write
+   steps:
+     - name: Checkout repository
+       uses: actions/checkout@v4
+       with:
+         fetch-depth: 0
+
+     - name: Bump helm charts version
+       env:
+         GITHUB_TOKEN: ${{ secrets.HELM_CHARTS_REPO_TOKEN }}
+         VERSION: ${{ needs.allocate-runners.outputs.version }}
+       run: |
+         ./.github/scripts/update-helm-charts-version.sh
+
+ bump-homebrew-greptime-version:
+   name: Bump homebrew greptime version
+   if: ${{ github.ref_type == 'tag' && !contains(github.ref_name, 'nightly') && github.event_name != 'schedule' }}
+   needs: [allocate-runners, publish-github-release]
+   runs-on: ubuntu-latest
+   permissions:
+     contents: write
+     pull-requests: write
+   steps:
+     - name: Checkout repository
+       uses: actions/checkout@v4
+       with:
+         fetch-depth: 0
+
+     - name: Bump homebrew greptime version
+       env:
+         GITHUB_TOKEN: ${{ secrets.HOMEBREW_GREPTIME_REPO_TOKEN }}
+         VERSION: ${{ needs.allocate-runners.outputs.version }}
+       run: |
+         ./.github/scripts/update-homebrew-greptme-version.sh

  notification:
    if: ${{ github.repository == 'GreptimeTeam/greptimedb' && (github.event_name == 'push' || github.event_name == 'schedule') && always() }}
    name: Send notification to Greptime team
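The new `bump-downstream-repo-versions` job is essentially a wrapper around a cyborg script, so it can in principle be reproduced locally from the `cyborg` directory. A hedged sketch, assuming pnpm is installed and that the tokens have push access to the downstream repos (the placeholder token values are not real):

```bash
# Sketch: run the same version-bump script the workflow invokes, locally.
cd cyborg
export TARGET_REPOS="website,docs,demo"
export VERSION="v0.12.2"                 # example version
export GITHUB_TOKEN="<personal-access-token>"
export WEBSITE_REPO_TOKEN="<token>"
export DOCS_REPO_TOKEN="<token>"
export DEMO_REPO_TOKEN="<token>"
pnpm install
pnpm tsx bin/bump-versions.ts
```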
@@ -444,11 +524,18 @@ jobs:
      build-macos-artifacts,
      build-windows-artifacts,
    ]
-   runs-on: ubuntu-20.04
+   runs-on: ubuntu-latest
+   # Permission reference: https://docs.github.com/en/actions/using-jobs/assigning-permissions-to-jobs
+   permissions:
+     issues: write # Allows the action to create issues for cyborg.
+     contents: write # Allows the action to create a release.
    env:
      SLACK_WEBHOOK_URL: ${{ secrets.SLACK_WEBHOOK_URL_DEVELOP_CHANNEL }}
    steps:
      - uses: actions/checkout@v4
+       with:
+         fetch-depth: 0
+         persist-credentials: false
      - uses: ./.github/actions/setup-cyborg
      - name: Report CI status
        id: report-ci-status
.github/workflows/schedule.yml (10 lines changed, vendored)
@@ -4,18 +4,20 @@ on:
    - cron: '4 2 * * *'
  workflow_dispatch:

- permissions:
-   contents: read
-   issues: write
-   pull-requests: write

jobs:
  maintenance:
    name: Periodic Maintenance
    runs-on: ubuntu-latest
+   permissions:
+     contents: read
+     issues: write
+     pull-requests: write
    if: ${{ github.repository == 'GreptimeTeam/greptimedb' }}
    steps:
      - uses: actions/checkout@v4
+       with:
+         persist-credentials: false
      - uses: ./.github/actions/setup-cyborg
      - name: Do Maintenance
        working-directory: cyborg
.github/workflows/semantic-pull-request.yml (13 lines changed, vendored)
@@ -1,15 +1,24 @@
name: "Semantic Pull Request"

on:
- pull_request_target:
+ pull_request:
    types:
      - opened
      - reopened
      - edited

+ concurrency:
+   group: ${{ github.workflow }}-${{ github.head_ref || github.run_id }}
+   cancel-in-progress: true
+
+ permissions:
+   issues: write
+   contents: write
+   pull-requests: write

jobs:
  check:
-   runs-on: ubuntu-20.04
+   runs-on: ubuntu-latest
    timeout-minutes: 10
    steps:
      - uses: actions/checkout@v4
.gitignore (7 lines changed, vendored)
@@ -54,3 +54,10 @@ tests-fuzz/corpus/
  # Nix
  .direnv
  .envrc
+
+ ## default data home
+ greptimedb_data
+
+ # github
+ !/.github
AUTHOR.md (21 lines changed)
@@ -3,30 +3,28 @@
## Individual Committers (in alphabetical order)

* [CookiePieWw](https://github.com/CookiePieWw)
- * [KKould](https://github.com/KKould)
- * [NiwakaDev](https://github.com/NiwakaDev)
* [etolbakov](https://github.com/etolbakov)
* [irenjj](https://github.com/irenjj)
- * [tisonkun](https://github.com/tisonkun)
+ * [KKould](https://github.com/KKould)
* [Lanqing Yang](https://github.com/lyang24)
+ * [NiwakaDev](https://github.com/NiwakaDev)
+ * [tisonkun](https://github.com/tisonkun)


## Team Members (in alphabetical order)

- * [Breeze-P](https://github.com/Breeze-P)
- * [GrepTime](https://github.com/GrepTime)
- * [MichaelScofield](https://github.com/MichaelScofield)
- * [Wenjie0329](https://github.com/Wenjie0329)
- * [WenyXu](https://github.com/WenyXu)
- * [ZonaHex](https://github.com/ZonaHex)
* [apdong2022](https://github.com/apdong2022)
* [beryl678](https://github.com/beryl678)
+ * [Breeze-P](https://github.com/Breeze-P)
* [daviderli614](https://github.com/daviderli614)
* [discord9](https://github.com/discord9)
* [evenyag](https://github.com/evenyag)
* [fengjiachun](https://github.com/fengjiachun)
* [fengys1996](https://github.com/fengys1996)
+ * [GrepTime](https://github.com/GrepTime)
* [holalengyu](https://github.com/holalengyu)
* [killme2008](https://github.com/killme2008)
+ * [MichaelScofield](https://github.com/MichaelScofield)
* [nicecui](https://github.com/nicecui)
* [paomian](https://github.com/paomian)
* [shuiyisong](https://github.com/shuiyisong)
@@ -34,11 +32,14 @@
* [sunng87](https://github.com/sunng87)
* [v0y4g3r](https://github.com/v0y4g3r)
* [waynexia](https://github.com/waynexia)
+ * [Wenjie0329](https://github.com/Wenjie0329)
+ * [WenyXu](https://github.com/WenyXu)
* [xtang](https://github.com/xtang)
* [zhaoyingnan01](https://github.com/zhaoyingnan01)
* [zhongzc](https://github.com/zhongzc)
+ * [ZonaHex](https://github.com/ZonaHex)
* [zyy17](https://github.com/zyy17)

## All Contributors

- [](https://github.com/GreptimeTeam/greptimedb/graphs/contributors)
+ To see the full list of contributors, please visit our [Contributors page](https://github.com/GreptimeTeam/greptimedb/graphs/contributors)
Cargo.lock (3966 lines changed, generated): file diff suppressed because it is too large.
Cargo.toml (89 lines changed)
@@ -55,7 +55,6 @@ members = [
    "src/promql",
    "src/puffin",
    "src/query",
-   "src/script",
    "src/servers",
    "src/session",
    "src/sql",
@@ -68,7 +67,7 @@ members = [
resolver = "2"

[workspace.package]
- version = "0.12.0"
+ version = "0.12.2"
edition = "2021"
license = "Apache-2.0"
@@ -79,11 +78,10 @@ clippy.dbg_macro = "warn"
clippy.implicit_clone = "warn"
clippy.readonly_write_lock = "allow"
rust.unknown_lints = "deny"
- # Remove this after https://github.com/PyO3/pyo3/issues/4094
- rust.non_local_definitions = "allow"
rust.unexpected_cfgs = { level = "warn", check-cfg = ['cfg(tokio_unstable)'] }

[workspace.dependencies]
+ # DO_NOT_REMOVE_THIS: BEGIN_OF_EXTERNAL_DEPENDENCIES
# We turn off default-features for some dependencies here so the workspaces which inherit them can
# selectively turn them on if needed, since we can override default-features = true (from false)
# for the inherited dependency but cannot do the reverse (override from true to false).
@@ -91,57 +89,66 @@ rust.unexpected_cfgs = { level = "warn", check-cfg = ['cfg(tokio_unstable)'] }
# See for more detaiils: https://github.com/rust-lang/cargo/issues/11329
ahash = { version = "0.8", features = ["compile-time-rng"] }
aquamarine = "0.3"
- arrow = { version = "51.0.0", features = ["prettyprint"] }
+ arrow = { version = "53.0.0", features = ["prettyprint"] }
- arrow-array = { version = "51.0.0", default-features = false, features = ["chrono-tz"] }
+ arrow-array = { version = "53.0.0", default-features = false, features = ["chrono-tz"] }
- arrow-flight = "51.0"
+ arrow-flight = "53.0"
- arrow-ipc = { version = "51.0.0", default-features = false, features = ["lz4", "zstd"] }
+ arrow-ipc = { version = "53.0.0", default-features = false, features = ["lz4", "zstd"] }
- arrow-schema = { version = "51.0", features = ["serde"] }
+ arrow-schema = { version = "53.0", features = ["serde"] }
async-stream = "0.3"
async-trait = "0.1"
- axum = { version = "0.6", features = ["headers"] }
+ # Remember to update axum-extra, axum-macros when updating axum
+ axum = "0.8"
+ axum-extra = "0.10"
+ axum-macros = "0.4"
+ backon = "1"
base64 = "0.21"
bigdecimal = "0.4.2"
bitflags = "2.4.1"
bytemuck = "1.12"
bytes = { version = "1.7", features = ["serde"] }
chrono = { version = "0.4", features = ["serde"] }
+ chrono-tz = "0.10.1"
clap = { version = "4.4", features = ["derive"] }
config = "0.13.0"
crossbeam-utils = "0.8"
dashmap = "5.4"
- datafusion = { git = "https://github.com/waynexia/arrow-datafusion.git", rev = "7823ef2f63663907edab46af0d51359900f608d6" }
+ datafusion = { git = "https://github.com/apache/datafusion.git", rev = "2464703c84c400a09cc59277018813f0e797bb4e" }
- datafusion-common = { git = "https://github.com/waynexia/arrow-datafusion.git", rev = "7823ef2f63663907edab46af0d51359900f608d6" }
+ datafusion-common = { git = "https://github.com/apache/datafusion.git", rev = "2464703c84c400a09cc59277018813f0e797bb4e" }
- datafusion-expr = { git = "https://github.com/waynexia/arrow-datafusion.git", rev = "7823ef2f63663907edab46af0d51359900f608d6" }
+ datafusion-expr = { git = "https://github.com/apache/datafusion.git", rev = "2464703c84c400a09cc59277018813f0e797bb4e" }
- datafusion-functions = { git = "https://github.com/waynexia/arrow-datafusion.git", rev = "7823ef2f63663907edab46af0d51359900f608d6" }
+ datafusion-functions = { git = "https://github.com/apache/datafusion.git", rev = "2464703c84c400a09cc59277018813f0e797bb4e" }
- datafusion-optimizer = { git = "https://github.com/waynexia/arrow-datafusion.git", rev = "7823ef2f63663907edab46af0d51359900f608d6" }
+ datafusion-optimizer = { git = "https://github.com/apache/datafusion.git", rev = "2464703c84c400a09cc59277018813f0e797bb4e" }
- datafusion-physical-expr = { git = "https://github.com/waynexia/arrow-datafusion.git", rev = "7823ef2f63663907edab46af0d51359900f608d6" }
+ datafusion-physical-expr = { git = "https://github.com/apache/datafusion.git", rev = "2464703c84c400a09cc59277018813f0e797bb4e" }
- datafusion-physical-plan = { git = "https://github.com/waynexia/arrow-datafusion.git", rev = "7823ef2f63663907edab46af0d51359900f608d6" }
+ datafusion-physical-plan = { git = "https://github.com/apache/datafusion.git", rev = "2464703c84c400a09cc59277018813f0e797bb4e" }
- datafusion-sql = { git = "https://github.com/waynexia/arrow-datafusion.git", rev = "7823ef2f63663907edab46af0d51359900f608d6" }
+ datafusion-sql = { git = "https://github.com/apache/datafusion.git", rev = "2464703c84c400a09cc59277018813f0e797bb4e" }
- datafusion-substrait = { git = "https://github.com/waynexia/arrow-datafusion.git", rev = "7823ef2f63663907edab46af0d51359900f608d6" }
+ datafusion-substrait = { git = "https://github.com/apache/datafusion.git", rev = "2464703c84c400a09cc59277018813f0e797bb4e" }
deadpool = "0.10"
deadpool-postgres = "0.12"
derive_builder = "0.12"
dotenv = "0.15"
- etcd-client = "0.13"
+ etcd-client = "0.14"
fst = "0.4.7"
futures = "0.3"
futures-util = "0.3"
- greptime-proto = { git = "https://github.com/GreptimeTeam/greptime-proto.git", rev = "43ddd8dea69f4df0fe2e8b5cdc0044d2cfa35908" }
+ greptime-proto = { git = "https://github.com/GreptimeTeam/greptime-proto.git", rev = "072ce580502e015df1a6b03a185b60309a7c2a7a" }
hex = "0.4"
- http = "0.2"
+ http = "1"
humantime = "2.1"
humantime-serde = "1.1"
+ hyper = "1.1"
+ hyper-util = "0.1"
itertools = "0.10"
jsonb = { git = "https://github.com/databendlabs/jsonb.git", rev = "8c8d2fc294a39f3ff08909d60f718639cfba3875", default-features = false }
lazy_static = "1.4"
- meter-core = { git = "https://github.com/GreptimeTeam/greptime-meter.git", rev = "a10facb353b41460eeb98578868ebf19c2084fac" }
+ local-ip-address = "0.6"
+ loki-proto = { git = "https://github.com/GreptimeTeam/loki-proto.git", rev = "1434ecf23a2654025d86188fb5205e7a74b225d3" }
+ meter-core = { git = "https://github.com/GreptimeTeam/greptime-meter.git", rev = "5618e779cf2bb4755b499c630fba4c35e91898cb" }
mockall = "0.11.4"
moka = "0.12"
nalgebra = "0.33"
notify = "6.1"
num_cpus = "1.16"
once_cell = "1.18"
- opentelemetry-proto = { version = "0.5", features = [
+ opentelemetry-proto = { version = "0.27", features = [
    "gen-tonic",
    "metrics",
    "trace",
|
|||||||
"logs",
|
"logs",
|
||||||
] }
|
] }
|
||||||
parking_lot = "0.12"
|
parking_lot = "0.12"
|
||||||
parquet = { version = "51.0.0", default-features = false, features = ["arrow", "async", "object_store"] }
|
parquet = { version = "53.0.0", default-features = false, features = ["arrow", "async", "object_store"] }
|
||||||
paste = "1.0"
|
paste = "1.0"
|
||||||
pin-project = "1.0"
|
pin-project = "1.0"
|
||||||
prometheus = { version = "0.13.3", features = ["process"] }
|
prometheus = { version = "0.13.3", features = ["process"] }
|
||||||
promql-parser = { version = "0.4.3", features = ["ser"] }
|
promql-parser = { git = "https://github.com/GreptimeTeam/promql-parser.git", features = [
|
||||||
prost = "0.12"
|
"ser",
|
||||||
|
], rev = "27abb8e16003a50c720f00d6c85f41f5fa2a2a8e" }
|
||||||
|
prost = "0.13"
|
||||||
raft-engine = { version = "0.4.1", default-features = false }
|
raft-engine = { version = "0.4.1", default-features = false }
|
||||||
rand = "0.8"
|
rand = "0.8"
|
||||||
ratelimit = "0.9"
|
ratelimit = "0.9"
|
||||||
@@ -173,33 +182,36 @@ rstest = "0.21"
rstest_reuse = "0.7"
rust_decimal = "1.33"
rustc-hash = "2.0"
+ rustls = { version = "0.23.20", default-features = false } # override by patch, see [patch.crates-io]
serde = { version = "1.0", features = ["derive"] }
serde_json = { version = "1.0", features = ["float_roundtrip"] }
serde_with = "3"
- shadow-rs = "0.35"
+ shadow-rs = "0.38"
similar-asserts = "1.6.0"
smallvec = { version = "1", features = ["serde"] }
snafu = "0.8"
sysinfo = "0.30"
- # on branch v0.44.x
+ # on branch v0.52.x
- sqlparser = { git = "https://github.com/GreptimeTeam/sqlparser-rs.git", rev = "54a267ac89c09b11c0c88934690530807185d3e7", features = [
+ sqlparser = { git = "https://github.com/GreptimeTeam/sqlparser-rs.git", rev = "71dd86058d2af97b9925093d40c4e03360403170", features = [
    "visitor",
    "serde",
- ] }
+ ] } # on branch v0.44.x
strum = { version = "0.25", features = ["derive"] }
tempfile = "3"
tokio = { version = "1.40", features = ["full"] }
tokio-postgres = "0.7"
+ tokio-rustls = { version = "0.26.0", default-features = false } # override by patch, see [patch.crates-io]
tokio-stream = "0.1"
tokio-util = { version = "0.7", features = ["io-util", "compat"] }
toml = "0.8.8"
- tonic = { version = "0.11", features = ["tls", "gzip", "zstd"] }
+ tonic = { version = "0.12", features = ["tls", "gzip", "zstd"] }
- tower = "0.4"
+ tower = "0.5"
tracing-appender = "0.2"
tracing-subscriber = { version = "0.3", features = ["env-filter", "json", "fmt"] }
typetag = "0.2"
uuid = { version = "1.7", features = ["serde", "v4", "fast-rng"] }
zstd = "0.13"
+ # DO_NOT_REMOVE_THIS: END_OF_EXTERNAL_DEPENDENCIES

## workspaces members
api = { path = "src/api" }
@@ -256,7 +268,6 @@ plugins = { path = "src/plugins" }
promql = { path = "src/promql" }
puffin = { path = "src/puffin" }
query = { path = "src/query" }
- script = { path = "src/script" }
servers = { path = "src/servers" }
session = { path = "src/session" }
sql = { path = "src/sql" }
@@ -266,18 +277,16 @@ table = { path = "src/table" }

[patch.crates-io]
# change all rustls dependencies to use our fork to default to `ring` to make it "just work"
- hyper-rustls = { git = "https://github.com/GreptimeTeam/hyper-rustls" }
+ hyper-rustls = { git = "https://github.com/GreptimeTeam/hyper-rustls", rev = "a951e03" } # version = "0.27.5" with ring patch
- rustls = { git = "https://github.com/GreptimeTeam/rustls" }
+ rustls = { git = "https://github.com/GreptimeTeam/rustls", rev = "34fd0c6" } # version = "0.23.20" with ring patch
- tokio-rustls = { git = "https://github.com/GreptimeTeam/tokio-rustls" }
+ tokio-rustls = { git = "https://github.com/GreptimeTeam/tokio-rustls", rev = "4604ca6" } # version = "0.26.0" with ring patch
# This is commented, since we are not using aws-lc-sys, if we need to use it, we need to uncomment this line or use a release after this commit, or it wouldn't compile with gcc < 8.1
# see https://github.com/aws/aws-lc-rs/pull/526
# aws-lc-sys = { git ="https://github.com/aws/aws-lc-rs", rev = "556558441e3494af4b156ae95ebc07ebc2fd38aa" }
- # Apply a fix for pprof for unaligned pointer access
- pprof = { git = "https://github.com/GreptimeTeam/pprof-rs", rev = "1bd1e21" }

[workspace.dependencies.meter-macros]
git = "https://github.com/GreptimeTeam/greptime-meter.git"
- rev = "a10facb353b41460eeb98578868ebf19c2084fac"
+ rev = "5618e779cf2bb4755b499c630fba4c35e91898cb"

[profile.release]
debug = 1
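Because every rustls-related crate is now pinned to a forked revision through `[patch.crates-io]`, it can be worth confirming which source a patched crate actually resolves to after an update. A small sketch, assuming the commands are run from the workspace root:

```bash
# Show which source the rustls crate resolves to and what depends on it.
cargo tree -i rustls

# Cross-check the recorded source in the lockfile; with the patch applied it
# should point at the GreptimeTeam fork rather than crates.io.
grep -A 3 'name = "rustls"' Cargo.lock
```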
@@ -1,3 +1,6 @@
+ [target.aarch64-unknown-linux-gnu]
+ image = "ghcr.io/cross-rs/aarch64-unknown-linux-gnu:0.2.5"
+
[build]
pre-build = [
    "dpkg --add-architecture $CROSS_DEB_ARCH",
@@ -5,3 +8,8 @@ pre-build = [
    "curl -LO https://github.com/protocolbuffers/protobuf/releases/download/v3.15.8/protoc-3.15.8-linux-x86_64.zip && unzip protoc-3.15.8-linux-x86_64.zip -d /usr/",
    "chmod a+x /usr/bin/protoc && chmod -R a+rx /usr/include/google",
]
+
+ [build.env]
+ passthrough = [
+     "JEMALLOC_SYS_WITH_LG_PAGE",
+ ]
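The new `[build.env]` passthrough above means a jemalloc page-size hint set on the host is forwarded into the cross-compilation container. A usage sketch (the value 16, i.e. 64 KiB pages, is only an example; pick whatever matches the target system):

```bash
# Forward the jemalloc page-size setting into the cross build container.
export JEMALLOC_SYS_WITH_LG_PAGE=16
cross build --release --target aarch64-unknown-linux-gnu
```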
Makefile (7 lines changed)
@@ -8,7 +8,7 @@ CARGO_BUILD_OPTS := --locked
IMAGE_REGISTRY ?= docker.io
IMAGE_NAMESPACE ?= greptime
IMAGE_TAG ?= latest
- DEV_BUILDER_IMAGE_TAG ?= 2024-10-19-a5c00e85-20241024184445
+ DEV_BUILDER_IMAGE_TAG ?= 2024-12-25-9d0fa5d5-20250124085746
BUILDX_MULTI_PLATFORM_BUILD ?= false
BUILDX_BUILDER_NAME ?= gtbuilder
BASE_IMAGE ?= ubuntu
@@ -165,15 +165,14 @@ nextest: ## Install nextest tools.
sqlness-test: ## Run sqlness test.
	cargo sqlness ${SQLNESS_OPTS}

- # Run fuzz test ${FUZZ_TARGET}.
RUNS ?= 1
FUZZ_TARGET ?= fuzz_alter_table
.PHONY: fuzz
- fuzz:
+ fuzz: ## Run fuzz test ${FUZZ_TARGET}.
	cargo fuzz run ${FUZZ_TARGET} --fuzz-dir tests-fuzz -D -s none -- -runs=${RUNS}

.PHONY: fuzz-ls
- fuzz-ls:
+ fuzz-ls: ## List all fuzz targets.
	cargo fuzz list --fuzz-dir tests-fuzz

.PHONY: check
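With the help text added above, the fuzz targets are discoverable and parameterised through `FUZZ_TARGET` and `RUNS`; for example, to list the targets and then run the default one with more iterations:

```bash
# List the available fuzz targets, then run one with a custom iteration count.
make fuzz-ls
make fuzz FUZZ_TARGET=fuzz_alter_table RUNS=100
```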
@@ -13,7 +13,7 @@
  <a href="https://greptime.com/product/cloud">GreptimeCloud</a> |
  <a href="https://docs.greptime.com/">User Guide</a> |
  <a href="https://greptimedb.rs/">API Docs</a> |
- <a href="https://github.com/GreptimeTeam/greptimedb/issues/3412">Roadmap 2024</a>
+ <a href="https://github.com/GreptimeTeam/greptimedb/issues/5446">Roadmap 2025</a>
</h4>

<a href="https://github.com/GreptimeTeam/greptimedb/releases/latest">
@@ -116,7 +116,7 @@ docker run -p 127.0.0.1:4000-4003:4000-4003 \
  --name greptime --rm \
  greptime/greptimedb:latest standalone start \
  --http-addr 0.0.0.0:4000 \
- --rpc-addr 0.0.0.0:4001 \
+ --rpc-bind-addr 0.0.0.0:4001 \
  --mysql-addr 0.0.0.0:4002 \
  --postgres-addr 0.0.0.0:4003
```
@@ -138,7 +138,8 @@ Check the prerequisite:

* [Rust toolchain](https://www.rust-lang.org/tools/install) (nightly)
* [Protobuf compiler](https://grpc.io/docs/protoc-installation/) (>= 3.15)
- * Python toolchain (optional): Required only if built with PyO3 backend. More details for compiling with PyO3 can be found in its [documentation](https://pyo3.rs/v0.18.1/building_and_distribution#configuring-the-python-version).
+ * C/C++ building essentials, including `gcc`/`g++`/`autoconf` and glibc library (eg. `libc6-dev` on Ubuntu and `glibc-devel` on Fedora)
+ * Python toolchain (optional): Required only if using some test scripts.

Build GreptimeDB binary:

@@ -228,4 +229,3 @@ Special thanks to all the contributors who have propelled GreptimeDB forward. Fo
- GreptimeDB's query engine is powered by [Apache Arrow DataFusion™](https://arrow.apache.org/datafusion/).
- [Apache OpenDAL™](https://opendal.apache.org) gives GreptimeDB a very general and elegant data access abstraction layer.
- GreptimeDB's meta service is based on [etcd](https://etcd.io/).
- - GreptimeDB uses [RustPython](https://github.com/RustPython/RustPython) for experimental embedded python scripting.
@@ -26,8 +26,10 @@
|
|||||||
| `http.addr` | String | `127.0.0.1:4000` | The address to bind the HTTP server. |
|
| `http.addr` | String | `127.0.0.1:4000` | The address to bind the HTTP server. |
|
||||||
| `http.timeout` | String | `30s` | HTTP request timeout. Set to 0 to disable timeout. |
|
| `http.timeout` | String | `30s` | HTTP request timeout. Set to 0 to disable timeout. |
|
||||||
| `http.body_limit` | String | `64MB` | HTTP request body limit.<br/>The following units are supported: `B`, `KB`, `KiB`, `MB`, `MiB`, `GB`, `GiB`, `TB`, `TiB`, `PB`, `PiB`.<br/>Set to 0 to disable limit. |
|
| `http.body_limit` | String | `64MB` | HTTP request body limit.<br/>The following units are supported: `B`, `KB`, `KiB`, `MB`, `MiB`, `GB`, `GiB`, `TB`, `TiB`, `PB`, `PiB`.<br/>Set to 0 to disable limit. |
|
||||||
|
| `http.enable_cors` | Bool | `true` | HTTP CORS support, it's turned on by default<br/>This allows browser to access http APIs without CORS restrictions |
|
||||||
|
| `http.cors_allowed_origins` | Array | Unset | Customize allowed origins for HTTP CORS. |
|
||||||
| `grpc` | -- | -- | The gRPC server options. |
|
| `grpc` | -- | -- | The gRPC server options. |
|
||||||
| `grpc.addr` | String | `127.0.0.1:4001` | The address to bind the gRPC server. |
|
| `grpc.bind_addr` | String | `127.0.0.1:4001` | The address to bind the gRPC server. |
|
||||||
| `grpc.runtime_size` | Integer | `8` | The number of server worker threads. |
|
| `grpc.runtime_size` | Integer | `8` | The number of server worker threads. |
|
||||||
| `grpc.tls` | -- | -- | gRPC server TLS options, see `mysql.tls` section. |
|
| `grpc.tls` | -- | -- | gRPC server TLS options, see `mysql.tls` section. |
|
||||||
| `grpc.tls.mode` | String | `disable` | TLS mode. |
|
| `grpc.tls.mode` | String | `disable` | TLS mode. |
|
||||||
@@ -38,6 +40,7 @@
|
|||||||
| `mysql.enable` | Bool | `true` | Whether to enable. |
|
| `mysql.enable` | Bool | `true` | Whether to enable. |
|
||||||
| `mysql.addr` | String | `127.0.0.1:4002` | The addr to bind the MySQL server. |
|
| `mysql.addr` | String | `127.0.0.1:4002` | The addr to bind the MySQL server. |
|
||||||
| `mysql.runtime_size` | Integer | `2` | The number of server worker threads. |
|
| `mysql.runtime_size` | Integer | `2` | The number of server worker threads. |
|
||||||
|
| `mysql.keep_alive` | String | `0s` | Server-side keep-alive time.<br/>Set to 0 (default) to disable. |
|
||||||
| `mysql.tls` | -- | -- | -- |
|
| `mysql.tls` | -- | -- | -- |
|
||||||
| `mysql.tls.mode` | String | `disable` | TLS mode, refer to https://www.postgresql.org/docs/current/libpq-ssl.html<br/>- `disable` (default value)<br/>- `prefer`<br/>- `require`<br/>- `verify-ca`<br/>- `verify-full` |
|
| `mysql.tls.mode` | String | `disable` | TLS mode, refer to https://www.postgresql.org/docs/current/libpq-ssl.html<br/>- `disable` (default value)<br/>- `prefer`<br/>- `require`<br/>- `verify-ca`<br/>- `verify-full` |
|
||||||
| `mysql.tls.cert_path` | String | Unset | Certificate file path. |
|
| `mysql.tls.cert_path` | String | Unset | Certificate file path. |
|
||||||
@@ -47,6 +50,7 @@
|
|||||||
| `postgres.enable` | Bool | `true` | Whether to enable |
|
| `postgres.enable` | Bool | `true` | Whether to enable |
|
||||||
| `postgres.addr` | String | `127.0.0.1:4003` | The addr to bind the PostgresSQL server. |
|
| `postgres.addr` | String | `127.0.0.1:4003` | The addr to bind the PostgresSQL server. |
|
||||||
| `postgres.runtime_size` | Integer | `2` | The number of server worker threads. |
|
| `postgres.runtime_size` | Integer | `2` | The number of server worker threads. |
|
||||||
|
| `postgres.keep_alive` | String | `0s` | Server-side keep-alive time.<br/>Set to 0 (default) to disable. |
|
||||||
| `postgres.tls` | -- | -- | PostgresSQL server TLS options, see `mysql.tls` section. |
|
| `postgres.tls` | -- | -- | PostgresSQL server TLS options, see `mysql.tls` section. |
|
||||||
| `postgres.tls.mode` | String | `disable` | TLS mode. |
|
| `postgres.tls.mode` | String | `disable` | TLS mode. |
|
||||||
| `postgres.tls.cert_path` | String | Unset | Certificate file path. |
|
| `postgres.tls.cert_path` | String | Unset | Certificate file path. |
|
||||||
@@ -56,6 +60,8 @@
|
|||||||
| `opentsdb.enable` | Bool | `true` | Whether to enable OpenTSDB put in HTTP API. |
|
| `opentsdb.enable` | Bool | `true` | Whether to enable OpenTSDB put in HTTP API. |
|
||||||
| `influxdb` | -- | -- | InfluxDB protocol options. |
|
| `influxdb` | -- | -- | InfluxDB protocol options. |
|
||||||
| `influxdb.enable` | Bool | `true` | Whether to enable InfluxDB protocol in HTTP API. |
|
| `influxdb.enable` | Bool | `true` | Whether to enable InfluxDB protocol in HTTP API. |
|
||||||
|
| `jaeger` | -- | -- | Jaeger protocol options. |
|
||||||
|
| `jaeger.enable` | Bool | `true` | Whether to enable Jaeger protocol in HTTP API. |
|
||||||
| `prom_store` | -- | -- | Prometheus remote storage options |
|
| `prom_store` | -- | -- | Prometheus remote storage options |
|
||||||
| `prom_store.enable` | Bool | `true` | Whether to enable Prometheus remote write and read in HTTP API. |
|
| `prom_store.enable` | Bool | `true` | Whether to enable Prometheus remote write and read in HTTP API. |
|
||||||
| `prom_store.with_metric_engine` | Bool | `true` | Whether to store the data from Prometheus remote write in metric engine. |
|
| `prom_store.with_metric_engine` | Bool | `true` | Whether to store the data from Prometheus remote write in metric engine. |
|
||||||
@@ -63,8 +69,8 @@
|
|||||||
| `wal.provider` | String | `raft_engine` | The provider of the WAL.<br/>- `raft_engine`: the wal is stored in the local file system by raft-engine.<br/>- `kafka`: it's remote wal that data is stored in Kafka. |
|
| `wal.provider` | String | `raft_engine` | The provider of the WAL.<br/>- `raft_engine`: the wal is stored in the local file system by raft-engine.<br/>- `kafka`: it's remote wal that data is stored in Kafka. |
|
||||||
| `wal.dir` | String | Unset | The directory to store the WAL files.<br/>**It's only used when the provider is `raft_engine`**. |
|
| `wal.dir` | String | Unset | The directory to store the WAL files.<br/>**It's only used when the provider is `raft_engine`**. |
|
||||||
| `wal.file_size` | String | `128MB` | The size of the WAL segment file.<br/>**It's only used when the provider is `raft_engine`**. |
|
| `wal.file_size` | String | `128MB` | The size of the WAL segment file.<br/>**It's only used when the provider is `raft_engine`**. |
|
||||||
| `wal.purge_threshold` | String | `1GB` | The threshold of the WAL size to trigger a flush.<br/>**It's only used when the provider is `raft_engine`**. |
|
| `wal.purge_threshold` | String | `1GB` | The threshold of the WAL size to trigger a purge.<br/>**It's only used when the provider is `raft_engine`**. |
|
||||||
| `wal.purge_interval` | String | `1m` | The interval to trigger a flush.<br/>**It's only used when the provider is `raft_engine`**. |
|
| `wal.purge_interval` | String | `1m` | The interval to trigger a purge.<br/>**It's only used when the provider is `raft_engine`**. |
|
||||||
| `wal.read_batch_size` | Integer | `128` | The read batch size.<br/>**It's only used when the provider is `raft_engine`**. |
|
| `wal.read_batch_size` | Integer | `128` | The read batch size.<br/>**It's only used when the provider is `raft_engine`**. |
|
||||||
| `wal.sync_write` | Bool | `false` | Whether to use sync write.<br/>**It's only used when the provider is `raft_engine`**. |
|
| `wal.sync_write` | Bool | `false` | Whether to use sync write.<br/>**It's only used when the provider is `raft_engine`**. |
|
||||||
| `wal.enable_log_recycle` | Bool | `true` | Whether to reuse logically truncated log files.<br/>**It's only used when the provider is `raft_engine`**. |
|
| `wal.enable_log_recycle` | Bool | `true` | Whether to reuse logically truncated log files.<br/>**It's only used when the provider is `raft_engine`**. |
|
||||||
@@ -86,11 +92,14 @@
|
|||||||
| `wal.backoff_deadline` | String | `5mins` | The deadline of retries.<br/>**It's only used when the provider is `kafka`**. |
|
| `wal.backoff_deadline` | String | `5mins` | The deadline of retries.<br/>**It's only used when the provider is `kafka`**. |
|
||||||
| `wal.overwrite_entry_start_id` | Bool | `false` | Ignore missing entries during read WAL.<br/>**It's only used when the provider is `kafka`**.<br/><br/>This option ensures that when Kafka messages are deleted, the system<br/>can still successfully replay memtable data without throwing an<br/>out-of-range error.<br/>However, enabling this option might lead to unexpected data loss,<br/>as the system will skip over missing entries instead of treating<br/>them as critical errors. |
|
| `wal.overwrite_entry_start_id` | Bool | `false` | Ignore missing entries during read WAL.<br/>**It's only used when the provider is `kafka`**.<br/><br/>This option ensures that when Kafka messages are deleted, the system<br/>can still successfully replay memtable data without throwing an<br/>out-of-range error.<br/>However, enabling this option might lead to unexpected data loss,<br/>as the system will skip over missing entries instead of treating<br/>them as critical errors. |
|
||||||
| `metadata_store` | -- | -- | Metadata storage options. |
|
| `metadata_store` | -- | -- | Metadata storage options. |
|
||||||
| `metadata_store.file_size` | String | `256MB` | Kv file size in bytes. |
|
| `metadata_store.file_size` | String | `64MB` | The size of the metadata store log file. |
|
||||||
| `metadata_store.purge_threshold` | String | `4GB` | Kv purge threshold. |
|
| `metadata_store.purge_threshold` | String | `256MB` | The threshold of the metadata store size to trigger a purge. |
|
||||||
|
| `metadata_store.purge_interval` | String | `1m` | The interval of the metadata store to trigger a purge. |
|
||||||
| `procedure` | -- | -- | Procedure storage options. |
|
| `procedure` | -- | -- | Procedure storage options. |
|
||||||
| `procedure.max_retry_times` | Integer | `3` | Procedure max retry times. |
|
| `procedure.max_retry_times` | Integer | `3` | Procedure max retry times. |
|
||||||
| `procedure.retry_delay` | String | `500ms` | Initial retry delay of procedures, increases exponentially |
|
| `procedure.retry_delay` | String | `500ms` | Initial retry delay of procedures, increases exponentially |
|
||||||
|
| `flow` | -- | -- | flow engine options. |
|
||||||
|
| `flow.num_workers` | Integer | `0` | The number of flow workers in the flownode.<br/>If not set (or set to 0), the number of CPU cores divided by 2 is used. |
|
||||||
| `storage` | -- | -- | The data storage options. |
|
| `storage` | -- | -- | The data storage options. |
|
||||||
| `storage.data_home` | String | `/tmp/greptimedb/` | The working home directory. |
|
| `storage.data_home` | String | `/tmp/greptimedb/` | The working home directory. |
|
||||||
| `storage.type` | String | `File` | The storage type used to store the data.<br/>- `File`: the data is stored in the local file system.<br/>- `S3`: the data is stored in the S3 object storage.<br/>- `Gcs`: the data is stored in the Google Cloud Storage.<br/>- `Azblob`: the data is stored in the Azure Blob Storage.<br/>- `Oss`: the data is stored in the Aliyun OSS. |
|
| `storage.type` | String | `File` | The storage type used to store the data.<br/>- `File`: the data is stored in the local file system.<br/>- `S3`: the data is stored in the S3 object storage.<br/>- `Gcs`: the data is stored in the Google Cloud Storage.<br/>- `Azblob`: the data is stored in the Azure Blob Storage.<br/>- `Oss`: the data is stored in the Aliyun OSS. |
|
||||||
@@ -143,15 +152,16 @@
|
|||||||
| `region_engine.mito.index` | -- | -- | The options for index in Mito engine. |
|
| `region_engine.mito.index` | -- | -- | The options for index in Mito engine. |
|
||||||
| `region_engine.mito.index.aux_path` | String | `""` | Auxiliary directory path for the index in filesystem, used to store intermediate files for<br/>creating the index and staging files for searching the index, defaults to `{data_home}/index_intermediate`.<br/>The default name for this directory is `index_intermediate` for backward compatibility.<br/><br/>This path contains two subdirectories:<br/>- `__intm`: for storing intermediate files used during creating index.<br/>- `staging`: for storing staging files used during searching index. |
|
| `region_engine.mito.index.aux_path` | String | `""` | Auxiliary directory path for the index in filesystem, used to store intermediate files for<br/>creating the index and staging files for searching the index, defaults to `{data_home}/index_intermediate`.<br/>The default name for this directory is `index_intermediate` for backward compatibility.<br/><br/>This path contains two subdirectories:<br/>- `__intm`: for storing intermediate files used during creating index.<br/>- `staging`: for storing staging files used during searching index. |
|
||||||
| `region_engine.mito.index.staging_size` | String | `2GB` | The max capacity of the staging directory. |
|
| `region_engine.mito.index.staging_size` | String | `2GB` | The max capacity of the staging directory. |
|
||||||
|
| `region_engine.mito.index.staging_ttl` | String | `7d` | The TTL of the staging directory.<br/>Defaults to 7 days.<br/>Set it to "0s" to disable the TTL. |
|
||||||
|
| `region_engine.mito.index.metadata_cache_size` | String | `64MiB` | Cache size for inverted index metadata. |
|
||||||
|
| `region_engine.mito.index.content_cache_size` | String | `128MiB` | Cache size for inverted index content. |
|
||||||
|
| `region_engine.mito.index.content_cache_page_size` | String | `64KiB` | Page size for inverted index content cache. |
|
||||||
| `region_engine.mito.inverted_index` | -- | -- | The options for inverted index in Mito engine. |
|
| `region_engine.mito.inverted_index` | -- | -- | The options for inverted index in Mito engine. |
|
||||||
| `region_engine.mito.inverted_index.create_on_flush` | String | `auto` | Whether to create the index on flush.<br/>- `auto`: automatically (default)<br/>- `disable`: never |
|
| `region_engine.mito.inverted_index.create_on_flush` | String | `auto` | Whether to create the index on flush.<br/>- `auto`: automatically (default)<br/>- `disable`: never |
|
||||||
| `region_engine.mito.inverted_index.create_on_compaction` | String | `auto` | Whether to create the index on compaction.<br/>- `auto`: automatically (default)<br/>- `disable`: never |
|
| `region_engine.mito.inverted_index.create_on_compaction` | String | `auto` | Whether to create the index on compaction.<br/>- `auto`: automatically (default)<br/>- `disable`: never |
|
||||||
| `region_engine.mito.inverted_index.apply_on_query` | String | `auto` | Whether to apply the index on query<br/>- `auto`: automatically (default)<br/>- `disable`: never |
|
| `region_engine.mito.inverted_index.apply_on_query` | String | `auto` | Whether to apply the index on query<br/>- `auto`: automatically (default)<br/>- `disable`: never |
|
||||||
| `region_engine.mito.inverted_index.mem_threshold_on_create` | String | `auto` | Memory threshold for performing an external sort during index creation.<br/>- `auto`: automatically determine the threshold based on the system memory size (default)<br/>- `unlimited`: no memory limit<br/>- `[size]` e.g. `64MB`: fixed memory threshold |
|
| `region_engine.mito.inverted_index.mem_threshold_on_create` | String | `auto` | Memory threshold for performing an external sort during index creation.<br/>- `auto`: automatically determine the threshold based on the system memory size (default)<br/>- `unlimited`: no memory limit<br/>- `[size]` e.g. `64MB`: fixed memory threshold |
|
||||||
| `region_engine.mito.inverted_index.intermediate_path` | String | `""` | Deprecated, use `region_engine.mito.index.aux_path` instead. |
|
| `region_engine.mito.inverted_index.intermediate_path` | String | `""` | Deprecated, use `region_engine.mito.index.aux_path` instead. |
|
||||||
| `region_engine.mito.inverted_index.metadata_cache_size` | String | `64MiB` | Cache size for inverted index metadata. |
|
|
||||||
| `region_engine.mito.inverted_index.content_cache_size` | String | `128MiB` | Cache size for inverted index content. |
|
|
||||||
| `region_engine.mito.inverted_index.content_cache_page_size` | String | `64KiB` | Page size for inverted index content cache. |
|
|
||||||
| `region_engine.mito.fulltext_index` | -- | -- | The options for full-text index in Mito engine. |
|
| `region_engine.mito.fulltext_index` | -- | -- | The options for full-text index in Mito engine. |
|
||||||
| `region_engine.mito.fulltext_index.create_on_flush` | String | `auto` | Whether to create the index on flush.<br/>- `auto`: automatically (default)<br/>- `disable`: never |
|
| `region_engine.mito.fulltext_index.create_on_flush` | String | `auto` | Whether to create the index on flush.<br/>- `auto`: automatically (default)<br/>- `disable`: never |
|
||||||
| `region_engine.mito.fulltext_index.create_on_compaction` | String | `auto` | Whether to create the index on compaction.<br/>- `auto`: automatically (default)<br/>- `disable`: never |
|
| `region_engine.mito.fulltext_index.create_on_compaction` | String | `auto` | Whether to create the index on compaction.<br/>- `auto`: automatically (default)<br/>- `disable`: never |
|
||||||
@@ -168,6 +178,8 @@
|
|||||||
| `region_engine.mito.memtable.data_freeze_threshold` | Integer | `32768` | The max rows of data inside the actively writing buffer in one shard.<br/>Only available for `partition_tree` memtable. |
|
| `region_engine.mito.memtable.data_freeze_threshold` | Integer | `32768` | The max rows of data inside the actively writing buffer in one shard.<br/>Only available for `partition_tree` memtable. |
|
||||||
| `region_engine.mito.memtable.fork_dictionary_bytes` | String | `1GiB` | Max dictionary bytes.<br/>Only available for `partition_tree` memtable. |
|
| `region_engine.mito.memtable.fork_dictionary_bytes` | String | `1GiB` | Max dictionary bytes.<br/>Only available for `partition_tree` memtable. |
|
||||||
| `region_engine.file` | -- | -- | Enable the file engine. |
|
| `region_engine.file` | -- | -- | Enable the file engine. |
|
||||||
|
| `region_engine.metric` | -- | -- | Metric engine options. |
|
||||||
|
| `region_engine.metric.experimental_sparse_primary_key_encoding` | Bool | `false` | Whether to enable the experimental sparse primary key encoding. |
|
||||||
| `logging` | -- | -- | The logging options. |
|
| `logging` | -- | -- | The logging options. |
|
||||||
| `logging.dir` | String | `/tmp/greptimedb/logs` | The directory to store the log files. If set to empty, logs will not be written to files. |
|
| `logging.dir` | String | `/tmp/greptimedb/logs` | The directory to store the log files. If set to empty, logs will not be written to files. |
|
||||||
| `logging.level` | String | Unset | The log level. Can be `info`/`debug`/`warn`/`error`. |
|
| `logging.level` | String | Unset | The log level. Can be `info`/`debug`/`warn`/`error`. |
|
||||||
@@ -212,9 +224,11 @@
|
|||||||
| `http.addr` | String | `127.0.0.1:4000` | The address to bind the HTTP server. |
|
| `http.addr` | String | `127.0.0.1:4000` | The address to bind the HTTP server. |
|
||||||
| `http.timeout` | String | `30s` | HTTP request timeout. Set to 0 to disable timeout. |
|
| `http.timeout` | String | `30s` | HTTP request timeout. Set to 0 to disable timeout. |
|
||||||
| `http.body_limit` | String | `64MB` | HTTP request body limit.<br/>The following units are supported: `B`, `KB`, `KiB`, `MB`, `MiB`, `GB`, `GiB`, `TB`, `TiB`, `PB`, `PiB`.<br/>Set to 0 to disable limit. |
|
| `http.body_limit` | String | `64MB` | HTTP request body limit.<br/>The following units are supported: `B`, `KB`, `KiB`, `MB`, `MiB`, `GB`, `GiB`, `TB`, `TiB`, `PB`, `PiB`.<br/>Set to 0 to disable limit. |
|
||||||
|
| `http.enable_cors` | Bool | `true` | HTTP CORS support; it's turned on by default.<br/>This allows browsers to access HTTP APIs without CORS restrictions. |
|
||||||
|
| `http.cors_allowed_origins` | Array | Unset | Customize allowed origins for HTTP CORS. |
|
||||||
| `grpc` | -- | -- | The gRPC server options. |
|
| `grpc` | -- | -- | The gRPC server options. |
|
||||||
| `grpc.addr` | String | `127.0.0.1:4001` | The address to bind the gRPC server. |
|
| `grpc.bind_addr` | String | `127.0.0.1:4001` | The address to bind the gRPC server. |
|
||||||
| `grpc.hostname` | String | `127.0.0.1` | The hostname advertised to the metasrv,<br/>and used for connections from outside the host |
|
| `grpc.server_addr` | String | `127.0.0.1:4001` | The address advertised to the metasrv, and used for connections from outside the host.<br/>If left empty or unset, the server will automatically use the IP address of the first network interface<br/>on the host, with the same port number as the one specified in `grpc.bind_addr`. |
|
||||||
| `grpc.runtime_size` | Integer | `8` | The number of server worker threads. |
|
| `grpc.runtime_size` | Integer | `8` | The number of server worker threads. |
|
||||||
| `grpc.tls` | -- | -- | gRPC server TLS options, see `mysql.tls` section. |
|
| `grpc.tls` | -- | -- | gRPC server TLS options, see `mysql.tls` section. |
|
||||||
| `grpc.tls.mode` | String | `disable` | TLS mode. |
|
| `grpc.tls.mode` | String | `disable` | TLS mode. |
|
||||||
@@ -225,6 +239,7 @@
|
|||||||
| `mysql.enable` | Bool | `true` | Whether to enable. |
|
| `mysql.enable` | Bool | `true` | Whether to enable. |
|
||||||
| `mysql.addr` | String | `127.0.0.1:4002` | The address to bind the MySQL server. |
|
| `mysql.addr` | String | `127.0.0.1:4002` | The address to bind the MySQL server. |
|
||||||
| `mysql.runtime_size` | Integer | `2` | The number of server worker threads. |
|
| `mysql.runtime_size` | Integer | `2` | The number of server worker threads. |
|
||||||
|
| `mysql.keep_alive` | String | `0s` | Server-side keep-alive time.<br/>Set to 0 (default) to disable. |
|
||||||
| `mysql.tls` | -- | -- | -- |
|
| `mysql.tls` | -- | -- | -- |
|
||||||
| `mysql.tls.mode` | String | `disable` | TLS mode, refer to https://www.postgresql.org/docs/current/libpq-ssl.html<br/>- `disable` (default value)<br/>- `prefer`<br/>- `require`<br/>- `verify-ca`<br/>- `verify-full` |
|
| `mysql.tls.mode` | String | `disable` | TLS mode, refer to https://www.postgresql.org/docs/current/libpq-ssl.html<br/>- `disable` (default value)<br/>- `prefer`<br/>- `require`<br/>- `verify-ca`<br/>- `verify-full` |
|
||||||
| `mysql.tls.cert_path` | String | Unset | Certificate file path. |
|
| `mysql.tls.cert_path` | String | Unset | Certificate file path. |
|
||||||
@@ -234,6 +249,7 @@
|
|||||||
| `postgres.enable` | Bool | `true` | Whether to enable. |
|
| `postgres.enable` | Bool | `true` | Whether to enable. |
|
||||||
| `postgres.addr` | String | `127.0.0.1:4003` | The address to bind the PostgreSQL server. |
|
| `postgres.addr` | String | `127.0.0.1:4003` | The address to bind the PostgreSQL server. |
|
||||||
| `postgres.runtime_size` | Integer | `2` | The number of server worker threads. |
|
| `postgres.runtime_size` | Integer | `2` | The number of server worker threads. |
|
||||||
|
| `postgres.keep_alive` | String | `0s` | Server-side keep-alive time.<br/>Set to 0 (default) to disable. |
|
||||||
| `postgres.tls` | -- | -- | PostgreSQL server TLS options, see `mysql.tls` section. |
|
| `postgres.tls` | -- | -- | PostgreSQL server TLS options, see `mysql.tls` section. |
|
||||||
| `postgres.tls.mode` | String | `disable` | TLS mode. |
|
| `postgres.tls.mode` | String | `disable` | TLS mode. |
|
||||||
| `postgres.tls.cert_path` | String | Unset | Certificate file path. |
|
| `postgres.tls.cert_path` | String | Unset | Certificate file path. |
|
||||||
@@ -243,6 +259,8 @@
|
|||||||
| `opentsdb.enable` | Bool | `true` | Whether to enable OpenTSDB put in HTTP API. |
|
| `opentsdb.enable` | Bool | `true` | Whether to enable OpenTSDB put in HTTP API. |
|
||||||
| `influxdb` | -- | -- | InfluxDB protocol options. |
|
| `influxdb` | -- | -- | InfluxDB protocol options. |
|
||||||
| `influxdb.enable` | Bool | `true` | Whether to enable InfluxDB protocol in HTTP API. |
|
| `influxdb.enable` | Bool | `true` | Whether to enable InfluxDB protocol in HTTP API. |
|
||||||
|
| `jaeger` | -- | -- | Jaeger protocol options. |
|
||||||
|
| `jaeger.enable` | Bool | `true` | Whether to enable Jaeger protocol in HTTP API. |
|
||||||
| `prom_store` | -- | -- | Prometheus remote storage options |
|
| `prom_store` | -- | -- | Prometheus remote storage options |
|
||||||
| `prom_store.enable` | Bool | `true` | Whether to enable Prometheus remote write and read in HTTP API. |
|
| `prom_store.enable` | Bool | `true` | Whether to enable Prometheus remote write and read in HTTP API. |
|
||||||
| `prom_store.with_metric_engine` | Bool | `true` | Whether to store the data from Prometheus remote write in metric engine. |
|
| `prom_store.with_metric_engine` | Bool | `true` | Whether to store the data from Prometheus remote write in metric engine. |
|
||||||
@@ -292,13 +310,16 @@
|
|||||||
| --- | -----| ------- | ----------- |
|
| --- | -----| ------- | ----------- |
|
||||||
| `data_home` | String | `/tmp/metasrv/` | The working home directory. |
|
| `data_home` | String | `/tmp/metasrv/` | The working home directory. |
|
||||||
| `bind_addr` | String | `127.0.0.1:3002` | The bind address of metasrv. |
|
| `bind_addr` | String | `127.0.0.1:3002` | The bind address of metasrv. |
|
||||||
| `server_addr` | String | `127.0.0.1:3002` | The communication server address for frontend and datanode to connect to metasrv, "127.0.0.1:3002" by default for localhost. |
|
| `server_addr` | String | `127.0.0.1:3002` | The communication server address for the frontend and datanode to connect to metasrv.<br/>If left empty or unset, the server will automatically use the IP address of the first network interface<br/>on the host, with the same port number as the one specified in `bind_addr`. |
|
||||||
| `store_addrs` | Array | -- | Store server address default to etcd store. |
|
| `store_addrs` | Array | -- | Store server addresses; defaults to the etcd store.<br/>For the postgres store, the format is:<br/>"password=password dbname=postgres user=postgres host=localhost port=5432"<br/>For the etcd store, the format is:<br/>"127.0.0.1:2379" |
|
||||||
| `store_key_prefix` | String | `""` | If it's not empty, the metasrv will store all data with this key prefix. |
|
| `store_key_prefix` | String | `""` | If it's not empty, the metasrv will store all data with this key prefix. |
|
||||||
| `backend` | String | `EtcdStore` | The datastore for meta server. |
|
| `backend` | String | `etcd_store` | The datastore for meta server.<br/>Available values:<br/>- `etcd_store` (default value)<br/>- `memory_store`<br/>- `postgres_store` |
|
||||||
|
| `meta_table_name` | String | `greptime_metakv` | Table name in the RDS to store metadata. Effective when using an RDS kvbackend.<br/>**Only used when backend is `postgres_store`.** |
|
||||||
|
| `meta_election_lock_id` | Integer | `1` | Advisory lock id in PostgreSQL for election. Effective when using PostgreSQL as the kvbackend.<br/>Only used when backend is `postgres_store`. |
|
||||||
| `selector` | String | `round_robin` | Datanode selector type.<br/>- `round_robin` (default value)<br/>- `lease_based`<br/>- `load_based`<br/>For details, please see "https://docs.greptime.com/developer-guide/metasrv/selector". |
|
| `selector` | String | `round_robin` | Datanode selector type.<br/>- `round_robin` (default value)<br/>- `lease_based`<br/>- `load_based`<br/>For details, please see "https://docs.greptime.com/developer-guide/metasrv/selector". |
|
||||||
| `use_memory_store` | Bool | `false` | Store data in memory. |
|
| `use_memory_store` | Bool | `false` | Store data in memory. |
|
||||||
| `enable_region_failover` | Bool | `false` | Whether to enable region failover.<br/>This feature is only available for GreptimeDB running in cluster mode and requires:<br/>- Using Remote WAL<br/>- Using shared storage (e.g., S3). |
|
| `enable_region_failover` | Bool | `false` | Whether to enable region failover.<br/>This feature is only available for GreptimeDB running in cluster mode and requires:<br/>- Using Remote WAL<br/>- Using shared storage (e.g., S3). |
|
||||||
|
| `node_max_idle_time` | String | `24hours` | Max allowed idle time before removing node info from metasrv memory. |
|
||||||
| `enable_telemetry` | Bool | `true` | Whether to enable greptimedb telemetry. Enabled by default. |
|
| `enable_telemetry` | Bool | `true` | Whether to enable greptimedb telemetry. Enabled by default. |
|
||||||
| `runtime` | -- | -- | The runtime options. |
|
| `runtime` | -- | -- | The runtime options. |
|
||||||
| `runtime.global_rt_size` | Integer | `8` | The number of threads to execute the runtime for global read operations. |
|
| `runtime.global_rt_size` | Integer | `8` | The number of threads to execute the runtime for global read operations. |
|
||||||
@@ -323,7 +344,7 @@
|
|||||||
| `wal.auto_create_topics` | Bool | `true` | Automatically create topics for WAL.<br/>Set to `true` to automatically create topics for WAL.<br/>Otherwise, use topics named `topic_name_prefix_[0..num_topics)` |
|
| `wal.auto_create_topics` | Bool | `true` | Automatically create topics for WAL.<br/>Set to `true` to automatically create topics for WAL.<br/>Otherwise, use topics named `topic_name_prefix_[0..num_topics)` |
|
||||||
| `wal.num_topics` | Integer | `64` | Number of topics. |
|
| `wal.num_topics` | Integer | `64` | Number of topics. |
|
||||||
| `wal.selector_type` | String | `round_robin` | Topic selector type.<br/>Available selector types:<br/>- `round_robin` (default) |
|
| `wal.selector_type` | String | `round_robin` | Topic selector type.<br/>Available selector types:<br/>- `round_robin` (default) |
|
||||||
| `wal.topic_name_prefix` | String | `greptimedb_wal_topic` | A Kafka topic is constructed by concatenating `topic_name_prefix` and `topic_id`.<br/>i.g., greptimedb_wal_topic_0, greptimedb_wal_topic_1. |
|
| `wal.topic_name_prefix` | String | `greptimedb_wal_topic` | A Kafka topic is constructed by concatenating `topic_name_prefix` and `topic_id`.<br/>Only accepts strings that match the following regular expression pattern:<br/>[a-zA-Z_:-][a-zA-Z0-9_:\-\.@#]*<br/>e.g., greptimedb_wal_topic_0, greptimedb_wal_topic_1. |
|
||||||
| `wal.replication_factor` | Integer | `1` | Expected number of replicas of each partition. |
|
| `wal.replication_factor` | Integer | `1` | Expected number of replicas of each partition. |
|
||||||
| `wal.create_topic_timeout` | String | `30s` | The timeout above which a topic creation operation will be cancelled. |
|
| `wal.create_topic_timeout` | String | `30s` | The timeout above which a topic creation operation will be cancelled. |
|
||||||
| `wal.backoff_init` | String | `500ms` | The initial backoff for kafka clients. |
|
| `wal.backoff_init` | String | `500ms` | The initial backoff for kafka clients. |
|
||||||
@@ -366,19 +387,14 @@
|
|||||||
| `init_regions_in_background` | Bool | `false` | Initialize all regions in the background during the startup.<br/>By default, it provides services after all regions have been initialized. |
|
| `init_regions_in_background` | Bool | `false` | Initialize all regions in the background during the startup.<br/>By default, it provides services after all regions have been initialized. |
|
||||||
| `init_regions_parallelism` | Integer | `16` | Parallelism of initializing regions. |
|
| `init_regions_parallelism` | Integer | `16` | Parallelism of initializing regions. |
|
||||||
| `max_concurrent_queries` | Integer | `0` | The maximum number of concurrent queries allowed to be executed. Zero means unlimited. |
|
| `max_concurrent_queries` | Integer | `0` | The maximum number of concurrent queries allowed to be executed. Zero means unlimited. |
|
||||||
| `rpc_addr` | String | Unset | Deprecated, use `grpc.addr` instead. |
|
|
||||||
| `rpc_hostname` | String | Unset | Deprecated, use `grpc.hostname` instead. |
|
|
||||||
| `rpc_runtime_size` | Integer | Unset | Deprecated, use `grpc.runtime_size` instead. |
|
|
||||||
| `rpc_max_recv_message_size` | String | Unset | Deprecated, use `grpc.rpc_max_recv_message_size` instead. |
|
|
||||||
| `rpc_max_send_message_size` | String | Unset | Deprecated, use `grpc.rpc_max_send_message_size` instead. |
|
|
||||||
| `enable_telemetry` | Bool | `true` | Enable telemetry to collect anonymous usage data. Enabled by default. |
|
| `enable_telemetry` | Bool | `true` | Enable telemetry to collect anonymous usage data. Enabled by default. |
|
||||||
| `http` | -- | -- | The HTTP server options. |
|
| `http` | -- | -- | The HTTP server options. |
|
||||||
| `http.addr` | String | `127.0.0.1:4000` | The address to bind the HTTP server. |
|
| `http.addr` | String | `127.0.0.1:4000` | The address to bind the HTTP server. |
|
||||||
| `http.timeout` | String | `30s` | HTTP request timeout. Set to 0 to disable timeout. |
|
| `http.timeout` | String | `30s` | HTTP request timeout. Set to 0 to disable timeout. |
|
||||||
| `http.body_limit` | String | `64MB` | HTTP request body limit.<br/>The following units are supported: `B`, `KB`, `KiB`, `MB`, `MiB`, `GB`, `GiB`, `TB`, `TiB`, `PB`, `PiB`.<br/>Set to 0 to disable limit. |
|
| `http.body_limit` | String | `64MB` | HTTP request body limit.<br/>The following units are supported: `B`, `KB`, `KiB`, `MB`, `MiB`, `GB`, `GiB`, `TB`, `TiB`, `PB`, `PiB`.<br/>Set to 0 to disable limit. |
|
||||||
| `grpc` | -- | -- | The gRPC server options. |
|
| `grpc` | -- | -- | The gRPC server options. |
|
||||||
| `grpc.addr` | String | `127.0.0.1:3001` | The address to bind the gRPC server. |
|
| `grpc.bind_addr` | String | `127.0.0.1:3001` | The address to bind the gRPC server. |
|
||||||
| `grpc.hostname` | String | `127.0.0.1` | The hostname advertised to the metasrv,<br/>and used for connections from outside the host |
|
| `grpc.server_addr` | String | `127.0.0.1:3001` | The address advertised to the metasrv, and used for connections from outside the host.<br/>If left empty or unset, the server will automatically use the IP address of the first network interface<br/>on the host, with the same port number as the one specified in `grpc.bind_addr`. |
|
||||||
| `grpc.runtime_size` | Integer | `8` | The number of server worker threads. |
|
| `grpc.runtime_size` | Integer | `8` | The number of server worker threads. |
|
||||||
| `grpc.max_recv_message_size` | String | `512MB` | The maximum receive message size for gRPC server. |
|
| `grpc.max_recv_message_size` | String | `512MB` | The maximum receive message size for gRPC server. |
|
||||||
| `grpc.max_send_message_size` | String | `512MB` | The maximum send message size for gRPC server. |
|
| `grpc.max_send_message_size` | String | `512MB` | The maximum send message size for gRPC server. |
|
||||||
@@ -477,15 +493,16 @@
|
|||||||
| `region_engine.mito.index` | -- | -- | The options for index in Mito engine. |
|
| `region_engine.mito.index` | -- | -- | The options for index in Mito engine. |
|
||||||
| `region_engine.mito.index.aux_path` | String | `""` | Auxiliary directory path for the index in filesystem, used to store intermediate files for<br/>creating the index and staging files for searching the index, defaults to `{data_home}/index_intermediate`.<br/>The default name for this directory is `index_intermediate` for backward compatibility.<br/><br/>This path contains two subdirectories:<br/>- `__intm`: for storing intermediate files used during creating index.<br/>- `staging`: for storing staging files used during searching index. |
|
| `region_engine.mito.index.aux_path` | String | `""` | Auxiliary directory path for the index in filesystem, used to store intermediate files for<br/>creating the index and staging files for searching the index, defaults to `{data_home}/index_intermediate`.<br/>The default name for this directory is `index_intermediate` for backward compatibility.<br/><br/>This path contains two subdirectories:<br/>- `__intm`: for storing intermediate files used during creating index.<br/>- `staging`: for storing staging files used during searching index. |
|
||||||
| `region_engine.mito.index.staging_size` | String | `2GB` | The max capacity of the staging directory. |
|
| `region_engine.mito.index.staging_size` | String | `2GB` | The max capacity of the staging directory. |
|
||||||
|
| `region_engine.mito.index.staging_ttl` | String | `7d` | The TTL of the staging directory.<br/>Defaults to 7 days.<br/>Set it to "0s" to disable the TTL. |
|
||||||
|
| `region_engine.mito.index.metadata_cache_size` | String | `64MiB` | Cache size for inverted index metadata. |
|
||||||
|
| `region_engine.mito.index.content_cache_size` | String | `128MiB` | Cache size for inverted index content. |
|
||||||
|
| `region_engine.mito.index.content_cache_page_size` | String | `64KiB` | Page size for inverted index content cache. |
|
||||||
| `region_engine.mito.inverted_index` | -- | -- | The options for inverted index in Mito engine. |
|
| `region_engine.mito.inverted_index` | -- | -- | The options for inverted index in Mito engine. |
|
||||||
| `region_engine.mito.inverted_index.create_on_flush` | String | `auto` | Whether to create the index on flush.<br/>- `auto`: automatically (default)<br/>- `disable`: never |
|
| `region_engine.mito.inverted_index.create_on_flush` | String | `auto` | Whether to create the index on flush.<br/>- `auto`: automatically (default)<br/>- `disable`: never |
|
||||||
| `region_engine.mito.inverted_index.create_on_compaction` | String | `auto` | Whether to create the index on compaction.<br/>- `auto`: automatically (default)<br/>- `disable`: never |
|
| `region_engine.mito.inverted_index.create_on_compaction` | String | `auto` | Whether to create the index on compaction.<br/>- `auto`: automatically (default)<br/>- `disable`: never |
|
||||||
| `region_engine.mito.inverted_index.apply_on_query` | String | `auto` | Whether to apply the index on query<br/>- `auto`: automatically (default)<br/>- `disable`: never |
|
| `region_engine.mito.inverted_index.apply_on_query` | String | `auto` | Whether to apply the index on query<br/>- `auto`: automatically (default)<br/>- `disable`: never |
|
||||||
| `region_engine.mito.inverted_index.mem_threshold_on_create` | String | `auto` | Memory threshold for performing an external sort during index creation.<br/>- `auto`: automatically determine the threshold based on the system memory size (default)<br/>- `unlimited`: no memory limit<br/>- `[size]` e.g. `64MB`: fixed memory threshold |
|
| `region_engine.mito.inverted_index.mem_threshold_on_create` | String | `auto` | Memory threshold for performing an external sort during index creation.<br/>- `auto`: automatically determine the threshold based on the system memory size (default)<br/>- `unlimited`: no memory limit<br/>- `[size]` e.g. `64MB`: fixed memory threshold |
|
||||||
| `region_engine.mito.inverted_index.intermediate_path` | String | `""` | Deprecated, use `region_engine.mito.index.aux_path` instead. |
|
| `region_engine.mito.inverted_index.intermediate_path` | String | `""` | Deprecated, use `region_engine.mito.index.aux_path` instead. |
|
||||||
| `region_engine.mito.inverted_index.metadata_cache_size` | String | `64MiB` | Cache size for inverted index metadata. |
|
|
||||||
| `region_engine.mito.inverted_index.content_cache_size` | String | `128MiB` | Cache size for inverted index content. |
|
|
||||||
| `region_engine.mito.inverted_index.content_cache_page_size` | String | `64KiB` | Page size for inverted index content cache. |
|
|
||||||
| `region_engine.mito.fulltext_index` | -- | -- | The options for full-text index in Mito engine. |
|
| `region_engine.mito.fulltext_index` | -- | -- | The options for full-text index in Mito engine. |
|
||||||
| `region_engine.mito.fulltext_index.create_on_flush` | String | `auto` | Whether to create the index on flush.<br/>- `auto`: automatically (default)<br/>- `disable`: never |
|
| `region_engine.mito.fulltext_index.create_on_flush` | String | `auto` | Whether to create the index on flush.<br/>- `auto`: automatically (default)<br/>- `disable`: never |
|
||||||
| `region_engine.mito.fulltext_index.create_on_compaction` | String | `auto` | Whether to create the index on compaction.<br/>- `auto`: automatically (default)<br/>- `disable`: never |
|
| `region_engine.mito.fulltext_index.create_on_compaction` | String | `auto` | Whether to create the index on compaction.<br/>- `auto`: automatically (default)<br/>- `disable`: never |
|
||||||
@@ -502,6 +519,8 @@
|
|||||||
| `region_engine.mito.memtable.data_freeze_threshold` | Integer | `32768` | The max rows of data inside the actively writing buffer in one shard.<br/>Only available for `partition_tree` memtable. |
|
| `region_engine.mito.memtable.data_freeze_threshold` | Integer | `32768` | The max rows of data inside the actively writing buffer in one shard.<br/>Only available for `partition_tree` memtable. |
|
||||||
| `region_engine.mito.memtable.fork_dictionary_bytes` | String | `1GiB` | Max dictionary bytes.<br/>Only available for `partition_tree` memtable. |
|
| `region_engine.mito.memtable.fork_dictionary_bytes` | String | `1GiB` | Max dictionary bytes.<br/>Only available for `partition_tree` memtable. |
|
||||||
| `region_engine.file` | -- | -- | Enable the file engine. |
|
| `region_engine.file` | -- | -- | Enable the file engine. |
|
||||||
|
| `region_engine.metric` | -- | -- | Metric engine options. |
|
||||||
|
| `region_engine.metric.experimental_sparse_primary_key_encoding` | Bool | `false` | Whether to enable the experimental sparse primary key encoding. |
|
||||||
| `logging` | -- | -- | The logging options. |
|
| `logging` | -- | -- | The logging options. |
|
||||||
| `logging.dir` | String | `/tmp/greptimedb/logs` | The directory to store the log files. If set to empty, logs will not be written to files. |
|
| `logging.dir` | String | `/tmp/greptimedb/logs` | The directory to store the log files. If set to empty, logs will not be written to files. |
|
||||||
| `logging.level` | String | Unset | The log level. Can be `info`/`debug`/`warn`/`error`. |
|
| `logging.level` | String | Unset | The log level. Can be `info`/`debug`/`warn`/`error`. |
|
||||||
@@ -534,12 +553,18 @@
|
|||||||
| --- | -----| ------- | ----------- |
|
| --- | -----| ------- | ----------- |
|
||||||
| `mode` | String | `distributed` | The running mode of the flownode. It can be `standalone` or `distributed`. |
|
| `mode` | String | `distributed` | The running mode of the flownode. It can be `standalone` or `distributed`. |
|
||||||
| `node_id` | Integer | Unset | The flownode identifier; it should be unique in the cluster. |
|
| `node_id` | Integer | Unset | The flownode identifier; it should be unique in the cluster. |
|
||||||
|
| `flow` | -- | -- | flow engine options. |
|
||||||
|
| `flow.num_workers` | Integer | `0` | The number of flow workers in the flownode.<br/>If not set (or set to 0), the number of CPU cores divided by 2 is used. |
|
||||||
| `grpc` | -- | -- | The gRPC server options. |
|
| `grpc` | -- | -- | The gRPC server options. |
|
||||||
| `grpc.addr` | String | `127.0.0.1:6800` | The address to bind the gRPC server. |
|
| `grpc.bind_addr` | String | `127.0.0.1:6800` | The address to bind the gRPC server. |
|
||||||
| `grpc.hostname` | String | `127.0.0.1` | The hostname advertised to the metasrv,<br/>and used for connections from outside the host |
|
| `grpc.server_addr` | String | `127.0.0.1:6800` | The address advertised to the metasrv,<br/>and used for connections from outside the host. |
|
||||||
| `grpc.runtime_size` | Integer | `2` | The number of server worker threads. |
|
| `grpc.runtime_size` | Integer | `2` | The number of server worker threads. |
|
||||||
| `grpc.max_recv_message_size` | String | `512MB` | The maximum receive message size for gRPC server. |
|
| `grpc.max_recv_message_size` | String | `512MB` | The maximum receive message size for gRPC server. |
|
||||||
| `grpc.max_send_message_size` | String | `512MB` | The maximum send message size for gRPC server. |
|
| `grpc.max_send_message_size` | String | `512MB` | The maximum send message size for gRPC server. |
|
||||||
|
| `http` | -- | -- | The HTTP server options. |
|
||||||
|
| `http.addr` | String | `127.0.0.1:4000` | The address to bind the HTTP server. |
|
||||||
|
| `http.timeout` | String | `30s` | HTTP request timeout. Set to 0 to disable timeout. |
|
||||||
|
| `http.body_limit` | String | `64MB` | HTTP request body limit.<br/>The following units are supported: `B`, `KB`, `KiB`, `MB`, `MiB`, `GB`, `GiB`, `TB`, `TiB`, `PB`, `PiB`.<br/>Set to 0 to disable limit. |
|
||||||
| `meta_client` | -- | -- | The metasrv client options. |
|
| `meta_client` | -- | -- | The metasrv client options. |
|
||||||
| `meta_client.metasrv_addrs` | Array | -- | The addresses of the metasrv. |
|
| `meta_client.metasrv_addrs` | Array | -- | The addresses of the metasrv. |
|
||||||
| `meta_client.timeout` | String | `3s` | Operation timeout. |
|
| `meta_client.timeout` | String | `3s` | Operation timeout. |
|
||||||
|
|||||||
@@ -19,26 +19,6 @@ init_regions_parallelism = 16
|
|||||||
## The maximum number of concurrent queries allowed to be executed. Zero means unlimited.
|
## The maximum number of concurrent queries allowed to be executed. Zero means unlimited.
|
||||||
max_concurrent_queries = 0
|
max_concurrent_queries = 0
|
||||||
|
|
||||||
## Deprecated, use `grpc.addr` instead.
|
|
||||||
## @toml2docs:none-default
|
|
||||||
rpc_addr = "127.0.0.1:3001"
|
|
||||||
|
|
||||||
## Deprecated, use `grpc.hostname` instead.
|
|
||||||
## @toml2docs:none-default
|
|
||||||
rpc_hostname = "127.0.0.1"
|
|
||||||
|
|
||||||
## Deprecated, use `grpc.runtime_size` instead.
|
|
||||||
## @toml2docs:none-default
|
|
||||||
rpc_runtime_size = 8
|
|
||||||
|
|
||||||
## Deprecated, use `grpc.rpc_max_recv_message_size` instead.
|
|
||||||
## @toml2docs:none-default
|
|
||||||
rpc_max_recv_message_size = "512MB"
|
|
||||||
|
|
||||||
## Deprecated, use `grpc.rpc_max_send_message_size` instead.
|
|
||||||
## @toml2docs:none-default
|
|
||||||
rpc_max_send_message_size = "512MB"
|
|
||||||
|
|
||||||
## Enable telemetry to collect anonymous usage data. Enabled by default.
|
## Enable telemetry to collect anonymous usage data. Enabled by default.
|
||||||
#+ enable_telemetry = true
|
#+ enable_telemetry = true
|
||||||
|
|
||||||
@@ -56,10 +36,11 @@ body_limit = "64MB"
|
|||||||
## The gRPC server options.
|
## The gRPC server options.
|
||||||
[grpc]
|
[grpc]
|
||||||
## The address to bind the gRPC server.
|
## The address to bind the gRPC server.
|
||||||
addr = "127.0.0.1:3001"
|
bind_addr = "127.0.0.1:3001"
|
||||||
## The hostname advertised to the metasrv,
|
## The address advertised to the metasrv, and used for connections from outside the host.
|
||||||
## and used for connections from outside the host
|
## If left empty or unset, the server will automatically use the IP address of the first network interface
|
||||||
hostname = "127.0.0.1"
|
## on the host, with the same port number as the one specified in `grpc.bind_addr`.
|
||||||
|
server_addr = "127.0.0.1:3001"
|
||||||
## The number of server worker threads.
|
## The number of server worker threads.
|
||||||
runtime_size = 8
|
runtime_size = 8
|
||||||
## The maximum receive message size for gRPC server.
|
## The maximum receive message size for gRPC server.
|
||||||
@@ -250,6 +231,7 @@ overwrite_entry_start_id = false
|
|||||||
# secret_access_key = "123456"
|
# secret_access_key = "123456"
|
||||||
# endpoint = "https://s3.amazonaws.com"
|
# endpoint = "https://s3.amazonaws.com"
|
||||||
# region = "us-west-2"
|
# region = "us-west-2"
|
||||||
|
# enable_virtual_host_style = false
|
||||||
|
|
||||||
# Example of using Oss as the storage.
|
# Example of using Oss as the storage.
|
||||||
# [storage]
|
# [storage]
|
||||||
@@ -516,6 +498,20 @@ aux_path = ""
|
|||||||
## The max capacity of the staging directory.
|
## The max capacity of the staging directory.
|
||||||
staging_size = "2GB"
|
staging_size = "2GB"
|
||||||
|
|
||||||
|
## The TTL of the staging directory.
|
||||||
|
## Defaults to 7 days.
|
||||||
|
## Set it to "0s" to disable the TTL.
|
||||||
|
staging_ttl = "7d"
|
||||||
|
|
||||||
|
## Cache size for inverted index metadata.
|
||||||
|
metadata_cache_size = "64MiB"
|
||||||
|
|
||||||
|
## Cache size for inverted index content.
|
||||||
|
content_cache_size = "128MiB"
|
||||||
|
|
||||||
|
## Page size for inverted index content cache.
|
||||||
|
content_cache_page_size = "64KiB"
|
||||||
|
|
||||||
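For orientation, the staging TTL and the relocated inverted-index cache options added above form a single `[region_engine.mito.index]` stanza roughly like the sketch below. The values are the documented defaults; the layout is illustrative, not a verbatim excerpt from the shipped file.

```toml
## Sketch: the index options gathered in one place (documented defaults).
[region_engine.mito.index]
aux_path = ""                      # defaults to {data_home}/index_intermediate
staging_size = "2GB"               # max capacity of the staging directory
staging_ttl = "7d"                 # "0s" disables the TTL
metadata_cache_size = "64MiB"      # inverted index metadata cache
content_cache_size = "128MiB"      # inverted index content cache
content_cache_page_size = "64KiB"  # page size for the content cache
```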
## The options for inverted index in Mito engine.
|
## The options for inverted index in Mito engine.
|
||||||
[region_engine.mito.inverted_index]
|
[region_engine.mito.inverted_index]
|
||||||
|
|
||||||
@@ -543,15 +539,6 @@ mem_threshold_on_create = "auto"
|
|||||||
## Deprecated, use `region_engine.mito.index.aux_path` instead.
|
## Deprecated, use `region_engine.mito.index.aux_path` instead.
|
||||||
intermediate_path = ""
|
intermediate_path = ""
|
||||||
|
|
||||||
## Cache size for inverted index metadata.
|
|
||||||
metadata_cache_size = "64MiB"
|
|
||||||
|
|
||||||
## Cache size for inverted index content.
|
|
||||||
content_cache_size = "128MiB"
|
|
||||||
|
|
||||||
## Page size for inverted index content cache.
|
|
||||||
content_cache_page_size = "64KiB"
|
|
||||||
|
|
||||||
## The options for full-text index in Mito engine.
|
## The options for full-text index in Mito engine.
|
||||||
[region_engine.mito.fulltext_index]
|
[region_engine.mito.fulltext_index]
|
||||||
|
|
||||||
@@ -622,6 +609,12 @@ fork_dictionary_bytes = "1GiB"
|
|||||||
## Enable the file engine.
|
## Enable the file engine.
|
||||||
[region_engine.file]
|
[region_engine.file]
|
||||||
|
|
||||||
|
[[region_engine]]
|
||||||
|
## Metric engine options.
|
||||||
|
[region_engine.metric]
|
||||||
|
## Whether to enable the experimental sparse primary key encoding.
|
||||||
|
experimental_sparse_primary_key_encoding = false
|
||||||
|
|
||||||
## The logging options.
|
## The logging options.
|
||||||
[logging]
|
[logging]
|
||||||
## The directory to store the log files. If set to empty, logs will not be written to files.
|
## The directory to store the log files. If set to empty, logs will not be written to files.
|
||||||
|
|||||||
@@ -5,13 +5,19 @@ mode = "distributed"
|
|||||||
## @toml2docs:none-default
|
## @toml2docs:none-default
|
||||||
node_id = 14
|
node_id = 14
|
||||||
|
|
||||||
|
## flow engine options.
|
||||||
|
[flow]
|
||||||
|
## The number of flow workers in the flownode.
|
||||||
|
## If not set (or set to 0), the number of CPU cores divided by 2 is used.
|
||||||
|
#+num_workers=0
|
||||||
|
|
||||||
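As a hedged illustration of the new `[flow]` section (the value below is a hypothetical choice, not a recommendation), an operator who prefers an explicit worker count over the CPU-derived default could set:

```toml
[flow]
## 0 (the default) means: number of CPU cores divided by 2.
num_workers = 4
```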
## The gRPC server options.
|
## The gRPC server options.
|
||||||
[grpc]
|
[grpc]
|
||||||
## The address to bind the gRPC server.
|
## The address to bind the gRPC server.
|
||||||
addr = "127.0.0.1:6800"
|
bind_addr = "127.0.0.1:6800"
|
||||||
## The hostname advertised to the metasrv,
|
## The address advertised to the metasrv,
|
||||||
## and used for connections from outside the host
|
## and used for connections from outside the host
|
||||||
hostname = "127.0.0.1"
|
server_addr = "127.0.0.1:6800"
|
||||||
## The number of server worker threads.
|
## The number of server worker threads.
|
||||||
runtime_size = 2
|
runtime_size = 2
|
||||||
## The maximum receive message size for gRPC server.
|
## The maximum receive message size for gRPC server.
|
||||||
@@ -19,6 +25,16 @@ max_recv_message_size = "512MB"
|
|||||||
## The maximum send message size for gRPC server.
|
## The maximum send message size for gRPC server.
|
||||||
max_send_message_size = "512MB"
|
max_send_message_size = "512MB"
|
||||||
|
|
||||||
|
## The HTTP server options.
|
||||||
|
[http]
|
||||||
|
## The address to bind the HTTP server.
|
||||||
|
addr = "127.0.0.1:4000"
|
||||||
|
## HTTP request timeout. Set to 0 to disable timeout.
|
||||||
|
timeout = "30s"
|
||||||
|
## HTTP request body limit.
|
||||||
|
## The following units are supported: `B`, `KB`, `KiB`, `MB`, `MiB`, `GB`, `GiB`, `TB`, `TiB`, `PB`, `PiB`.
|
||||||
|
## Set to 0 to disable limit.
|
||||||
|
body_limit = "64MB"
|
||||||
|
|
||||||
## The metasrv client options.
|
## The metasrv client options.
|
||||||
[meta_client]
|
[meta_client]
|
||||||
|
|||||||
@@ -31,14 +31,21 @@ timeout = "30s"
|
|||||||
## The following units are supported: `B`, `KB`, `KiB`, `MB`, `MiB`, `GB`, `GiB`, `TB`, `TiB`, `PB`, `PiB`.
|
## The following units are supported: `B`, `KB`, `KiB`, `MB`, `MiB`, `GB`, `GiB`, `TB`, `TiB`, `PB`, `PiB`.
|
||||||
## Set to 0 to disable limit.
|
## Set to 0 to disable limit.
|
||||||
body_limit = "64MB"
|
body_limit = "64MB"
|
||||||
|
## HTTP CORS support; it's turned on by default.
|
||||||
|
## This allows browsers to access HTTP APIs without CORS restrictions.
|
||||||
|
enable_cors = true
|
||||||
|
## Customize allowed origins for HTTP CORS.
|
||||||
|
## @toml2docs:none-default
|
||||||
|
cors_allowed_origins = ["https://example.com"]
|
||||||
|
|
||||||
## The gRPC server options.
|
## The gRPC server options.
|
||||||
[grpc]
|
[grpc]
|
||||||
## The address to bind the gRPC server.
|
## The address to bind the gRPC server.
|
||||||
addr = "127.0.0.1:4001"
|
bind_addr = "127.0.0.1:4001"
|
||||||
## The hostname advertised to the metasrv,
|
## The address advertised to the metasrv, and used for connections from outside the host.
|
||||||
## and used for connections from outside the host
|
## If left empty or unset, the server will automatically use the IP address of the first network interface
|
||||||
hostname = "127.0.0.1"
|
## on the host, with the same port number as the one specified in `grpc.bind_addr`.
|
||||||
|
server_addr = "127.0.0.1:4001"
|
||||||
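A minimal sketch of how the renamed pair is typically split once the process must be reachable from other hosts: bind on all interfaces, advertise a routable address to the metasrv. The `0.0.0.0` bind and the `10.0.0.5` address are placeholder assumptions, not defaults.

```toml
[grpc]
bind_addr = "0.0.0.0:4001"     # listen on all interfaces (placeholder)
server_addr = "10.0.0.5:4001"  # routable address advertised to the metasrv (placeholder)
```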
## The number of server worker threads.
|
## The number of server worker threads.
|
||||||
runtime_size = 8
|
runtime_size = 8
|
||||||
|
|
||||||
@@ -67,6 +74,9 @@ enable = true
|
|||||||
addr = "127.0.0.1:4002"
|
addr = "127.0.0.1:4002"
|
||||||
## The number of server worker threads.
|
## The number of server worker threads.
|
||||||
runtime_size = 2
|
runtime_size = 2
|
||||||
|
## Server-side keep-alive time.
|
||||||
|
## Set to 0 (default) to disable.
|
||||||
|
keep_alive = "0s"
|
||||||
|
|
||||||
# MySQL server TLS options.
|
# MySQL server TLS options.
|
||||||
[mysql.tls]
|
[mysql.tls]
|
||||||
@@ -98,6 +108,9 @@ enable = true
|
|||||||
addr = "127.0.0.1:4003"
|
addr = "127.0.0.1:4003"
|
||||||
## The number of server worker threads.
|
## The number of server worker threads.
|
||||||
runtime_size = 2
|
runtime_size = 2
|
||||||
|
## Server-side keep-alive time.
|
||||||
|
## Set to 0 (default) to disable.
|
||||||
|
keep_alive = "0s"
|
||||||
|
|
||||||
## PostgreSQL server TLS options, see `mysql.tls` section.
|
## PostgreSQL server TLS options, see `mysql.tls` section.
|
||||||
[postgres.tls]
|
[postgres.tls]
|
||||||
@@ -125,6 +138,11 @@ enable = true
|
|||||||
## Whether to enable InfluxDB protocol in HTTP API.
|
## Whether to enable InfluxDB protocol in HTTP API.
|
||||||
enable = true
|
enable = true
|
||||||
|
|
||||||
|
## Jaeger protocol options.
|
||||||
|
[jaeger]
|
||||||
|
## Whether to enable Jaeger protocol in HTTP API.
|
||||||
|
enable = true
|
||||||
|
|
||||||
## Prometheus remote storage options
|
## Prometheus remote storage options
|
||||||
[prom_store]
|
[prom_store]
|
||||||
## Whether to enable Prometheus remote write and read in HTTP API.
|
## Whether to enable Prometheus remote write and read in HTTP API.
|
||||||
|
|||||||
@@ -4,17 +4,35 @@ data_home = "/tmp/metasrv/"
|
|||||||
## The bind address of metasrv.
|
## The bind address of metasrv.
|
||||||
bind_addr = "127.0.0.1:3002"
|
bind_addr = "127.0.0.1:3002"
|
||||||
|
|
||||||
## The communication server address for frontend and datanode to connect to metasrv, "127.0.0.1:3002" by default for localhost.
|
## The communication server address for the frontend and datanode to connect to metasrv.
|
||||||
|
## If left empty or unset, the server will automatically use the IP address of the first network interface
|
||||||
|
## on the host, with the same port number as the one specified in `bind_addr`.
|
||||||
server_addr = "127.0.0.1:3002"
|
server_addr = "127.0.0.1:3002"
|
||||||
|
|
||||||
## Store server addresses; defaults to the etcd store.
|
## Store server addresses; defaults to the etcd store.
|
||||||
|
## For postgres store, the format is:
|
||||||
|
## "password=password dbname=postgres user=postgres host=localhost port=5432"
|
||||||
|
## For etcd store, the format is:
|
||||||
|
## "127.0.0.1:2379"
|
||||||
store_addrs = ["127.0.0.1:2379"]
|
store_addrs = ["127.0.0.1:2379"]
|
||||||
|
|
||||||
## If it's not empty, the metasrv will store all data with this key prefix.
|
## If it's not empty, the metasrv will store all data with this key prefix.
|
||||||
store_key_prefix = ""
|
store_key_prefix = ""
|
||||||
|
|
||||||
## The datastore for meta server.
|
## The datastore for meta server.
|
||||||
backend = "EtcdStore"
|
## Available values:
|
||||||
|
## - `etcd_store` (default value)
|
||||||
|
## - `memory_store`
|
||||||
|
## - `postgres_store`
|
||||||
|
backend = "etcd_store"
|
||||||
|
|
||||||
|
## Table name in the RDS to store metadata. Effective when using an RDS kvbackend.
|
||||||
|
## **Only used when backend is `postgres_store`.**
|
||||||
|
meta_table_name = "greptime_metakv"
|
||||||
|
|
||||||
|
## Advisory lock id in PostgreSQL for election. Effective when using PostgreSQL as the kvbackend.
|
||||||
|
## Only used when backend is `postgres_store`.
|
||||||
|
meta_election_lock_id = 1
|
||||||
|
|
||||||
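Putting the new backend-related keys together, a metasrv backed by PostgreSQL rather than etcd could look roughly like the sketch below. The connection string follows the `store_addrs` format documented above; host and credentials are placeholders.

```toml
## Sketch: metasrv metadata stored in PostgreSQL (placeholder credentials).
backend = "postgres_store"
store_addrs = ["password=password dbname=postgres user=postgres host=localhost port=5432"]
meta_table_name = "greptime_metakv"  # RDS table that stores the metadata
meta_election_lock_id = 1            # PostgreSQL advisory lock id used for election
```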
## Datanode selector type.
|
## Datanode selector type.
|
||||||
## - `round_robin` (default value)
|
## - `round_robin` (default value)
|
||||||
@@ -32,6 +50,9 @@ use_memory_store = false
|
|||||||
## - Using shared storage (e.g., s3).
|
## - Using shared storage (e.g., s3).
|
||||||
enable_region_failover = false
|
enable_region_failover = false
|
||||||
|
|
||||||
|
## Max allowed idle time before removing node info from metasrv memory.
|
||||||
|
node_max_idle_time = "24hours"
|
||||||
|
|
||||||
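As a hypothetical example (not a recommendation), a cluster that already uses remote WAL and shared storage could opt into failover while keeping the default idle timeout:

```toml
enable_region_failover = true   # requires remote WAL and shared storage (e.g., S3)
node_max_idle_time = "24hours"  # default
```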
## Whether to enable greptimedb telemetry. Enabled by default.
|
## Whether to enable greptimedb telemetry. Enabled by default.
|
||||||
#+ enable_telemetry = true
|
#+ enable_telemetry = true
|
||||||
|
|
||||||
@@ -113,6 +134,8 @@ num_topics = 64
|
|||||||
selector_type = "round_robin"
|
selector_type = "round_robin"
|
||||||
|
|
||||||
## A Kafka topic is constructed by concatenating `topic_name_prefix` and `topic_id`.
|
## A Kafka topic is constructed by concatenating `topic_name_prefix` and `topic_id`.
|
||||||
|
## Only accepts strings that match the following regular expression pattern:
|
||||||
|
## [a-zA-Z_:-][a-zA-Z0-9_:\-\.@#]*
|
||||||
## e.g., greptimedb_wal_topic_0, greptimedb_wal_topic_1.
|
## e.g., greptimedb_wal_topic_0, greptimedb_wal_topic_1.
|
||||||
topic_name_prefix = "greptimedb_wal_topic"
|
topic_name_prefix = "greptimedb_wal_topic"
|
||||||
|
|
||||||
|
|||||||
@@ -39,11 +39,17 @@ timeout = "30s"
|
|||||||
## The following units are supported: `B`, `KB`, `KiB`, `MB`, `MiB`, `GB`, `GiB`, `TB`, `TiB`, `PB`, `PiB`.
|
## The following units are supported: `B`, `KB`, `KiB`, `MB`, `MiB`, `GB`, `GiB`, `TB`, `TiB`, `PB`, `PiB`.
|
||||||
## Set to 0 to disable limit.
|
## Set to 0 to disable limit.
|
||||||
body_limit = "64MB"
|
body_limit = "64MB"
|
||||||
|
## HTTP CORS support; it's turned on by default.
|
||||||
|
## This allows browsers to access HTTP APIs without CORS restrictions.
|
||||||
|
enable_cors = true
|
||||||
|
## Customize allowed origins for HTTP CORS.
|
||||||
|
## @toml2docs:none-default
|
||||||
|
cors_allowed_origins = ["https://example.com"]
|
||||||
|
|
||||||
## The gRPC server options.
|
## The gRPC server options.
|
||||||
[grpc]
|
[grpc]
|
||||||
## The address to bind the gRPC server.
|
## The address to bind the gRPC server.
|
||||||
addr = "127.0.0.1:4001"
|
bind_addr = "127.0.0.1:4001"
|
||||||
## The number of server worker threads.
|
## The number of server worker threads.
|
||||||
runtime_size = 8
|
runtime_size = 8
|
||||||
|
|
||||||
@@ -72,6 +78,9 @@ enable = true
|
|||||||
addr = "127.0.0.1:4002"
|
addr = "127.0.0.1:4002"
|
||||||
## The number of server worker threads.
|
## The number of server worker threads.
|
||||||
runtime_size = 2
|
runtime_size = 2
|
||||||
|
## Server-side keep-alive time.
|
||||||
|
## Set to 0 (default) to disable.
|
||||||
|
keep_alive = "0s"
|
||||||
|
|
||||||
# MySQL server TLS options.
|
# MySQL server TLS options.
|
||||||
[mysql.tls]
|
[mysql.tls]
|
||||||
@@ -103,6 +112,9 @@ enable = true
|
|||||||
addr = "127.0.0.1:4003"
|
addr = "127.0.0.1:4003"
|
||||||
## The number of server worker threads.
|
## The number of server worker threads.
|
||||||
runtime_size = 2
|
runtime_size = 2
|
||||||
|
## Server-side keep-alive time.
|
||||||
|
## Set to 0 (default) to disable.
|
||||||
|
keep_alive = "0s"
|
||||||
|
|
||||||
## PostgreSQL server TLS options, see `mysql.tls` section.
|
## PostgreSQL server TLS options, see `mysql.tls` section.
|
||||||
[postgres.tls]
|
[postgres.tls]
|
||||||
@@ -130,6 +142,11 @@ enable = true
|
|||||||
## Whether to enable InfluxDB protocol in HTTP API.
|
## Whether to enable InfluxDB protocol in HTTP API.
|
||||||
enable = true
|
enable = true
|
||||||
|
|
||||||
|
## Jaeger protocol options.
|
||||||
|
[jaeger]
|
||||||
|
## Whether to enable Jaeger protocol in HTTP API.
|
||||||
|
enable = true
|
||||||
|
|
||||||
## Prometheus remote storage options
|
## Prometheus remote storage options
|
||||||
[prom_store]
|
[prom_store]
|
||||||
## Whether to enable Prometheus remote write and read in HTTP API.
|
## Whether to enable Prometheus remote write and read in HTTP API.
|
||||||
@@ -153,11 +170,11 @@ dir = "/tmp/greptimedb/wal"
|
|||||||
## **It's only used when the provider is `raft_engine`**.
|
## **It's only used when the provider is `raft_engine`**.
|
||||||
file_size = "128MB"
|
file_size = "128MB"
|
||||||
|
|
||||||
## The threshold of the WAL size to trigger a flush.
|
## The threshold of the WAL size to trigger a purge.
|
||||||
## **It's only used when the provider is `raft_engine`**.
|
## **It's only used when the provider is `raft_engine`**.
|
||||||
purge_threshold = "1GB"
|
purge_threshold = "1GB"
|
||||||
|
|
||||||
## The interval to trigger a flush.
|
## The interval to trigger a purge.
|
||||||
## **It's only used when the provider is `raft_engine`**.
|
## **It's only used when the provider is `raft_engine`**.
|
||||||
purge_interval = "1m"
|
purge_interval = "1m"
|
||||||
|
|
||||||
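For reference, the raft_engine WAL sizing knobs touched in this hunk read as one stanza roughly as follows, using the documented defaults: a purge is triggered once the WAL size exceeds `purge_threshold`, and `purge_interval` is the interval to trigger a purge.

```toml
[wal]
provider = "raft_engine"
file_size = "128MB"       # size of each WAL segment file
purge_threshold = "1GB"   # WAL size that triggers a purge
purge_interval = "1m"     # interval to trigger a purge
```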
@@ -272,10 +289,12 @@ overwrite_entry_start_id = false
|
|||||||
|
|
||||||
## Metadata storage options.
|
## Metadata storage options.
|
||||||
[metadata_store]
|
[metadata_store]
|
||||||
## Kv file size in bytes.
|
## The size of the metadata store log file.
|
||||||
file_size = "256MB"
|
file_size = "64MB"
|
||||||
## Kv purge threshold.
|
## The threshold of the metadata store size to trigger a purge.
|
||||||
purge_threshold = "4GB"
|
purge_threshold = "256MB"
|
||||||
|
## The interval of the metadata store to trigger a purge.
|
||||||
|
purge_interval = "1m"
|
||||||
|
|
||||||
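Similarly, the revised metadata store defaults gather into the stanza below; this is a reading aid built from the documented defaults, not an authoritative excerpt.

```toml
[metadata_store]
file_size = "64MB"         # size of each metadata store log file
purge_threshold = "256MB"  # metadata store size that triggers a purge
purge_interval = "1m"      # interval to trigger a purge
```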
## Procedure storage options.
|
## Procedure storage options.
|
||||||
[procedure]
|
[procedure]
|
||||||
@@ -284,6 +303,12 @@ max_retry_times = 3
|
|||||||
## Initial retry delay of procedures, increases exponentially
|
## Initial retry delay of procedures, increases exponentially
|
||||||
retry_delay = "500ms"
|
retry_delay = "500ms"
|
||||||
|
|
||||||
|
## flow engine options.
|
||||||
|
[flow]
|
||||||
|
## The number of flow workers in the flownode.
|
||||||
|
## If not set (or set to 0), the number of CPU cores divided by 2 is used.
|
||||||
|
#+num_workers=0
|
||||||
|
|
||||||
# Example of using S3 as the storage.
|
# Example of using S3 as the storage.
|
||||||
# [storage]
|
# [storage]
|
||||||
# type = "S3"
|
# type = "S3"
|
||||||
@@ -293,6 +318,7 @@ retry_delay = "500ms"
|
|||||||
# secret_access_key = "123456"
|
# secret_access_key = "123456"
|
||||||
# endpoint = "https://s3.amazonaws.com"
|
# endpoint = "https://s3.amazonaws.com"
|
||||||
# region = "us-west-2"
|
# region = "us-west-2"
|
||||||
|
# enable_virtual_host_style = false
|
||||||
|
|
||||||
# Example of using Oss as the storage.
|
# Example of using Oss as the storage.
|
||||||
# [storage]
|
# [storage]
|
||||||
@@ -559,6 +585,20 @@ aux_path = ""
|
|||||||
## The max capacity of the staging directory.
|
## The max capacity of the staging directory.
|
||||||
staging_size = "2GB"
|
staging_size = "2GB"
|
||||||
|
|
||||||
|
## The TTL of the staging directory.
|
||||||
|
## Defaults to 7 days.
|
||||||
|
## Setting it to "0s" to disable TTL.
|
||||||
|
staging_ttl = "7d"
|
||||||
|
|
||||||
|
## Cache size for inverted index metadata.
|
||||||
|
metadata_cache_size = "64MiB"
|
||||||
|
|
||||||
|
## Cache size for inverted index content.
|
||||||
|
content_cache_size = "128MiB"
|
||||||
|
|
||||||
|
## Page size for inverted index content cache.
|
||||||
|
content_cache_page_size = "64KiB"
|
||||||
|
|
||||||
## The options for inverted index in Mito engine.
|
## The options for inverted index in Mito engine.
|
||||||
[region_engine.mito.inverted_index]
|
[region_engine.mito.inverted_index]
|
||||||
|
|
||||||
@@ -586,15 +626,6 @@ mem_threshold_on_create = "auto"
|
|||||||
## Deprecated, use `region_engine.mito.index.aux_path` instead.
|
## Deprecated, use `region_engine.mito.index.aux_path` instead.
|
||||||
intermediate_path = ""
|
intermediate_path = ""
|
||||||
|
|
||||||
## Cache size for inverted index metadata.
|
|
||||||
metadata_cache_size = "64MiB"
|
|
||||||
|
|
||||||
## Cache size for inverted index content.
|
|
||||||
content_cache_size = "128MiB"
|
|
||||||
|
|
||||||
## Page size for inverted index content cache.
|
|
||||||
content_cache_page_size = "64KiB"
|
|
||||||
|
|
||||||
## The options for full-text index in Mito engine.
|
## The options for full-text index in Mito engine.
|
||||||
[region_engine.mito.fulltext_index]
|
[region_engine.mito.fulltext_index]
|
||||||
|
|
||||||
@@ -665,6 +696,12 @@ fork_dictionary_bytes = "1GiB"
|
|||||||
## Enable the file engine.
|
## Enable the file engine.
|
||||||
[region_engine.file]
|
[region_engine.file]
|
||||||
|
|
||||||
|
[[region_engine]]
|
||||||
|
## Metric engine options.
|
||||||
|
[region_engine.metric]
|
||||||
|
## Whether to enable the experimental sparse primary key encoding.
|
||||||
|
experimental_sparse_primary_key_encoding = false
|
||||||
|
|
||||||
## The logging options.
|
## The logging options.
|
||||||
[logging]
|
[logging]
|
||||||
## The directory to store the log files. If set to empty, logs will not be written to files.
|
## The directory to store the log files. If set to empty, logs will not be written to files.
|
||||||
|
|||||||
cyborg/bin/bump-doc-version.ts (new file, 75 lines)
@@ -0,0 +1,75 @@
/*
 * Copyright 2023 Greptime Team
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

import * as core from "@actions/core";
import {obtainClient} from "@/common";

async function triggerWorkflow(workflowId: string, version: string) {
    const docsClient = obtainClient("DOCS_REPO_TOKEN")
    try {
        await docsClient.rest.actions.createWorkflowDispatch({
            owner: "GreptimeTeam",
            repo: "docs",
            workflow_id: workflowId,
            ref: "main",
            inputs: {
                version,
            },
        });
        console.log(`Successfully triggered ${workflowId} workflow with version ${version}`);
    } catch (error) {
        core.setFailed(`Failed to trigger workflow: ${error.message}`);
    }
}

function determineWorkflow(version: string): [string, string] {
    // Check if it's a nightly version
    if (version.includes('nightly')) {
        return ['bump-nightly-version.yml', version];
    }

    const parts = version.split('.');

    if (parts.length !== 3) {
        throw new Error('Invalid version format');
    }

    // If patch version (last number) is 0, it's a major version
    // Return only major.minor version
    if (parts[2] === '0') {
        return ['bump-version.yml', `${parts[0]}.${parts[1]}`];
    }

    // Otherwise it's a patch version, use full version
    return ['bump-patch-version.yml', version];
}

const version = process.env.VERSION;
if (!version) {
    core.setFailed("VERSION environment variable is required");
    process.exit(1);
}

// Remove 'v' prefix if exists
const cleanVersion = version.startsWith('v') ? version.slice(1) : version;

try {
    const [workflowId, apiVersion] = determineWorkflow(cleanVersion);
    triggerWorkflow(workflowId, apiVersion);
} catch (error) {
    core.setFailed(`Error processing version: ${error.message}`);
    process.exit(1);
}
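For context, a quick sketch of how this new script might be exercised locally; the tsx runner, working directory, and token value are assumptions, not part of this change (in CI the VERSION variable and DOCS_REPO_TOKEN secret would come from the workflow):

```bash
# Hypothetical local run of bump-doc-version.ts (runner and paths assumed).
cd cyborg
export DOCS_REPO_TOKEN="<token with workflow-dispatch permission>"            # assumption
VERSION=v0.12.1 npx tsx bin/bump-doc-version.ts                               # patch -> bump-patch-version.yml
VERSION=v0.13.0 npx tsx bin/bump-doc-version.ts                               # x.y.0 -> bump-version.yml with "0.13"
VERSION=v0.13.0-nightly-20250120 npx tsx bin/bump-doc-version.ts              # nightly -> bump-nightly-version.yml
```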
cyborg/bin/bump-versions.ts (new file, 156 lines)
@@ -0,0 +1,156 @@
/*
 * Copyright 2023 Greptime Team
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

import * as core from "@actions/core";
import {obtainClient} from "@/common";

interface RepoConfig {
    tokenEnv: string;
    repo: string;
    workflowLogic: (version: string) => [string, string] | null;
}

const REPO_CONFIGS: Record<string, RepoConfig> = {
    website: {
        tokenEnv: "WEBSITE_REPO_TOKEN",
        repo: "website",
        workflowLogic: (version: string) => {
            // Skip nightly versions for website
            if (version.includes('nightly')) {
                console.log('Nightly version detected for website, skipping workflow trigger.');
                return null;
            }
            return ['bump-patch-version.yml', version];
        }
    },
    demo: {
        tokenEnv: "DEMO_REPO_TOKEN",
        repo: "demo-scene",
        workflowLogic: (version: string) => {
            // Skip nightly versions for demo
            if (version.includes('nightly')) {
                console.log('Nightly version detected for demo, skipping workflow trigger.');
                return null;
            }
            return ['bump-patch-version.yml', version];
        }
    },
    docs: {
        tokenEnv: "DOCS_REPO_TOKEN",
        repo: "docs",
        workflowLogic: (version: string) => {
            // Check if it's a nightly version
            if (version.includes('nightly')) {
                return ['bump-nightly-version.yml', version];
            }

            const parts = version.split('.');
            if (parts.length !== 3) {
                throw new Error('Invalid version format');
            }

            // If patch version (last number) is 0, it's a major version
            // Return only major.minor version
            if (parts[2] === '0') {
                return ['bump-version.yml', `${parts[0]}.${parts[1]}`];
            }

            // Otherwise it's a patch version, use full version
            return ['bump-patch-version.yml', version];
        }
    }
};

async function triggerWorkflow(repoConfig: RepoConfig, workflowId: string, version: string) {
    const client = obtainClient(repoConfig.tokenEnv);
    try {
        await client.rest.actions.createWorkflowDispatch({
            owner: "GreptimeTeam",
            repo: repoConfig.repo,
            workflow_id: workflowId,
            ref: "main",
            inputs: {
                version,
            },
        });
        console.log(`Successfully triggered ${workflowId} workflow for ${repoConfig.repo} with version ${version}`);
    } catch (error) {
        core.setFailed(`Failed to trigger workflow for ${repoConfig.repo}: ${error.message}`);
        throw error;
    }
}

async function processRepo(repoName: string, version: string) {
    const repoConfig = REPO_CONFIGS[repoName];
    if (!repoConfig) {
        throw new Error(`Unknown repository: ${repoName}`);
    }

    try {
        const workflowResult = repoConfig.workflowLogic(version);
        if (workflowResult === null) {
            // Skip this repo (e.g., nightly version for website)
            return;
        }

        const [workflowId, apiVersion] = workflowResult;
        await triggerWorkflow(repoConfig, workflowId, apiVersion);
    } catch (error) {
        core.setFailed(`Error processing ${repoName} with version ${version}: ${error.message}`);
        throw error;
    }
}

async function main() {
    const version = process.env.VERSION;
    if (!version) {
        core.setFailed("VERSION environment variable is required");
        process.exit(1);
    }

    // Remove 'v' prefix if exists
    const cleanVersion = version.startsWith('v') ? version.slice(1) : version;

    // Get target repositories from environment variable
    // Default to both if not specified
    const targetRepos = process.env.TARGET_REPOS?.split(',').map(repo => repo.trim()) || ['website', 'docs'];

    console.log(`Processing version ${cleanVersion} for repositories: ${targetRepos.join(', ')}`);

    const errors: string[] = [];

    // Process each repository
    for (const repo of targetRepos) {
        try {
            await processRepo(repo, cleanVersion);
        } catch (error) {
            errors.push(`${repo}: ${error.message}`);
        }
    }

    if (errors.length > 0) {
        core.setFailed(`Failed to process some repositories: ${errors.join('; ')}`);
        process.exit(1);
    }

    console.log('All repositories processed successfully');
}

// Execute main function
main().catch((error) => {
    core.setFailed(`Unexpected error: ${error.message}`);
    process.exit(1);
});
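Likewise, a hedged usage sketch for the multi-repo variant; TARGET_REPOS and the token variables mirror what the script reads, while the tsx invocation itself is an assumption:

```bash
# Hypothetical invocation; defaults to "website,docs" when TARGET_REPOS is unset.
export VERSION=v0.12.1
export TARGET_REPOS="website,docs,demo"
export WEBSITE_REPO_TOKEN="..." DOCS_REPO_TOKEN="..." DEMO_REPO_TOKEN="..."   # assumptions
npx tsx cyborg/bin/bump-versions.ts
```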
@@ -55,12 +55,25 @@ async function main() {
     await client.rest.issues.addLabels({
         owner, repo, issue_number: number, labels: [labelDocsRequired],
     })
+
+    // Get available assignees for the docs repo
+    const assigneesResponse = await docsClient.rest.issues.listAssignees({
+        owner: 'GreptimeTeam',
+        repo: 'docs',
+    })
+    const validAssignees = assigneesResponse.data.map(assignee => assignee.login)
+    core.info(`Available assignees: ${validAssignees.join(', ')}`)
+
+    // Check if the actor is a valid assignee, otherwise fallback to fengjiachun
+    const assignee = validAssignees.includes(actor) ? actor : 'fengjiachun'
+    core.info(`Assigning issue to: ${assignee}`)
+
     await docsClient.rest.issues.create({
         owner: 'GreptimeTeam',
         repo: 'docs',
         title: `Update docs for ${title}`,
         body: `A document change request is generated from ${html_url}`,
-        assignee: actor,
+        assignee: assignee,
     }).then((res) => {
         core.info(`Created issue ${res.data}`)
     })
@@ -22,7 +22,7 @@ RUN unzip protoc-3.15.8-linux-x86_64.zip -d /usr/local/
 # Install Rust
 SHELL ["/bin/bash", "-c"]
 RUN curl --proto '=https' --tlsv1.2 -sSf https://sh.rustup.rs | sh -s -- --no-modify-path --default-toolchain none -y
-ENV PATH /opt/rh/rh-python38/root/usr/bin:/usr/local/bin:/root/.cargo/bin/:$PATH
+ENV PATH /usr/local/bin:/root/.cargo/bin/:$PATH
 
 # Build the project in release mode.
 RUN --mount=target=.,rw \
@@ -7,10 +7,8 @@ ARG OUTPUT_DIR
 ENV LANG en_US.utf8
 WORKDIR /greptimedb
 
-# Add PPA for Python 3.10.
 RUN apt-get update && \
-    DEBIAN_FRONTEND=noninteractive apt-get install -y software-properties-common && \
-    add-apt-repository ppa:deadsnakes/ppa -y
+    DEBIAN_FRONTEND=noninteractive apt-get install -y software-properties-common
 
 # Install dependencies.
 RUN --mount=type=cache,target=/var/cache/apt \
@@ -9,16 +9,20 @@ RUN cp ${NDK_ROOT}/toolchains/llvm/prebuilt/linux-x86_64/lib64/clang/14.0.7/lib/
 # Install dependencies.
 RUN apt-get update && apt-get install -y \
     libssl-dev \
-    protobuf-compiler \
     curl \
     git \
+    unzip \
     build-essential \
-    pkg-config \
-    python3 \
-    python3-dev \
-    python3-pip \
-    && pip3 install --upgrade pip \
-    && pip3 install pyarrow
+    pkg-config
+
+# Install protoc
+ARG PROTOBUF_VERSION=29.3
+
+RUN curl -OL https://github.com/protocolbuffers/protobuf/releases/download/v${PROTOBUF_VERSION}/protoc-${PROTOBUF_VERSION}-linux-x86_64.zip && \
+    unzip protoc-${PROTOBUF_VERSION}-linux-x86_64.zip -d protoc3;
+
+RUN mv protoc3/bin/* /usr/local/bin/
+RUN mv protoc3/include/* /usr/local/include/
 
 # Trust workdir
 RUN git config --global --add safe.directory /greptimedb
@@ -12,18 +12,21 @@ RUN yum install -y epel-release \
     openssl \
     openssl-devel \
    centos-release-scl \
-    rh-python38 \
-    rh-python38-python-devel \
     which
 
 # Install protoc
-RUN curl -LO https://github.com/protocolbuffers/protobuf/releases/download/v3.15.8/protoc-3.15.8-linux-x86_64.zip
-RUN unzip protoc-3.15.8-linux-x86_64.zip -d /usr/local/
+ARG PROTOBUF_VERSION=29.3
+
+RUN curl -OL https://github.com/protocolbuffers/protobuf/releases/download/v${PROTOBUF_VERSION}/protoc-${PROTOBUF_VERSION}-linux-x86_64.zip && \
+    unzip protoc-${PROTOBUF_VERSION}-linux-x86_64.zip -d protoc3;
+
+RUN mv protoc3/bin/* /usr/local/bin/
+RUN mv protoc3/include/* /usr/local/include/
 
 # Install Rust
 SHELL ["/bin/bash", "-c"]
 RUN curl --proto '=https' --tlsv1.2 -sSf https://sh.rustup.rs | sh -s -- --no-modify-path --default-toolchain none -y
-ENV PATH /opt/rh/rh-python38/root/usr/bin:/usr/local/bin:/root/.cargo/bin/:$PATH
+ENV PATH /usr/local/bin:/root/.cargo/bin/:$PATH
 
 # Install Rust toolchains.
 ARG RUST_TOOLCHAIN
@@ -6,11 +6,8 @@ ARG DOCKER_BUILD_ROOT=.
 ENV LANG en_US.utf8
 WORKDIR /greptimedb
 
-# Add PPA for Python 3.10.
 RUN apt-get update && \
-    DEBIAN_FRONTEND=noninteractive apt-get install -y software-properties-common && \
-    add-apt-repository ppa:deadsnakes/ppa -y
+    DEBIAN_FRONTEND=noninteractive apt-get install -y software-properties-common
 
 # Install dependencies.
 RUN apt-get update && DEBIAN_FRONTEND=noninteractive apt-get install -y \
     libssl-dev \
@@ -20,39 +17,24 @@ RUN apt-get update && DEBIAN_FRONTEND=noninteractive apt-get install -y \
     ca-certificates \
     git \
     build-essential \
-    pkg-config \
-    python3.10 \
-    python3.10-dev
+    pkg-config
 
 ARG TARGETPLATFORM
 RUN echo "target platform: $TARGETPLATFORM"
 
+ARG PROTOBUF_VERSION=29.3
+
 # Install protobuf, because the one in the apt is too old (v3.12).
 RUN if [ "$TARGETPLATFORM" = "linux/arm64" ]; then \
-    curl -OL https://github.com/protocolbuffers/protobuf/releases/download/v29.1/protoc-29.1-linux-aarch_64.zip && \
-    unzip protoc-29.1-linux-aarch_64.zip -d protoc3; \
+    curl -OL https://github.com/protocolbuffers/protobuf/releases/download/v${PROTOBUF_VERSION}/protoc-${PROTOBUF_VERSION}-linux-aarch_64.zip && \
+    unzip protoc-${PROTOBUF_VERSION}-linux-aarch_64.zip -d protoc3; \
     elif [ "$TARGETPLATFORM" = "linux/amd64" ]; then \
-    curl -OL https://github.com/protocolbuffers/protobuf/releases/download/v29.1/protoc-29.1-linux-x86_64.zip && \
-    unzip protoc-29.1-linux-x86_64.zip -d protoc3; \
+    curl -OL https://github.com/protocolbuffers/protobuf/releases/download/v${PROTOBUF_VERSION}/protoc-${PROTOBUF_VERSION}-linux-x86_64.zip && \
+    unzip protoc-${PROTOBUF_VERSION}-linux-x86_64.zip -d protoc3; \
     fi
 RUN mv protoc3/bin/* /usr/local/bin/
 RUN mv protoc3/include/* /usr/local/include/
 
-# https://github.com/GreptimeTeam/greptimedb/actions/runs/10935485852/job/30357457188#step:3:7106
-# `aws-lc-sys` require gcc >= 10.3.0 to work, hence alias to use gcc-10
-RUN apt-get remove -y gcc-9 g++-9 cpp-9 && \
-    apt-get install -y gcc-10 g++-10 cpp-10 make cmake && \
-    ln -sf /usr/bin/gcc-10 /usr/bin/gcc && ln -sf /usr/bin/g++-10 /usr/bin/g++ && \
-    ln -sf /usr/bin/gcc-10 /usr/bin/cc && \
-    ln -sf /usr/bin/g++-10 /usr/bin/cpp && ln -sf /usr/bin/g++-10 /usr/bin/c++ && \
-    cc --version && gcc --version && g++ --version && cpp --version && c++ --version
-
-# Remove Python 3.8 and install pip.
-RUN apt-get -y purge python3.8 && \
-    apt-get -y autoremove && \
-    ln -s /usr/bin/python3.10 /usr/bin/python3 && \
-    curl -sS https://bootstrap.pypa.io/get-pip.py | python3.10
-
 # Silence all `safe.directory` warnings, to avoid the "detect dubious repository" error when building with submodules.
 # Disabling the safe directory check here won't pose extra security issues, because in our usage for this dev build
 # image, we use it solely on our own environment (that github action's VM, or ECS created dynamically by ourselves),
@@ -65,10 +47,6 @@ RUN apt-get -y purge python3.8 && \
 # it can be a different user that have prepared the submodules.
 RUN git config --global --add safe.directory '*'
 
-# Install Python dependencies.
-COPY $DOCKER_BUILD_ROOT/docker/python/requirements.txt /etc/greptime/requirements.txt
-RUN python3 -m pip install -r /etc/greptime/requirements.txt
-
 # Install Rust.
 SHELL ["/bin/bash", "-c"]
 RUN curl --proto '=https' --tlsv1.2 -sSf https://sh.rustup.rs | sh -s -- --no-modify-path --default-toolchain none -y
@@ -21,7 +21,7 @@ RUN apt-get update && DEBIAN_FRONTEND=noninteractive apt-get install -y \
     pkg-config
 
 # Install protoc.
-ENV PROTOC_VERSION=25.1
+ENV PROTOC_VERSION=29.3
 RUN if [ "$(uname -m)" = "x86_64" ]; then \
     PROTOC_ZIP=protoc-${PROTOC_VERSION}-linux-x86_64.zip; \
     elif [ "$(uname -m)" = "aarch64" ]; then \
@@ -39,14 +39,16 @@ services:
     container_name: metasrv
     ports:
       - 3002:3002
+      - 3000:3000
     command:
       - metasrv
       - start
-      - --bind-addr=0.0.0.0:3002
-      - --server-addr=metasrv:3002
+      - --rpc-bind-addr=0.0.0.0:3002
+      - --rpc-server-addr=metasrv:3002
       - --store-addrs=etcd0:2379
+      - --http-addr=0.0.0.0:3000
     healthcheck:
-      test: [ "CMD", "curl", "-f", "http://metasrv:3002/health" ]
+      test: [ "CMD", "curl", "-f", "http://metasrv:3000/health" ]
       interval: 5s
       timeout: 3s
      retries: 5
@@ -66,17 +68,17 @@ services:
       - datanode
       - start
       - --node-id=0
-      - --rpc-addr=0.0.0.0:3001
-      - --rpc-hostname=datanode0:3001
+      - --rpc-bind-addr=0.0.0.0:3001
+      - --rpc-server-addr=datanode0:3001
       - --metasrv-addrs=metasrv:3002
       - --http-addr=0.0.0.0:5000
     volumes:
       - /tmp/greptimedb-cluster-docker-compose/datanode0:/tmp/greptimedb
     healthcheck:
-      test: [ "CMD", "curl", "-f", "http://datanode0:5000/health" ]
+      test: [ "CMD", "curl", "-fv", "http://datanode0:5000/health" ]
       interval: 5s
       timeout: 3s
-      retries: 5
+      retries: 10
     depends_on:
       metasrv:
         condition: service_healthy
@@ -96,7 +98,7 @@ services:
       - start
       - --metasrv-addrs=metasrv:3002
       - --http-addr=0.0.0.0:4000
-      - --rpc-addr=0.0.0.0:4001
+      - --rpc-bind-addr=0.0.0.0:4001
      - --mysql-addr=0.0.0.0:4002
       - --postgres-addr=0.0.0.0:4003
     healthcheck:
@@ -115,16 +117,23 @@ services:
     container_name: flownode0
     ports:
       - 4004:4004
+      - 4005:4005
     command:
       - flownode
       - start
       - --node-id=0
       - --metasrv-addrs=metasrv:3002
-      - --rpc-addr=0.0.0.0:4004
-      - --rpc-hostname=flownode0:4004
+      - --rpc-bind-addr=0.0.0.0:4004
+      - --rpc-server-addr=flownode0:4004
+      - --http-addr=0.0.0.0:4005
     depends_on:
       frontend0:
         condition: service_healthy
+    healthcheck:
+      test: [ "CMD", "curl", "-f", "http://flownode0:4005/health" ]
+      interval: 5s
+      timeout: 3s
+      retries: 5
     networks:
       - greptimedb
 
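With the metasrv and flownode HTTP ports now published (3000 and 4005), a minimal sketch of checking the cluster from the host could look like this; the compose file path is an assumption:

```bash
docker compose -f docker-compose/cluster-with-etcd.yaml up -d   # file name assumed
curl -f http://localhost:3000/health   # metasrv, served on the new --http-addr
curl -f http://localhost:4005/health   # flownode0, same endpoint the compose healthcheck polls
```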
@@ -1,5 +0,0 @@
-numpy>=1.24.2
-pandas>=1.5.3
-pyarrow>=11.0.0
-requests>=2.28.2
-scipy>=1.10.1
@@ -4,6 +4,16 @@ This crate provides an easy approach to dump memory profiling info.
 
 ## Prerequisites
 ### jemalloc
+jeprof is already compiled in the target directory of GreptimeDB. You can find the binary and use it.
+```
+# find jeprof binary
+find . -name 'jeprof'
+# add executable permission
+chmod +x <path_to_jeprof>
+```
+The path is usually under `./target/${PROFILE}/build/tikv-jemalloc-sys-${HASH}/out/build/bin/jeprof`.
+The default version of jemalloc installed from the package manager may not have the `--collapsed` option.
+You may need to check the whether the `jeprof` version is >= `5.3.0` if you want to install it from the package manager.
 ```bash
 # for macOS
 brew install jemalloc
@@ -23,7 +33,11 @@ curl https://raw.githubusercontent.com/brendangregg/FlameGraph/master/flamegraph
 Start GreptimeDB instance with environment variables:
 
 ```bash
+# for Linux
 MALLOC_CONF=prof:true ./target/debug/greptime standalone start
+
+# for macOS
+_RJEM_MALLOC_CONF=prof:true ./target/debug/greptime standalone start
 ```
 
 Dump memory profiling data through HTTP API:
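A rough sketch of turning a dumped profile into a flamegraph with the jeprof binary found above; the profile file name and binary path are placeholders (assumptions), and flamegraph.pl is the script this README fetches from the FlameGraph repository:

```bash
JEPROF=$(find ./target -name 'jeprof' | head -n 1)
chmod +x "$JEPROF"
# greptime.hprof is a placeholder for a profile dumped via the HTTP API.
"$JEPROF" --collapsed ./target/debug/greptime greptime.hprof > profile.collapsed
./flamegraph.pl profile.collapsed > profile.svg
```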
Binary file not shown (image changed: 36 KiB → 25 KiB).
docs/logo-text-padding.png (binary, Executable file → Normal file; 25 KiB → 21 KiB, not shown).
flake.lock (generated, new file, 100 lines)
@@ -0,0 +1,100 @@
{
  "nodes": {
    "fenix": {
      "inputs": {
        "nixpkgs": [
          "nixpkgs"
        ],
        "rust-analyzer-src": "rust-analyzer-src"
      },
      "locked": {
        "lastModified": 1737613896,
        "narHash": "sha256-ldqXIglq74C7yKMFUzrS9xMT/EVs26vZpOD68Sh7OcU=",
        "owner": "nix-community",
        "repo": "fenix",
        "rev": "303a062fdd8e89f233db05868468975d17855d80",
        "type": "github"
      },
      "original": {
        "owner": "nix-community",
        "repo": "fenix",
        "type": "github"
      }
    },
    "flake-utils": {
      "inputs": {
        "systems": "systems"
      },
      "locked": {
        "lastModified": 1731533236,
        "narHash": "sha256-l0KFg5HjrsfsO/JpG+r7fRrqm12kzFHyUHqHCVpMMbI=",
        "owner": "numtide",
        "repo": "flake-utils",
        "rev": "11707dc2f618dd54ca8739b309ec4fc024de578b",
        "type": "github"
      },
      "original": {
        "owner": "numtide",
        "repo": "flake-utils",
        "type": "github"
      }
    },
    "nixpkgs": {
      "locked": {
        "lastModified": 1737569578,
        "narHash": "sha256-6qY0pk2QmUtBT9Mywdvif0i/CLVgpCjMUn6g9vB+f3M=",
        "owner": "NixOS",
        "repo": "nixpkgs",
        "rev": "47addd76727f42d351590c905d9d1905ca895b82",
        "type": "github"
      },
      "original": {
        "owner": "NixOS",
        "ref": "nixos-24.11",
        "repo": "nixpkgs",
        "type": "github"
      }
    },
    "root": {
      "inputs": {
        "fenix": "fenix",
        "flake-utils": "flake-utils",
        "nixpkgs": "nixpkgs"
      }
    },
    "rust-analyzer-src": {
      "flake": false,
      "locked": {
        "lastModified": 1737581772,
        "narHash": "sha256-t1P2Pe3FAX9TlJsCZbmJ3wn+C4qr6aSMypAOu8WNsN0=",
        "owner": "rust-lang",
        "repo": "rust-analyzer",
        "rev": "582af7ee9c8d84f5d534272fc7de9f292bd849be",
        "type": "github"
      },
      "original": {
        "owner": "rust-lang",
        "ref": "nightly",
        "repo": "rust-analyzer",
        "type": "github"
      }
    },
    "systems": {
      "locked": {
        "lastModified": 1681028828,
        "narHash": "sha256-Vy1rq5AaRuLzOxct8nz4T6wlgyUR7zLU309k9mBC768=",
        "owner": "nix-systems",
        "repo": "default",
        "rev": "da67096a3b9bf56a91d16901293e51ba5b49a27e",
        "type": "github"
      },
      "original": {
        "owner": "nix-systems",
        "repo": "default",
        "type": "github"
      }
    }
  },
  "root": "root",
  "version": 7
}
flake.nix (new file, 56 lines)
@@ -0,0 +1,56 @@
{
  description = "Development environment flake";

  inputs = {
    nixpkgs.url = "github:NixOS/nixpkgs/nixos-24.11";
    fenix = {
      url = "github:nix-community/fenix";
      inputs.nixpkgs.follows = "nixpkgs";
    };
    flake-utils.url = "github:numtide/flake-utils";
  };

  outputs = { self, nixpkgs, fenix, flake-utils }:
    flake-utils.lib.eachDefaultSystem (system:
      let
        pkgs = nixpkgs.legacyPackages.${system};
        buildInputs = with pkgs; [
          libgit2
          libz
        ];
        lib = nixpkgs.lib;
        rustToolchain = fenix.packages.${system}.fromToolchainName {
          name = (lib.importTOML ./rust-toolchain.toml).toolchain.channel;
          sha256 = "sha256-f/CVA1EC61EWbh0SjaRNhLL0Ypx2ObupbzigZp8NmL4=";
        };
      in
      {
        devShells.default = pkgs.mkShell {
          nativeBuildInputs = with pkgs; [
            pkg-config
            git
            clang
            gcc
            protobuf
            gnumake
            mold
            (rustToolchain.withComponents [
              "cargo"
              "clippy"
              "rust-src"
              "rustc"
              "rustfmt"
              "rust-analyzer"
              "llvm-tools"
            ])
            cargo-nextest
            cargo-llvm-cov
            taplo
            curl
            gnuplot ## for cargo bench
          ];

          LD_LIBRARY_PATH = pkgs.lib.makeLibraryPath buildInputs;
        };
      });
}
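Assuming Nix with flakes enabled, the lock file and flake above give a reproducible development shell; entering and using it is the usual one-liner:

```bash
nix develop          # drops into devShells.default with the pinned toolchain
cargo nextest run    # cargo-nextest is provided by nativeBuildInputs
```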
File diff suppressed because it is too large.
@@ -1,3 +1,2 @@
 [toolchain]
-channel = "nightly-2024-10-19"
-components = ["rust-analyzer"]
+channel = "nightly-2024-12-25"
@@ -53,6 +53,54 @@ get_arch_type() {
   esac
 }
 
+# Verify SHA256 checksum
+verify_sha256() {
+  file="$1"
+  expected_sha256="$2"
+
+  if command -v sha256sum >/dev/null 2>&1; then
+    actual_sha256=$(sha256sum "$file" | cut -d' ' -f1)
+  elif command -v shasum >/dev/null 2>&1; then
+    actual_sha256=$(shasum -a 256 "$file" | cut -d' ' -f1)
+  else
+    echo "Warning: No SHA256 verification tool found (sha256sum or shasum). Skipping checksum verification."
+    return 0
+  fi
+
+  if [ "$actual_sha256" = "$expected_sha256" ]; then
+    echo "SHA256 checksum verified successfully."
+    return 0
+  else
+    echo "Error: SHA256 checksum verification failed!"
+    echo "Expected: $expected_sha256"
+    echo "Actual: $actual_sha256"
+    return 1
+  fi
+}
+
+# Prompt for user confirmation (compatible with different shells)
+prompt_confirmation() {
+  message="$1"
+  printf "%s (y/N): " "$message"
+
+  # Try to read user input, fallback if read fails
+  answer=""
+  if read answer </dev/tty 2>/dev/null; then
+    case "$answer" in
+      [Yy]|[Yy][Ee][Ss])
+        return 0
+        ;;
+      *)
+        return 1
+        ;;
+    esac
+  else
+    echo ""
+    echo "Cannot read user input. Defaulting to No."
+    return 1
+  fi
+}
+
 download_artifact() {
   if [ -n "${OS_TYPE}" ] && [ -n "${ARCH_TYPE}" ]; then
     # Use the latest stable released version.
@@ -71,17 +119,104 @@ download_artifact() {
     fi
 
     echo "Downloading ${BIN}, OS: ${OS_TYPE}, Arch: ${ARCH_TYPE}, Version: ${VERSION}"
-    PACKAGE_NAME="${BIN}-${OS_TYPE}-${ARCH_TYPE}-${VERSION}.tar.gz"
+    PKG_NAME="${BIN}-${OS_TYPE}-${ARCH_TYPE}-${VERSION}"
+    PACKAGE_NAME="${PKG_NAME}.tar.gz"
+    SHA256_FILE="${PKG_NAME}.sha256sum"
+
     if [ -n "${PACKAGE_NAME}" ]; then
-      wget "https://github.com/${GITHUB_ORG}/${GITHUB_REPO}/releases/download/${VERSION}/${PACKAGE_NAME}"
+      # Check if files already exist and prompt for override
+      if [ -f "${PACKAGE_NAME}" ]; then
+        echo "File ${PACKAGE_NAME} already exists."
+        if prompt_confirmation "Do you want to override it?"; then
+          echo "Overriding existing file..."
+          rm -f "${PACKAGE_NAME}"
+        else
+          echo "Skipping download. Using existing file."
+        fi
+      fi
+
+      if [ -f "${BIN}" ]; then
+        echo "Binary ${BIN} already exists."
+        if prompt_confirmation "Do you want to override it?"; then
+          echo "Will override existing binary..."
+          rm -f "${BIN}"
+        else
+          echo "Installation cancelled."
+          exit 0
+        fi
+      fi
+
+      # Download package if not exists
+      if [ ! -f "${PACKAGE_NAME}" ]; then
+        echo "Downloading ${PACKAGE_NAME}..."
+        # Use curl instead of wget for better compatibility
+        if command -v curl >/dev/null 2>&1; then
+          if ! curl -L -o "${PACKAGE_NAME}" "https://github.com/${GITHUB_ORG}/${GITHUB_REPO}/releases/download/${VERSION}/${PACKAGE_NAME}"; then
+            echo "Error: Failed to download ${PACKAGE_NAME}"
+            exit 1
+          fi
+        elif command -v wget >/dev/null 2>&1; then
+          if ! wget -O "${PACKAGE_NAME}" "https://github.com/${GITHUB_ORG}/${GITHUB_REPO}/releases/download/${VERSION}/${PACKAGE_NAME}"; then
+            echo "Error: Failed to download ${PACKAGE_NAME}"
+            exit 1
+          fi
+        else
+          echo "Error: Neither curl nor wget is available for downloading."
+          exit 1
+        fi
+      fi
+
+      # Download and verify SHA256 checksum
+      echo "Downloading SHA256 checksum..."
+      sha256_download_success=0
+      if command -v curl >/dev/null 2>&1; then
+        if curl -L -s -o "${SHA256_FILE}" "https://github.com/${GITHUB_ORG}/${GITHUB_REPO}/releases/download/${VERSION}/${SHA256_FILE}" 2>/dev/null; then
+          sha256_download_success=1
+        fi
+      elif command -v wget >/dev/null 2>&1; then
+        if wget -q -O "${SHA256_FILE}" "https://github.com/${GITHUB_ORG}/${GITHUB_REPO}/releases/download/${VERSION}/${SHA256_FILE}" 2>/dev/null; then
+          sha256_download_success=1
+        fi
+      fi
+
+      if [ $sha256_download_success -eq 1 ] && [ -f "${SHA256_FILE}" ]; then
+        expected_sha256=$(cat "${SHA256_FILE}" | cut -d' ' -f1)
+        if [ -n "$expected_sha256" ]; then
+          if ! verify_sha256 "${PACKAGE_NAME}" "${expected_sha256}"; then
+            echo "SHA256 verification failed. Removing downloaded file."
+            rm -f "${PACKAGE_NAME}" "${SHA256_FILE}"
+            exit 1
+          fi
+        else
+          echo "Warning: Could not parse SHA256 checksum from file."
+        fi
+        rm -f "${SHA256_FILE}"
+      else
+        echo "Warning: Could not download SHA256 checksum file. Skipping verification."
+      fi
 
       # Extract the binary and clean the rest.
-      tar xvf "${PACKAGE_NAME}" && \
-      mv "${PACKAGE_NAME%.tar.gz}/${BIN}" "${PWD}" && \
-      rm -r "${PACKAGE_NAME}" && \
-      rm -r "${PACKAGE_NAME%.tar.gz}" && \
-      echo "Run './${BIN} --help' to get started"
+      echo "Extracting ${PACKAGE_NAME}..."
+      if ! tar xf "${PACKAGE_NAME}"; then
+        echo "Error: Failed to extract ${PACKAGE_NAME}"
+        exit 1
+      fi
+
+      # Find the binary in the extracted directory
+      extracted_dir="${PACKAGE_NAME%.tar.gz}"
+      if [ -f "${extracted_dir}/${BIN}" ]; then
+        mv "${extracted_dir}/${BIN}" "${PWD}/"
+        rm -f "${PACKAGE_NAME}"
+        rm -rf "${extracted_dir}"
+        chmod +x "${BIN}"
+        echo "Installation completed successfully!"
+        echo "Run './${BIN} --help' to get started"
+      else
+        echo "Error: Binary ${BIN} not found in extracted archive"
+        rm -f "${PACKAGE_NAME}"
+        rm -rf "${extracted_dir}"
+        exit 1
+      fi
     fi
   fi
 }
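The manual equivalent of the script's new checksum step, shown here with example values; the binary name, version, and platform below are assumptions, not taken from this change:

```bash
PKG_NAME="greptime-linux-amd64-v0.12.0"                                        # example values
BASE="https://github.com/GreptimeTeam/greptimedb/releases/download/v0.12.0"
curl -LO "${BASE}/${PKG_NAME}.tar.gz"
curl -LO "${BASE}/${PKG_NAME}.sha256sum"
expected=$(cut -d' ' -f1 "${PKG_NAME}.sha256sum")
actual=$(sha256sum "${PKG_NAME}.tar.gz" | cut -d' ' -f1)
[ "$expected" = "$actual" ] && echo "SHA256 OK" || echo "SHA256 mismatch"
```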
shell.nix (deleted file, 29 lines)
@@ -1,29 +0,0 @@
let
  nixpkgs = fetchTarball "https://github.com/NixOS/nixpkgs/tarball/nixos-24.11";
  fenix = import (fetchTarball "https://github.com/nix-community/fenix/archive/main.tar.gz") {};
  pkgs = import nixpkgs { config = {}; overlays = []; };
in

pkgs.mkShell rec {
  nativeBuildInputs = with pkgs; [
    pkg-config
    git
    clang
    gcc
    protobuf
    mold
    (fenix.fromToolchainFile {
      dir = ./.;
    })
    cargo-nextest
    taplo
    curl
  ];

  buildInputs = with pkgs; [
    libgit2
    libz
  ];

  LD_LIBRARY_PATH = pkgs.lib.makeLibraryPath buildInputs;
}
@@ -15,13 +15,10 @@ common-macro.workspace = true
 common-time.workspace = true
 datatypes.workspace = true
 greptime-proto.workspace = true
-paste = "1.0"
+paste.workspace = true
 prost.workspace = true
 serde_json.workspace = true
 snafu.workspace = true
 
 [build-dependencies]
 tonic-build = "0.11"
-
-[dev-dependencies]
-paste = "1.0"
@@ -33,7 +33,7 @@ pub enum Error {
         #[snafu(implicit)]
         location: Location,
         #[snafu(source)]
-        error: prost::DecodeError,
+        error: prost::UnknownEnumValue,
     },
 
     #[snafu(display("Failed to create column datatype from {:?}", from))]
@@ -86,7 +86,7 @@ impl ColumnDataTypeWrapper {
 
     /// Get a tuple of ColumnDataType and ColumnDataTypeExtension.
     pub fn to_parts(&self) -> (ColumnDataType, Option<ColumnDataTypeExtension>) {
-        (self.datatype, self.datatype_ext.clone())
+        (self.datatype, self.datatype_ext)
     }
 }
 
@@ -685,14 +685,18 @@ pub fn pb_values_to_vector_ref(data_type: &ConcreteDataType, values: Values) ->
             IntervalType::YearMonth(_) => Arc::new(IntervalYearMonthVector::from_vec(
                 values.interval_year_month_values,
             )),
-            IntervalType::DayTime(_) => Arc::new(IntervalDayTimeVector::from_vec(
-                values.interval_day_time_values,
+            IntervalType::DayTime(_) => Arc::new(IntervalDayTimeVector::from_iter_values(
+                values
+                    .interval_day_time_values
+                    .iter()
+                    .map(|x| IntervalDayTime::from_i64(*x).into()),
             )),
             IntervalType::MonthDayNano(_) => {
                 Arc::new(IntervalMonthDayNanoVector::from_iter_values(
-                    values.interval_month_day_nano_values.iter().map(|x| {
-                        IntervalMonthDayNano::new(x.months, x.days, x.nanoseconds).to_i128()
-                    }),
+                    values
+                        .interval_month_day_nano_values
+                        .iter()
+                        .map(|x| IntervalMonthDayNano::new(x.months, x.days, x.nanoseconds).into()),
                 ))
             }
         },
@@ -1495,14 +1499,22 @@ mod tests {
             column.values.as_ref().unwrap().interval_year_month_values
         );
 
-        let vector = Arc::new(IntervalDayTimeVector::from_vec(vec![4, 5, 6]));
+        let vector = Arc::new(IntervalDayTimeVector::from_vec(vec![
+            IntervalDayTime::new(0, 4).into(),
+            IntervalDayTime::new(0, 5).into(),
+            IntervalDayTime::new(0, 6).into(),
+        ]));
         push_vals(&mut column, 3, vector);
         assert_eq!(
             vec![4, 5, 6],
             column.values.as_ref().unwrap().interval_day_time_values
         );
 
-        let vector = Arc::new(IntervalMonthDayNanoVector::from_vec(vec![7, 8, 9]));
+        let vector = Arc::new(IntervalMonthDayNanoVector::from_vec(vec![
+            IntervalMonthDayNano::new(0, 0, 7).into(),
+            IntervalMonthDayNano::new(0, 0, 8).into(),
+            IntervalMonthDayNano::new(0, 0, 9).into(),
+        ]));
         let len = vector.len();
         push_vals(&mut column, 3, vector);
         (0..len).for_each(|i| {
@@ -15,10 +15,10 @@
 use std::collections::HashMap;
 
 use datatypes::schema::{
-    ColumnDefaultConstraint, ColumnSchema, FulltextAnalyzer, FulltextOptions, COMMENT_KEY,
-    FULLTEXT_KEY, INVERTED_INDEX_KEY, SKIPPING_INDEX_KEY,
+    ColumnDefaultConstraint, ColumnSchema, FulltextAnalyzer, FulltextOptions, SkippingIndexType,
+    COMMENT_KEY, FULLTEXT_KEY, INVERTED_INDEX_KEY, SKIPPING_INDEX_KEY,
 };
-use greptime_proto::v1::Analyzer;
+use greptime_proto::v1::{Analyzer, SkippingIndexType as PbSkippingIndexType};
 use snafu::ResultExt;
 
 use crate::error::{self, Result};
@@ -34,10 +34,8 @@ const SKIPPING_INDEX_GRPC_KEY: &str = "skipping_index";
 
 /// Tries to construct a `ColumnSchema` from the given `ColumnDef`.
 pub fn try_as_column_schema(column_def: &ColumnDef) -> Result<ColumnSchema> {
-    let data_type = ColumnDataTypeWrapper::try_new(
-        column_def.data_type,
-        column_def.datatype_extension.clone(),
-    )?;
+    let data_type =
+        ColumnDataTypeWrapper::try_new(column_def.data_type, column_def.datatype_extension)?;
 
     let constraint = if column_def.default_constraint.is_empty() {
         None
@@ -57,13 +55,13 @@ pub fn try_as_column_schema(column_def: &ColumnDef) -> Result<ColumnSchema> {
     }
     if let Some(options) = column_def.options.as_ref() {
         if let Some(fulltext) = options.options.get(FULLTEXT_GRPC_KEY) {
-            metadata.insert(FULLTEXT_KEY.to_string(), fulltext.clone());
+            metadata.insert(FULLTEXT_KEY.to_string(), fulltext.to_owned());
         }
         if let Some(inverted_index) = options.options.get(INVERTED_INDEX_GRPC_KEY) {
-            metadata.insert(INVERTED_INDEX_KEY.to_string(), inverted_index.clone());
+            metadata.insert(INVERTED_INDEX_KEY.to_string(), inverted_index.to_owned());
         }
         if let Some(skipping_index) = options.options.get(SKIPPING_INDEX_GRPC_KEY) {
-            metadata.insert(SKIPPING_INDEX_KEY.to_string(), skipping_index.clone());
+            metadata.insert(SKIPPING_INDEX_KEY.to_string(), skipping_index.to_owned());
         }
     }
 
@@ -82,7 +80,7 @@ pub fn options_from_column_schema(column_schema: &ColumnSchema) -> Option<Column
     if let Some(fulltext) = column_schema.metadata().get(FULLTEXT_KEY) {
         options
             .options
-            .insert(FULLTEXT_GRPC_KEY.to_string(), fulltext.clone());
+            .insert(FULLTEXT_GRPC_KEY.to_string(), fulltext.to_owned());
     }
     if let Some(inverted_index) = column_schema.metadata().get(INVERTED_INDEX_KEY) {
         options
@@ -102,7 +100,7 @@ pub fn options_from_column_schema(column_schema: &ColumnSchema) -> Option<Column
 pub fn contains_fulltext(options: &Option<ColumnOptions>) -> bool {
     options
         .as_ref()
-        .map_or(false, |o| o.options.contains_key(FULLTEXT_GRPC_KEY))
+        .is_some_and(|o| o.options.contains_key(FULLTEXT_GRPC_KEY))
 }
 
 /// Tries to construct a `ColumnOptions` from the given `FulltextOptions`.
@@ -123,6 +121,13 @@ pub fn as_fulltext_option(analyzer: Analyzer) -> FulltextAnalyzer {
     }
 }
 
+/// Tries to construct a `SkippingIndexType` from the given skipping index type.
+pub fn as_skipping_index_type(skipping_index_type: PbSkippingIndexType) -> SkippingIndexType {
+    match skipping_index_type {
+        PbSkippingIndexType::BloomFilter => SkippingIndexType::BloomFilter,
+    }
+}
+
 #[cfg(test)]
 mod tests {
 
@@ -181,14 +186,14 @@ mod tests {
         let options = options_from_column_schema(&schema);
         assert!(options.is_none());
 
-        let schema = ColumnSchema::new("test", ConcreteDataType::string_datatype(), true)
+        let mut schema = ColumnSchema::new("test", ConcreteDataType::string_datatype(), true)
             .with_fulltext_options(FulltextOptions {
                 enable: true,
                 analyzer: FulltextAnalyzer::English,
                 case_sensitive: false,
             })
-            .unwrap()
-            .set_inverted_index(true);
+            .unwrap();
+        schema.set_inverted_index(true);
         let options = options_from_column_schema(&schema).unwrap();
         assert_eq!(
             options.options.get(FULLTEXT_GRPC_KEY).unwrap(),
@@ -15,7 +15,7 @@ api.workspace = true
 arrow.workspace = true
 arrow-schema.workspace = true
 async-stream.workspace = true
-async-trait = "0.1"
+async-trait.workspace = true
 bytes.workspace = true
 common-catalog.workspace = true
 common-error.workspace = true
@@ -31,7 +31,7 @@ common-version.workspace = true
 dashmap.workspace = true
 datafusion.workspace = true
 datatypes.workspace = true
-futures = "0.3"
+futures.workspace = true
 futures-util.workspace = true
 humantime.workspace = true
 itertools.workspace = true
@@ -39,7 +39,7 @@ lazy_static.workspace = true
 meta-client.workspace = true
 moka = { workspace = true, features = ["future", "sync"] }
 partition.workspace = true
-paste = "1.0"
+paste.workspace = true
 prometheus.workspace = true
 rustc-hash.workspace = true
 serde_json.workspace = true
@@ -49,7 +49,7 @@ sql.workspace = true
 store-api.workspace = true
 table.workspace = true
 tokio.workspace = true
-tokio-stream = "0.1"
+tokio-stream.workspace = true
 
 [dev-dependencies]
 cache.workspace = true
@@ -122,13 +122,6 @@ pub enum Error {
         source: BoxedError,
     },
 
-    #[snafu(display("Failed to re-compile script due to internal error"))]
-    CompileScriptInternal {
-        #[snafu(implicit)]
-        location: Location,
-        source: BoxedError,
-    },
-
     #[snafu(display("Failed to create table, table info: {}", table_info))]
     CreateTable {
         table_info: String,
@@ -343,9 +336,7 @@ impl ErrorExt for Error {
             Error::DecodePlan { source, .. } => source.status_code(),
             Error::InvalidTableInfoInCatalog { source, .. } => source.status_code(),
 
-            Error::CompileScriptInternal { source, .. } | Error::Internal { source, .. } => {
-                source.status_code()
-            }
+            Error::Internal { source, .. } => source.status_code(),
 
             Error::QueryAccessDenied { .. } => StatusCode::AccessDenied,
             Error::Datafusion { error, .. } => datafusion_status_code::<Self>(error, None),
@@ -303,7 +303,7 @@ impl KvBackend for CachedKvBackend {
             .lock()
             .unwrap()
             .as_ref()
-            .map_or(false, |v| !self.validate_version(*v))
+            .is_some_and(|v| !self.validate_version(*v))
         {
             self.cache.invalidate(key).await;
         }
@@ -41,6 +41,7 @@ pub mod information_schema {
 }
 
 pub mod table_source;
+
 #[async_trait::async_trait]
 pub trait CatalogManager: Send + Sync {
     fn as_any(&self) -> &dyn Any;
@@ -64,6 +64,7 @@ const INIT_CAPACITY: usize = 42;
|
|||||||
/// - `uptime`: the uptime of the peer.
|
/// - `uptime`: the uptime of the peer.
|
||||||
/// - `active_time`: the time since the last activity of the peer.
|
/// - `active_time`: the time since the last activity of the peer.
|
||||||
///
|
///
|
||||||
|
#[derive(Debug)]
|
||||||
pub(super) struct InformationSchemaClusterInfo {
|
pub(super) struct InformationSchemaClusterInfo {
|
||||||
schema: SchemaRef,
|
schema: SchemaRef,
|
||||||
catalog_manager: Weak<dyn CatalogManager>,
|
catalog_manager: Weak<dyn CatalogManager>,
|
||||||
|
|||||||
@@ -45,6 +45,7 @@ use crate::error::{
|
|||||||
use crate::information_schema::Predicates;
|
use crate::information_schema::Predicates;
|
||||||
use crate::CatalogManager;
|
use crate::CatalogManager;
|
||||||
|
|
||||||
|
#[derive(Debug)]
|
||||||
pub(super) struct InformationSchemaColumns {
|
pub(super) struct InformationSchemaColumns {
|
||||||
schema: SchemaRef,
|
schema: SchemaRef,
|
||||||
catalog_name: String,
|
catalog_name: String,
|
||||||
|
|||||||
@@ -61,7 +61,7 @@ pub const FLOWNODE_IDS: &str = "flownode_ids";
|
|||||||
pub const OPTIONS: &str = "options";
|
pub const OPTIONS: &str = "options";
|
||||||
|
|
||||||
/// The `information_schema.flows` to provides information about flows in databases.
|
/// The `information_schema.flows` to provides information about flows in databases.
|
||||||
///
|
#[derive(Debug)]
|
||||||
pub(super) struct InformationSchemaFlows {
|
pub(super) struct InformationSchemaFlows {
|
||||||
schema: SchemaRef,
|
schema: SchemaRef,
|
||||||
catalog_name: String,
|
catalog_name: String,
|
||||||
|
|||||||
@@ -58,8 +58,11 @@ pub(crate) const TIME_INDEX_CONSTRAINT_NAME: &str = "TIME INDEX";
 pub(crate) const INVERTED_INDEX_CONSTRAINT_NAME: &str = "INVERTED INDEX";
 /// Fulltext index constraint name
 pub(crate) const FULLTEXT_INDEX_CONSTRAINT_NAME: &str = "FULLTEXT INDEX";
+/// Skipping index constraint name
+pub(crate) const SKIPPING_INDEX_CONSTRAINT_NAME: &str = "SKIPPING INDEX";

 /// The virtual table implementation for `information_schema.KEY_COLUMN_USAGE`.
+#[derive(Debug)]
 pub(super) struct InformationSchemaKeyColumnUsage {
     schema: SchemaRef,
     catalog_name: String,

@@ -246,10 +249,12 @@ impl InformationSchemaKeyColumnUsageBuilder {
                 if column.is_inverted_indexed() {
                     constraints.push(INVERTED_INDEX_CONSTRAINT_NAME);
                 }
-                if column.has_fulltext_index_key() {
+                if column.is_fulltext_indexed() {
                     constraints.push(FULLTEXT_INDEX_CONSTRAINT_NAME);
                 }
+                if column.is_skipping_indexed() {
+                    constraints.push(SKIPPING_INDEX_CONSTRAINT_NAME);
+                }

                 if !constraints.is_empty() {
                     let aggregated_constraints = constraints.join(", ");

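For readers skimming the hunk above (this note is not part of the diff): with the new branch, a column that carries several index types ends up with a comma-joined constraint list. A small sketch of that aggregation, with the constant values copied from the hunk and the column flags invented for illustration:

```rust
fn main() {
    const INVERTED_INDEX_CONSTRAINT_NAME: &str = "INVERTED INDEX";
    const FULLTEXT_INDEX_CONSTRAINT_NAME: &str = "FULLTEXT INDEX";
    const SKIPPING_INDEX_CONSTRAINT_NAME: &str = "SKIPPING INDEX";

    // Hypothetical flags for a single column; in the real code they come from the column schema.
    let (inverted, fulltext, skipping) = (true, false, true);

    let mut constraints: Vec<&str> = Vec::new();
    if inverted {
        constraints.push(INVERTED_INDEX_CONSTRAINT_NAME);
    }
    if fulltext {
        constraints.push(FULLTEXT_INDEX_CONSTRAINT_NAME);
    }
    if skipping {
        constraints.push(SKIPPING_INDEX_CONSTRAINT_NAME);
    }

    // Mirrors the `constraints.join(", ")` aggregation shown above.
    assert_eq!(constraints.join(", "), "INVERTED INDEX, SKIPPING INDEX");
}
```
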
@@ -59,6 +59,7 @@ const INIT_CAPACITY: usize = 42;
 /// The `PARTITIONS` table provides information about partitioned tables.
 /// See https://dev.mysql.com/doc/refman/8.0/en/information-schema-partitions-table.html
 /// We provide an extral column `greptime_partition_id` for GreptimeDB region id.
+#[derive(Debug)]
 pub(super) struct InformationSchemaPartitions {
     schema: SchemaRef,
     catalog_name: String,

@@ -56,7 +56,7 @@ const INIT_CAPACITY: usize = 42;
 /// - `end_time`: the ending execution time of the procedure.
 /// - `status`: the status of the procedure.
 /// - `lock_keys`: the lock keys of the procedure.
-///
+#[derive(Debug)]
 pub(super) struct InformationSchemaProcedureInfo {
     schema: SchemaRef,
     catalog_manager: Weak<dyn CatalogManager>,

@@ -59,7 +59,7 @@ const INIT_CAPACITY: usize = 42;
 /// - `is_leader`: whether the peer is the leader
 /// - `status`: the region status, `ALIVE` or `DOWNGRADED`.
 /// - `down_seconds`: the duration of being offline, in seconds.
-///
+#[derive(Debug)]
 pub(super) struct InformationSchemaRegionPeers {
     schema: SchemaRef,
     catalog_name: String,

@@ -63,7 +63,7 @@ const INIT_CAPACITY: usize = 42;
 /// - `index_size`: The sst index files size in bytes.
 /// - `engine`: The engine type.
 /// - `region_role`: The region role.
-///
+#[derive(Debug)]
 pub(super) struct InformationSchemaRegionStatistics {
     schema: SchemaRef,
     catalog_manager: Weak<dyn CatalogManager>,

@@ -38,6 +38,7 @@ use store_api::storage::{ScanRequest, TableId};
 use super::{InformationTable, RUNTIME_METRICS};
 use crate::error::{CreateRecordBatchSnafu, InternalSnafu, Result};

+#[derive(Debug)]
 pub(super) struct InformationSchemaMetrics {
     schema: SchemaRef,
 }

@@ -49,6 +49,7 @@ pub const SCHEMA_OPTS: &str = "options";
 const INIT_CAPACITY: usize = 42;

 /// The `information_schema.schemata` table implementation.
+#[derive(Debug)]
 pub(super) struct InformationSchemaSchemata {
     schema: SchemaRef,
     catalog_name: String,

@@ -43,6 +43,7 @@ use crate::information_schema::Predicates;
 use crate::CatalogManager;

 /// The `TABLE_CONSTRAINTS` table describes which tables have constraints.
+#[derive(Debug)]
 pub(super) struct InformationSchemaTableConstraints {
     schema: SchemaRef,
     catalog_name: String,

@@ -71,6 +71,7 @@ const TABLE_ID: &str = "table_id";
 pub const ENGINE: &str = "engine";
 const INIT_CAPACITY: usize = 42;

+#[derive(Debug)]
 pub(super) struct InformationSchemaTables {
     schema: SchemaRef,
     catalog_name: String,

@@ -54,6 +54,7 @@ pub const CHARACTER_SET_CLIENT: &str = "character_set_client";
 pub const COLLATION_CONNECTION: &str = "collation_connection";

 /// The `information_schema.views` to provides information about views in databases.
+#[derive(Debug)]
 pub(super) struct InformationSchemaViews {
     schema: SchemaRef,
     catalog_name: String,

@@ -33,6 +33,7 @@ use super::SystemTable;
 use crate::error::{CreateRecordBatchSnafu, InternalSnafu, Result};

 /// A memory table with specified schema and columns.
+#[derive(Debug)]
 pub(crate) struct MemoryTable {
     pub(crate) table_id: TableId,
     pub(crate) table_name: &'static str,

@@ -14,6 +14,7 @@

 mod pg_catalog_memory_table;
 mod pg_class;
+mod pg_database;
 mod pg_namespace;
 mod table_names;

@@ -26,6 +27,7 @@ use lazy_static::lazy_static;
 use paste::paste;
 use pg_catalog_memory_table::get_schema_columns;
 use pg_class::PGClass;
+use pg_database::PGDatabase;
 use pg_namespace::PGNamespace;
 use session::context::{Channel, QueryContext};
 use table::TableRef;

@@ -113,6 +115,10 @@ impl PGCatalogProvider {
             PG_CLASS.to_string(),
             self.build_table(PG_CLASS).expect(PG_NAMESPACE),
         );
+        tables.insert(
+            PG_DATABASE.to_string(),
+            self.build_table(PG_DATABASE).expect(PG_DATABASE),
+        );
         self.tables = tables;
     }
 }

@@ -135,6 +141,11 @@ impl SystemSchemaProviderInner for PGCatalogProvider {
                 self.catalog_manager.clone(),
                 self.namespace_oid_map.clone(),
             ))),
+            table_names::PG_DATABASE => Some(Arc::new(PGDatabase::new(
+                self.catalog_name.clone(),
+                self.catalog_manager.clone(),
+                self.namespace_oid_map.clone(),
+            ))),
             _ => None,
         }
     }

@@ -12,6 +12,7 @@
 // See the License for the specific language governing permissions and
 // limitations under the License.

+use std::fmt;
 use std::sync::{Arc, Weak};

 use arrow_schema::SchemaRef as ArrowSchemaRef;

@@ -100,6 +101,15 @@ impl PGClass {
     }
 }

+impl fmt::Debug for PGClass {
+    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+        f.debug_struct("PGClass")
+            .field("schema", &self.schema)
+            .field("catalog_name", &self.catalog_name)
+            .finish()
+    }
+}
+
 impl SystemTable for PGClass {
     fn table_id(&self) -> table::metadata::TableId {
         PG_CATALOG_PG_CLASS_TABLE_ID

src/catalog/src/system_schema/pg_catalog/pg_database.rs (new file, 223 lines)
@@ -0,0 +1,223 @@
+// Copyright 2023 Greptime Team
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+use std::sync::{Arc, Weak};
+
+use arrow_schema::SchemaRef as ArrowSchemaRef;
+use common_catalog::consts::PG_CATALOG_PG_DATABASE_TABLE_ID;
+use common_error::ext::BoxedError;
+use common_recordbatch::adapter::RecordBatchStreamAdapter;
+use common_recordbatch::{DfSendableRecordBatchStream, RecordBatch};
+use datafusion::execution::TaskContext;
+use datafusion::physical_plan::stream::RecordBatchStreamAdapter as DfRecordBatchStreamAdapter;
+use datafusion::physical_plan::streaming::PartitionStream as DfPartitionStream;
+use datatypes::scalars::ScalarVectorBuilder;
+use datatypes::schema::{Schema, SchemaRef};
+use datatypes::value::Value;
+use datatypes::vectors::{StringVectorBuilder, UInt32VectorBuilder, VectorRef};
+use snafu::{OptionExt, ResultExt};
+use store_api::storage::ScanRequest;
+
+use super::pg_namespace::oid_map::PGNamespaceOidMapRef;
+use super::{query_ctx, OID_COLUMN_NAME, PG_DATABASE};
+use crate::error::{
+    CreateRecordBatchSnafu, InternalSnafu, Result, UpgradeWeakCatalogManagerRefSnafu,
+};
+use crate::information_schema::Predicates;
+use crate::system_schema::utils::tables::{string_column, u32_column};
+use crate::system_schema::SystemTable;
+use crate::CatalogManager;
+
+// === column name ===
+pub const DATNAME: &str = "datname";
+
+/// The initial capacity of the vector builders.
+const INIT_CAPACITY: usize = 42;
+
+/// The `pg_catalog.database` table implementation.
+pub(super) struct PGDatabase {
+    schema: SchemaRef,
+    catalog_name: String,
+    catalog_manager: Weak<dyn CatalogManager>,
+
+    // Workaround to convert schema_name to a numeric id
+    namespace_oid_map: PGNamespaceOidMapRef,
+}
+
+impl std::fmt::Debug for PGDatabase {
+    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
+        f.debug_struct("PGDatabase")
+            .field("schema", &self.schema)
+            .field("catalog_name", &self.catalog_name)
+            .finish()
+    }
+}
+
+impl PGDatabase {
+    pub(super) fn new(
+        catalog_name: String,
+        catalog_manager: Weak<dyn CatalogManager>,
+        namespace_oid_map: PGNamespaceOidMapRef,
+    ) -> Self {
+        Self {
+            schema: Self::schema(),
+            catalog_name,
+            catalog_manager,
+            namespace_oid_map,
+        }
+    }
+
+    fn schema() -> SchemaRef {
+        Arc::new(Schema::new(vec![
+            u32_column(OID_COLUMN_NAME),
+            string_column(DATNAME),
+        ]))
+    }
+
+    fn builder(&self) -> PGCDatabaseBuilder {
+        PGCDatabaseBuilder::new(
+            self.schema.clone(),
+            self.catalog_name.clone(),
+            self.catalog_manager.clone(),
+            self.namespace_oid_map.clone(),
+        )
+    }
+}
+
+impl DfPartitionStream for PGDatabase {
+    fn schema(&self) -> &ArrowSchemaRef {
+        self.schema.arrow_schema()
+    }
+
+    fn execute(&self, _: Arc<TaskContext>) -> DfSendableRecordBatchStream {
+        let schema = self.schema.arrow_schema().clone();
+        let mut builder = self.builder();
+        Box::pin(DfRecordBatchStreamAdapter::new(
+            schema,
+            futures::stream::once(async move {
+                builder
+                    .make_database(None)
+                    .await
+                    .map(|x| x.into_df_record_batch())
+                    .map_err(Into::into)
+            }),
+        ))
+    }
+}
+
+impl SystemTable for PGDatabase {
+    fn table_id(&self) -> table::metadata::TableId {
+        PG_CATALOG_PG_DATABASE_TABLE_ID
+    }
+
+    fn table_name(&self) -> &'static str {
+        PG_DATABASE
+    }
+
+    fn schema(&self) -> SchemaRef {
+        self.schema.clone()
+    }
+
+    fn to_stream(
+        &self,
+        request: ScanRequest,
+    ) -> Result<common_recordbatch::SendableRecordBatchStream> {
+        let schema = self.schema.arrow_schema().clone();
+        let mut builder = self.builder();
+        let stream = Box::pin(DfRecordBatchStreamAdapter::new(
+            schema,
+            futures::stream::once(async move {
+                builder
+                    .make_database(Some(request))
+                    .await
+                    .map(|x| x.into_df_record_batch())
+                    .map_err(Into::into)
+            }),
+        ));
+        Ok(Box::pin(
+            RecordBatchStreamAdapter::try_new(stream)
+                .map_err(BoxedError::new)
+                .context(InternalSnafu)?,
+        ))
+    }
+}
+
+/// Builds the `pg_catalog.pg_database` table row by row
+/// `oid` use schema name as a workaround since we don't have numeric schema id.
+/// `nspname` is the schema name.
+struct PGCDatabaseBuilder {
+    schema: SchemaRef,
+    catalog_name: String,
+    catalog_manager: Weak<dyn CatalogManager>,
+    namespace_oid_map: PGNamespaceOidMapRef,
+
+    oid: UInt32VectorBuilder,
+    datname: StringVectorBuilder,
+}
+
+impl PGCDatabaseBuilder {
+    fn new(
+        schema: SchemaRef,
+        catalog_name: String,
+        catalog_manager: Weak<dyn CatalogManager>,
+        namespace_oid_map: PGNamespaceOidMapRef,
+    ) -> Self {
+        Self {
+            schema,
+            catalog_name,
+            catalog_manager,
+            namespace_oid_map,
+
+            oid: UInt32VectorBuilder::with_capacity(INIT_CAPACITY),
+            datname: StringVectorBuilder::with_capacity(INIT_CAPACITY),
+        }
+    }
+
+    async fn make_database(&mut self, request: Option<ScanRequest>) -> Result<RecordBatch> {
+        let catalog_name = self.catalog_name.clone();
+        let catalog_manager = self
+            .catalog_manager
+            .upgrade()
+            .context(UpgradeWeakCatalogManagerRefSnafu)?;
+        let predicates = Predicates::from_scan_request(&request);
+        for schema_name in catalog_manager
+            .schema_names(&catalog_name, query_ctx())
+            .await?
+        {
+            self.add_database(&predicates, &schema_name);
+        }
+        self.finish()
+    }
+
+    fn add_database(&mut self, predicates: &Predicates, schema_name: &str) {
+        let oid = self.namespace_oid_map.get_oid(schema_name);
+        let row: [(&str, &Value); 2] = [
+            (OID_COLUMN_NAME, &Value::from(oid)),
+            (DATNAME, &Value::from(schema_name)),
+        ];
+
+        if !predicates.eval(&row) {
+            return;
+        }
+
+        self.oid.push(Some(oid));
+        self.datname.push(Some(schema_name));
+    }
+
+    fn finish(&mut self) -> Result<RecordBatch> {
+        let columns: Vec<VectorRef> =
+            vec![Arc::new(self.oid.finish()), Arc::new(self.datname.finish())];
+        RecordBatch::new(self.schema.clone(), columns).context(CreateRecordBatchSnafu)
+    }
+}

@@ -17,6 +17,7 @@

 pub(super) mod oid_map;

+use std::fmt;
 use std::sync::{Arc, Weak};

 use arrow_schema::SchemaRef as ArrowSchemaRef;

@@ -87,6 +88,15 @@ impl PGNamespace {
     }
 }

+impl fmt::Debug for PGNamespace {
+    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+        f.debug_struct("PGNamespace")
+            .field("schema", &self.schema)
+            .field("catalog_name", &self.catalog_name)
+            .finish()
+    }
+}
+
 impl SystemTable for PGNamespace {
     fn schema(&self) -> SchemaRef {
         self.schema.clone()

@@ -12,7 +12,11 @@
 // See the License for the specific language governing permissions and
 // limitations under the License.

-pub const PG_DATABASE: &str = "pg_databases";
+// https://www.postgresql.org/docs/current/catalog-pg-database.html
+pub const PG_DATABASE: &str = "pg_database";
+// https://www.postgresql.org/docs/current/catalog-pg-namespace.html
 pub const PG_NAMESPACE: &str = "pg_namespace";
+// https://www.postgresql.org/docs/current/catalog-pg-class.html
 pub const PG_CLASS: &str = "pg_class";
+// https://www.postgresql.org/docs/current/catalog-pg-type.html
 pub const PG_TYPE: &str = "pg_type";

@@ -365,7 +365,7 @@ mod tests {
 Projection: person.id AS a, person.name AS b
   Filter: person.id > Int32(500)
     TableScan: person"#,
-            format!("\n{:?}", source.get_logical_plan().unwrap())
+            format!("\n{}", source.get_logical_plan().unwrap())
         );
     }
 }

@@ -15,12 +15,12 @@
 //! Dummy catalog for region server.

 use std::any::Any;
+use std::fmt;
 use std::sync::Arc;

 use async_trait::async_trait;
 use common_catalog::format_full_table_name;
-use datafusion::catalog::schema::SchemaProvider;
-use datafusion::catalog::{CatalogProvider, CatalogProviderList};
+use datafusion::catalog::{CatalogProvider, CatalogProviderList, SchemaProvider};
 use datafusion::datasource::TableProvider;
 use snafu::OptionExt;
 use table::table::adapter::DfTableProviderAdapter;

@@ -41,6 +41,12 @@ impl DummyCatalogList {
     }
 }

+impl fmt::Debug for DummyCatalogList {
+    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+        f.debug_struct("DummyCatalogList").finish()
+    }
+}
+
 impl CatalogProviderList for DummyCatalogList {
     fn as_any(&self) -> &dyn Any {
         self

@@ -91,6 +97,14 @@ impl CatalogProvider for DummyCatalogProvider {
     }
 }

+impl fmt::Debug for DummyCatalogProvider {
+    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+        f.debug_struct("DummyCatalogProvider")
+            .field("catalog_name", &self.catalog_name)
+            .finish()
+    }
+}
+
 /// A dummy schema provider for [DummyCatalogList].
 #[derive(Clone)]
 struct DummySchemaProvider {

@@ -127,3 +141,12 @@ impl SchemaProvider for DummySchemaProvider {
         true
     }
 }
+
+impl fmt::Debug for DummySchemaProvider {
+    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+        f.debug_struct("DummySchemaProvider")
+            .field("catalog_name", &self.catalog_name)
+            .field("schema_name", &self.schema_name)
+            .finish()
+    }
+}

@@ -4,6 +4,9 @@ version.workspace = true
 edition.workspace = true
 license.workspace = true

+[features]
+pg_kvbackend = ["common-meta/pg_kvbackend"]
+
 [lints]
 workspace = true

@@ -56,7 +59,6 @@ tokio.workspace = true
 tracing-appender.workspace = true

 [dev-dependencies]
-common-test-util.workspace = true
 common-version.workspace = true
 serde.workspace = true
 tempfile.workspace = true

@@ -22,9 +22,13 @@ use clap::Parser;
 use common_error::ext::BoxedError;
 use common_meta::key::{TableMetadataManager, TableMetadataManagerRef};
 use common_meta::kv_backend::etcd::EtcdStore;
+use common_meta::kv_backend::memory::MemoryKvBackend;
+#[cfg(feature = "pg_kvbackend")]
+use common_meta::kv_backend::rds::PgStore;
 use common_meta::peer::Peer;
 use common_meta::rpc::router::{Region, RegionRoute};
 use common_telemetry::info;
+use common_wal::options::WalOptions;
 use datatypes::data_type::ConcreteDataType;
 use datatypes::schema::{ColumnSchema, RawSchema};
 use rand::Rng;

@@ -55,18 +59,34 @@ where
 #[derive(Debug, Default, Parser)]
 pub struct BenchTableMetadataCommand {
     #[clap(long)]
-    etcd_addr: String,
+    etcd_addr: Option<String>,
+    #[cfg(feature = "pg_kvbackend")]
+    #[clap(long)]
+    postgres_addr: Option<String>,
     #[clap(long)]
     count: u32,
 }

 impl BenchTableMetadataCommand {
     pub async fn build(&self) -> std::result::Result<Box<dyn Tool>, BoxedError> {
-        let etcd_store = EtcdStore::with_endpoints([&self.etcd_addr], 128)
-            .await
-            .unwrap();
+        let kv_backend = if let Some(etcd_addr) = &self.etcd_addr {
+            info!("Using etcd as kv backend");
+            EtcdStore::with_endpoints([etcd_addr], 128).await.unwrap()
+        } else {
+            Arc::new(MemoryKvBackend::new())
+        };

-        let table_metadata_manager = Arc::new(TableMetadataManager::new(etcd_store));
+        #[cfg(feature = "pg_kvbackend")]
+        let kv_backend = if let Some(postgres_addr) = &self.postgres_addr {
+            info!("Using postgres as kv backend");
+            PgStore::with_url(postgres_addr, "greptime_metakv", 128)
+                .await
+                .unwrap()
+        } else {
+            kv_backend
+        };
+
+        let table_metadata_manager = Arc::new(TableMetadataManager::new(kv_backend));

         let tool = BenchTableMetadata {
             table_metadata_manager,

|||||||
region_routes
|
region_routes
|
||||||
}
|
}
|
||||||
|
|
||||||
fn create_region_wal_options(regions: Vec<RegionNumber>) -> HashMap<RegionNumber, String> {
|
fn create_region_wal_options(regions: Vec<RegionNumber>) -> HashMap<RegionNumber, WalOptions> {
|
||||||
// TODO(niebayes): construct region wal options for benchmark.
|
// TODO(niebayes): construct region wal options for benchmark.
|
||||||
let _ = regions;
|
let _ = regions;
|
||||||
HashMap::default()
|
HashMap::default()
|
||||||
|
|||||||
@@ -49,7 +49,12 @@ impl TableMetadataBencher {

         let regions: Vec<_> = (0..64).collect();
         let region_routes = create_region_routes(regions.clone());
-        let region_wal_options = create_region_wal_options(regions);
+        let region_wal_options = create_region_wal_options(regions)
+            .into_iter()
+            .map(|(region_id, wal_options)| {
+                (region_id, serde_json::to_string(&wal_options).unwrap())
+            })
+            .collect();

         let start = Instant::now();

@@ -109,9 +114,17 @@ impl TableMetadataBencher {
                 let table_info = table_info.unwrap();
                 let table_route = table_route.unwrap();
                 let table_id = table_info.table_info.ident.table_id;
+
+                let regions: Vec<_> = (0..64).collect();
+                let region_wal_options = create_region_wal_options(regions);
                 let _ = self
                     .table_metadata_manager
-                    .delete_table_metadata(table_id, &table_info.table_name(), &table_route)
+                    .delete_table_metadata(
+                        table_id,
+                        &table_info.table_name(),
+                        &table_route,
+                        &region_wal_options,
+                    )
                     .await;
                 start.elapsed()
             },

Some files were not shown because too many files have changed in this diff.