Mirror of https://github.com/GreptimeTeam/greptimedb.git (synced 2026-01-04 12:22:55 +00:00)

Compare commits: v0.11.3...async_deco (424 commits)
@@ -3,3 +3,12 @@ linker = "aarch64-linux-gnu-gcc"

 [alias]
 sqlness = "run --bin sqlness-runner --"
+
+[unstable.git]
+shallow_index = true
+shallow_deps = true
+[unstable.gitoxide]
+fetch = true
+checkout = true
+list_files = true
+internal_use_git2 = false
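The `sqlness` entry above is a regular Cargo alias, so the sqlness suite can be launched as `cargo sqlness`, with anything after the alias forwarded to the runner binary. A minimal usage sketch (the runner flags shown are taken from the sqlness job matrix later in this diff, not from this hunk):

$ cargo sqlness                              # expands to: cargo run --bin sqlness-runner --
$ cargo sqlness -w kafka -k 127.0.0.1:9092   # remote-WAL mode
$ cargo sqlness --setup-pg                   # PostgreSQL kvbackend mode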
@@ -41,7 +41,14 @@ runs:
 username: ${{ inputs.dockerhub-image-registry-username }}
 password: ${{ inputs.dockerhub-image-registry-token }}

-- name: Build and push dev-builder-ubuntu image
+- name: Set up qemu for multi-platform builds
+uses: docker/setup-qemu-action@v3
+with:
+platforms: linux/amd64,linux/arm64
+# The latest version will lead to segmentation fault.
+image: tonistiigi/binfmt:qemu-v7.0.0-28
+
+- name: Build and push dev-builder-ubuntu image # Build image for amd64 and arm64 platform.
 shell: bash
 if: ${{ inputs.build-dev-builder-ubuntu == 'true' }}
 run: |

@@ -52,7 +59,7 @@ runs:
 IMAGE_NAMESPACE=${{ inputs.dockerhub-image-namespace }} \
 DEV_BUILDER_IMAGE_TAG=${{ inputs.version }}

-- name: Build and push dev-builder-centos image
+- name: Build and push dev-builder-centos image # Only build image for amd64 platform.
 shell: bash
 if: ${{ inputs.build-dev-builder-centos == 'true' }}
 run: |

@@ -69,8 +76,7 @@ runs:
 run: |
 make dev-builder \
 BASE_IMAGE=android \
+BUILDX_MULTI_PLATFORM_BUILD=amd64 \
 IMAGE_REGISTRY=${{ inputs.dockerhub-image-registry }} \
 IMAGE_NAMESPACE=${{ inputs.dockerhub-image-namespace }} \
-DEV_BUILDER_IMAGE_TAG=${{ inputs.version }} && \
-
-docker push ${{ inputs.dockerhub-image-registry }}/${{ inputs.dockerhub-image-namespace }}/dev-builder-android:${{ inputs.version }}
+DEV_BUILDER_IMAGE_TAG=${{ inputs.version }}
.github/actions/build-greptime-images/action.yml (vendored)

@@ -34,8 +34,8 @@ inputs:
 required: true
 push-latest-tag:
 description: Whether to push the latest tag
-required: false
-default: 'true'
+required: true
+default: 'false'
 runs:
 using: composite
 steps:

@@ -47,7 +47,11 @@ runs:
 password: ${{ inputs.image-registry-password }}

 - name: Set up qemu for multi-platform builds
-uses: docker/setup-qemu-action@v2
+uses: docker/setup-qemu-action@v3
+with:
+platforms: linux/amd64,linux/arm64
+# The latest version will lead to segmentation fault.
+image: tonistiigi/binfmt:qemu-v7.0.0-28

 - name: Set up buildx
 uses: docker/setup-buildx-action@v2
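Pinning tonistiigi/binfmt to qemu-v7.0.0-28 works around the segmentation fault mentioned in the comment while still registering QEMU emulators for both target platforms. Roughly what docker/setup-qemu-action does with these inputs, shown as a manual sketch (the explicit docker commands are an assumption about the action's behaviour, not part of this diff):

$ docker run --privileged --rm tonistiigi/binfmt:qemu-v7.0.0-28 --install amd64,arm64
$ docker buildx build --platform linux/amd64,linux/arm64 .   # cross-platform build now possible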
.github/actions/build-images/action.yml (vendored)

@@ -22,8 +22,8 @@ inputs:
 required: true
 push-latest-tag:
 description: Whether to push the latest tag
-required: false
-default: 'true'
+required: true
+default: 'false'
 dev-mode:
 description: Enable dev mode, only build standard greptime
 required: false

@@ -48,12 +48,11 @@ runs:
 path: /tmp/greptime-*.log
 retention-days: 3

-- name: Build greptime
-if: ${{ inputs.dev-mode == 'false' }}
+- name: Build greptime # Builds standard greptime binary
 uses: ./.github/actions/build-greptime-binary
 with:
 base-image: ubuntu
-features: servers/dashboard
+features: servers/dashboard,pg_kvbackend,mysql_kvbackend
 cargo-profile: ${{ inputs.cargo-profile }}
 artifacts-dir: greptime-linux-${{ inputs.arch }}-${{ inputs.version }}
 version: ${{ inputs.version }}

@@ -71,7 +70,7 @@ runs:
 if: ${{ inputs.arch == 'amd64' && inputs.dev-mode == 'false' }} # Builds greptime for centos if the host machine is amd64.
 with:
 base-image: centos
-features: servers/dashboard
+features: servers/dashboard,pg_kvbackend,mysql_kvbackend
 cargo-profile: ${{ inputs.cargo-profile }}
 artifacts-dir: greptime-linux-${{ inputs.arch }}-centos-${{ inputs.version }}
 version: ${{ inputs.version }}
@@ -9,8 +9,8 @@ runs:
 steps:
 # Download artifacts from previous jobs, the artifacts will be downloaded to:
 # ${WORKING_DIR}
-# |- greptime-darwin-amd64-pyo3-v0.5.0/greptime-darwin-amd64-pyo3-v0.5.0.tar.gz
-# |- greptime-darwin-amd64-pyo3-v0.5.0.sha256sum/greptime-darwin-amd64-pyo3-v0.5.0.sha256sum
+# |- greptime-darwin-amd64-v0.5.0/greptime-darwin-amd64-v0.5.0.tar.gz
+# |- greptime-darwin-amd64-v0.5.0.sha256sum/greptime-darwin-amd64-v0.5.0.sha256sum
 # |- greptime-darwin-amd64-v0.5.0/greptime-darwin-amd64-v0.5.0.tar.gz
 # |- greptime-darwin-amd64-v0.5.0.sha256sum/greptime-darwin-amd64-v0.5.0.sha256sum
 # ...
.github/actions/release-cn-artifacts/action.yaml (vendored)

@@ -51,8 +51,8 @@ inputs:
 required: true
 upload-to-s3:
 description: Upload to S3
-required: false
-default: 'true'
+required: true
+default: 'false'
 artifacts-dir:
 description: Directory to store artifacts
 required: false

@@ -77,13 +77,21 @@ runs:
 with:
 path: ${{ inputs.artifacts-dir }}

+- name: Install s5cmd
+shell: bash
+run: |
+wget https://github.com/peak/s5cmd/releases/download/v2.3.0/s5cmd_2.3.0_Linux-64bit.tar.gz
+tar -xzf s5cmd_2.3.0_Linux-64bit.tar.gz
+sudo mv s5cmd /usr/local/bin/
+sudo chmod +x /usr/local/bin/s5cmd
+
 - name: Release artifacts to cn region
 uses: nick-invision/retry@v2
 if: ${{ inputs.upload-to-s3 == 'true' }}
 env:
 AWS_ACCESS_KEY_ID: ${{ inputs.aws-cn-access-key-id }}
 AWS_SECRET_ACCESS_KEY: ${{ inputs.aws-cn-secret-access-key }}
-AWS_DEFAULT_REGION: ${{ inputs.aws-cn-region }}
+AWS_REGION: ${{ inputs.aws-cn-region }}
 UPDATE_VERSION_INFO: ${{ inputs.update-version-info }}
 with:
 max_attempts: ${{ inputs.upload-max-retry-times }}
.github/actions/start-runner/action.yml (vendored)

@@ -56,7 +56,7 @@ runs:

 - name: Start EC2 runner
 if: startsWith(inputs.runner, 'ec2')
-uses: machulav/ec2-github-runner@v2
+uses: machulav/ec2-github-runner@v2.3.8
 id: start-linux-arm64-ec2-runner
 with:
 mode: start
.github/actions/stop-runner/action.yml (vendored)

@@ -33,7 +33,7 @@ runs:

 - name: Stop EC2 runner
 if: ${{ inputs.label && inputs.ec2-instance-id }}
-uses: machulav/ec2-github-runner@v2
+uses: machulav/ec2-github-runner@v2.3.8
 with:
 mode: stop
 label: ${{ inputs.label }}
.github/actions/upload-artifacts/action.yml (vendored)

@@ -30,9 +30,9 @@ runs:
 done

 # The compressed artifacts will use the following layout:
-# greptime-linux-amd64-pyo3-v0.3.0sha256sum
-# greptime-linux-amd64-pyo3-v0.3.0.tar.gz
-# greptime-linux-amd64-pyo3-v0.3.0
+# greptime-linux-amd64-v0.3.0sha256sum
+# greptime-linux-amd64-v0.3.0.tar.gz
+# greptime-linux-amd64-v0.3.0
 # └── greptime
 - name: Compress artifacts and calculate checksum
 working-directory: ${{ inputs.working-dir }}
.github/scripts/upload-artifacts-to-s3.sh (vendored)

@@ -27,13 +27,13 @@ function upload_artifacts() {
 # ├── latest-version.txt
 # ├── latest-nightly-version.txt
 # ├── v0.1.0
-# │ ├── greptime-darwin-amd64-pyo3-v0.1.0.sha256sum
-# │ └── greptime-darwin-amd64-pyo3-v0.1.0.tar.gz
+# │ ├── greptime-darwin-amd64-v0.1.0.sha256sum
+# │ └── greptime-darwin-amd64-v0.1.0.tar.gz
 # └── v0.2.0
-# ├── greptime-darwin-amd64-pyo3-v0.2.0.sha256sum
-# └── greptime-darwin-amd64-pyo3-v0.2.0.tar.gz
+# ├── greptime-darwin-amd64-v0.2.0.sha256sum
+# └── greptime-darwin-amd64-v0.2.0.tar.gz
 find "$ARTIFACTS_DIR" -type f \( -name "*.tar.gz" -o -name "*.sha256sum" \) | while IFS= read -r file; do
-aws s3 cp \
+s5cmd cp \
 "$file" "s3://$AWS_S3_BUCKET/$RELEASE_DIRS/$VERSION/$(basename "$file")"
 done
 }

@@ -45,7 +45,7 @@ function update_version_info() {
 if [[ "$VERSION" =~ ^v[0-9]+\.[0-9]+\.[0-9]+$ ]]; then
 echo "Updating latest-version.txt"
 echo "$VERSION" > latest-version.txt
-aws s3 cp \
+s5cmd cp \
 latest-version.txt "s3://$AWS_S3_BUCKET/$RELEASE_DIRS/latest-version.txt"
 fi

@@ -53,7 +53,7 @@ function update_version_info() {
 if [[ "$VERSION" == *"nightly"* ]]; then
 echo "Updating latest-nightly-version.txt"
 echo "$VERSION" > latest-nightly-version.txt
-aws s3 cp \
+s5cmd cp \
 latest-nightly-version.txt "s3://$AWS_S3_BUCKET/$RELEASE_DIRS/latest-nightly-version.txt"
 fi
 fi
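The upload path now uses s5cmd instead of the AWS CLI; s5cmd reads the same AWS_ACCESS_KEY_ID / AWS_SECRET_ACCESS_KEY / AWS_REGION environment variables the workflow exports. A rough local equivalent of one upload, where the bucket and prefix are placeholders rather than values from this diff:

$ s5cmd cp greptime-linux-amd64-v0.1.0.tar.gz \
    "s3://<release-bucket>/<release-dir>/v0.1.0/greptime-linux-amd64-v0.1.0.tar.gz"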
.github/workflows/apidoc.yml (vendored)

@@ -14,9 +14,11 @@ name: Build API docs

 jobs:
 apidoc:
-runs-on: ubuntu-20.04
+runs-on: ubuntu-latest
 steps:
 - uses: actions/checkout@v4
+with:
+persist-credentials: false
 - uses: arduino/setup-protoc@v3
 with:
 repo-token: ${{ secrets.GITHUB_TOKEN }}
.github/workflows/dependency-check.yml (vendored)

@@ -12,6 +12,8 @@ jobs:
 steps:
 - name: Checkout code
 uses: actions/checkout@v4
+with:
+persist-credentials: false

 - name: Set up Rust
 uses: actions-rust-lang/setup-rust-toolchain@v1
.github/workflows/dev-build.yml (vendored)

@@ -16,11 +16,11 @@ on:
 description: The runner uses to build linux-amd64 artifacts
 default: ec2-c6i.4xlarge-amd64
 options:
-- ubuntu-20.04
-- ubuntu-20.04-8-cores
-- ubuntu-20.04-16-cores
-- ubuntu-20.04-32-cores
-- ubuntu-20.04-64-cores
+- ubuntu-22.04
+- ubuntu-22.04-8-cores
+- ubuntu-22.04-16-cores
+- ubuntu-22.04-32-cores
+- ubuntu-22.04-64-cores
 - ec2-c6i.xlarge-amd64 # 4C8G
 - ec2-c6i.2xlarge-amd64 # 8C16G
 - ec2-c6i.4xlarge-amd64 # 16C32G

@@ -29,7 +29,7 @@ on:
 linux_arm64_runner:
 type: choice
 description: The runner uses to build linux-arm64 artifacts
-default: ec2-c6g.8xlarge-arm64
+default: ec2-c6g.4xlarge-arm64
 options:
 - ec2-c6g.xlarge-arm64 # 4C8G
 - ec2-c6g.2xlarge-arm64 # 8C16G

@@ -76,20 +76,14 @@ env:

 NIGHTLY_RELEASE_PREFIX: nightly

-# Use the different image name to avoid conflict with the release images.
-IMAGE_NAME: greptimedb-dev
-
 # The source code will check out in the following path: '${WORKING_DIR}/dev/greptime'.
 CHECKOUT_GREPTIMEDB_PATH: dev/greptimedb

-permissions:
-issues: write
-
 jobs:
 allocate-runners:
 name: Allocate runners
 if: ${{ github.repository == 'GreptimeTeam/greptimedb' }}
-runs-on: ubuntu-20.04
+runs-on: ubuntu-latest
 outputs:
 linux-amd64-runner: ${{ steps.start-linux-amd64-runner.outputs.label }}
 linux-arm64-runner: ${{ steps.start-linux-arm64-runner.outputs.label }}

@@ -107,6 +101,7 @@ jobs:
 uses: actions/checkout@v4
 with:
 fetch-depth: 0
+persist-credentials: false

 - name: Create version
 id: create-version

@@ -161,6 +156,7 @@ jobs:
 uses: actions/checkout@v4
 with:
 fetch-depth: 0
+persist-credentials: false

 - name: Checkout greptimedb
 uses: actions/checkout@v4

@@ -168,6 +164,7 @@ jobs:
 repository: ${{ inputs.repository }}
 ref: ${{ inputs.commit }}
 path: ${{ env.CHECKOUT_GREPTIMEDB_PATH }}
+persist-credentials: true

 - uses: ./.github/actions/build-linux-artifacts
 with:

@@ -192,6 +189,7 @@ jobs:
 uses: actions/checkout@v4
 with:
 fetch-depth: 0
+persist-credentials: false

 - name: Checkout greptimedb
 uses: actions/checkout@v4

@@ -199,6 +197,7 @@ jobs:
 repository: ${{ inputs.repository }}
 ref: ${{ inputs.commit }}
 path: ${{ env.CHECKOUT_GREPTIMEDB_PATH }}
+persist-credentials: true

 - uses: ./.github/actions/build-linux-artifacts
 with:

@@ -219,25 +218,33 @@ jobs:
 build-linux-amd64-artifacts,
 build-linux-arm64-artifacts,
 ]
-runs-on: ubuntu-20.04
+runs-on: ubuntu-latest
 outputs:
 build-result: ${{ steps.set-build-result.outputs.build-result }}
 steps:
 - uses: actions/checkout@v4
 with:
 fetch-depth: 0
+persist-credentials: false

 - name: Build and push images to dockerhub
 uses: ./.github/actions/build-images
 with:
 image-registry: docker.io
 image-namespace: ${{ vars.IMAGE_NAMESPACE }}
-image-name: ${{ env.IMAGE_NAME }}
+image-name: ${{ vars.DEV_BUILD_IMAGE_NAME }}
 image-registry-username: ${{ secrets.DOCKERHUB_USERNAME }}
 image-registry-password: ${{ secrets.DOCKERHUB_TOKEN }}
 version: ${{ needs.allocate-runners.outputs.version }}
 push-latest-tag: false # Don't push the latest tag to registry.
 dev-mode: true # Only build the standard images.

+- name: Echo Docker image tag to step summary
+run: |
+echo "## Docker Image Tag" >> $GITHUB_STEP_SUMMARY
+echo "Image Tag: \`${{ needs.allocate-runners.outputs.version }}\`" >> $GITHUB_STEP_SUMMARY
+echo "Full Image Name: \`docker.io/${{ vars.IMAGE_NAMESPACE }}/${{ vars.DEV_BUILD_IMAGE_NAME }}:${{ needs.allocate-runners.outputs.version }}\`" >> $GITHUB_STEP_SUMMARY
+echo "Pull Command: \`docker pull docker.io/${{ vars.IMAGE_NAMESPACE }}/${{ vars.DEV_BUILD_IMAGE_NAME }}:${{ needs.allocate-runners.outputs.version }}\`" >> $GITHUB_STEP_SUMMARY
+
 - name: Set build result
 id: set-build-result

@@ -251,19 +258,20 @@ jobs:
 allocate-runners,
 release-images-to-dockerhub,
 ]
-runs-on: ubuntu-20.04
+runs-on: ubuntu-latest
 continue-on-error: true
 steps:
 - uses: actions/checkout@v4
 with:
 fetch-depth: 0
+persist-credentials: false

 - name: Release artifacts to CN region
 uses: ./.github/actions/release-cn-artifacts
 with:
 src-image-registry: docker.io
 src-image-namespace: ${{ vars.IMAGE_NAMESPACE }}
-src-image-name: ${{ env.IMAGE_NAME }}
+src-image-name: ${{ vars.DEV_BUILD_IMAGE_NAME }}
 dst-image-registry-username: ${{ secrets.ALICLOUD_USERNAME }}
 dst-image-registry-password: ${{ secrets.ALICLOUD_PASSWORD }}
 dst-image-registry: ${{ vars.ACR_IMAGE_REGISTRY }}

@@ -273,6 +281,7 @@ jobs:
 aws-cn-access-key-id: ${{ secrets.AWS_CN_ACCESS_KEY_ID }}
 aws-cn-secret-access-key: ${{ secrets.AWS_CN_SECRET_ACCESS_KEY }}
 aws-cn-region: ${{ vars.AWS_RELEASE_BUCKET_REGION }}
+upload-to-s3: false
 dev-mode: true # Only build the standard images(exclude centos images).
 push-latest-tag: false # Don't push the latest tag to registry.
 update-version-info: false # Don't update the version info in S3.

@@ -281,7 +290,7 @@ jobs:
 name: Stop linux-amd64 runner
 # Only run this job when the runner is allocated.
 if: ${{ always() }}
-runs-on: ubuntu-20.04
+runs-on: ubuntu-latest
 needs: [
 allocate-runners,
 build-linux-amd64-artifacts,

@@ -291,6 +300,7 @@ jobs:
 uses: actions/checkout@v4
 with:
 fetch-depth: 0
+persist-credentials: false

 - name: Stop EC2 runner
 uses: ./.github/actions/stop-runner

@@ -306,7 +316,7 @@ jobs:
 name: Stop linux-arm64 runner
 # Only run this job when the runner is allocated.
 if: ${{ always() }}
-runs-on: ubuntu-20.04
+runs-on: ubuntu-latest
 needs: [
 allocate-runners,
 build-linux-arm64-artifacts,

@@ -316,6 +326,7 @@ jobs:
 uses: actions/checkout@v4
 with:
 fetch-depth: 0
+persist-credentials: false

 - name: Stop EC2 runner
 uses: ./.github/actions/stop-runner

@@ -333,11 +344,17 @@ jobs:
 needs: [
 release-images-to-dockerhub
 ]
-runs-on: ubuntu-20.04
+runs-on: ubuntu-latest
+permissions:
+issues: write
+
 env:
 SLACK_WEBHOOK_URL: ${{ secrets.SLACK_WEBHOOK_URL_DEVELOP_CHANNEL }}
 steps:
 - uses: actions/checkout@v4
+with:
+fetch-depth: 0
+persist-credentials: false
 - uses: ./.github/actions/setup-cyborg
 - name: Report CI status
 id: report-ci-status
.github/workflows/develop.yml (vendored)

@@ -1,4 +1,6 @@
 on:
+schedule:
+- cron: "0 15 * * 1-5"
 merge_group:
 pull_request:
 types: [ opened, synchronize, reopened, ready_for_review ]

@@ -21,9 +23,11 @@ concurrency:
 jobs:
 check-typos-and-docs:
 name: Check typos and docs
-runs-on: ubuntu-20.04
+runs-on: ubuntu-latest
 steps:
 - uses: actions/checkout@v4
+with:
+persist-credentials: false
 - uses: crate-ci/typos@master
 - name: Check the config docs
 run: |

@@ -32,10 +36,12 @@ jobs:
 || (echo "'config/config.md' is not up-to-date, please run 'make config-docs'." && exit 1)

 license-header-check:
-runs-on: ubuntu-20.04
+runs-on: ubuntu-latest
 name: Check License Header
 steps:
 - uses: actions/checkout@v4
+with:
+persist-credentials: false
 - uses: korandoru/hawkeye@v5

 check:

@@ -43,10 +49,12 @@ jobs:
 runs-on: ${{ matrix.os }}
 strategy:
 matrix:
-os: [ ubuntu-20.04 ]
+os: [ ubuntu-latest ]
 timeout-minutes: 60
 steps:
 - uses: actions/checkout@v4
+with:
+persist-credentials: false
 - uses: arduino/setup-protoc@v3
 with:
 repo-token: ${{ secrets.GITHUB_TOKEN }}

@@ -64,10 +72,12 @@ jobs:

 toml:
 name: Toml Check
-runs-on: ubuntu-20.04
+runs-on: ubuntu-latest
 timeout-minutes: 60
 steps:
 - uses: actions/checkout@v4
+with:
+persist-credentials: false
 - uses: actions-rust-lang/setup-rust-toolchain@v1
 - name: Install taplo
 run: cargo +stable install taplo-cli --version ^0.9 --locked --force

@@ -79,10 +89,12 @@ jobs:
 runs-on: ${{ matrix.os }}
 strategy:
 matrix:
-os: [ ubuntu-20.04 ]
+os: [ ubuntu-latest ]
 timeout-minutes: 60
 steps:
 - uses: actions/checkout@v4
+with:
+persist-credentials: false
 - uses: arduino/setup-protoc@v3
 with:
 repo-token: ${{ secrets.GITHUB_TOKEN }}

@@ -99,7 +111,7 @@ jobs:
 - name: Build greptime binaries
 shell: bash
 # `cargo gc` will invoke `cargo build` with specified args
-run: cargo gc -- --bin greptime --bin sqlness-runner --features pg_kvbackend
+run: cargo gc -- --bin greptime --bin sqlness-runner --features "pg_kvbackend,mysql_kvbackend"
 - name: Pack greptime binaries
 shell: bash
 run: |

@@ -137,6 +149,8 @@ jobs:
 echo "Disk space after:"
 df -h
 - uses: actions/checkout@v4
+with:
+persist-credentials: false
 - uses: arduino/setup-protoc@v3
 with:
 repo-token: ${{ secrets.GITHUB_TOKEN }}

@@ -190,6 +204,8 @@ jobs:
 echo "Disk space after:"
 df -h
 - uses: actions/checkout@v4
+with:
+persist-credentials: false
 - uses: arduino/setup-protoc@v3
 with:
 repo-token: ${{ secrets.GITHUB_TOKEN }}

@@ -232,10 +248,12 @@ jobs:
 runs-on: ${{ matrix.os }}
 strategy:
 matrix:
-os: [ ubuntu-20.04 ]
+os: [ ubuntu-latest ]
 timeout-minutes: 60
 steps:
 - uses: actions/checkout@v4
+with:
+persist-credentials: false
 - uses: arduino/setup-protoc@v3
 with:
 repo-token: ${{ secrets.GITHUB_TOKEN }}

@@ -252,7 +270,7 @@ jobs:
 - name: Build greptime bianry
 shell: bash
 # `cargo gc` will invoke `cargo build` with specified args
-run: cargo gc --profile ci -- --bin greptime --features pg_kvbackend
+run: cargo gc --profile ci -- --bin greptime --features "pg_kvbackend,mysql_kvbackend"
 - name: Pack greptime binary
 shell: bash
 run: |

@@ -293,6 +311,8 @@ jobs:
 echo "Disk space after:"
 df -h
 - uses: actions/checkout@v4
+with:
+persist-credentials: false
 - name: Setup Kind
 uses: ./.github/actions/setup-kind
 - if: matrix.mode.minio

@@ -435,6 +455,8 @@ jobs:
 echo "Disk space after:"
 df -h
 - uses: actions/checkout@v4
+with:
+persist-credentials: false
 - name: Setup Kind
 uses: ./.github/actions/setup-kind
 - name: Setup Chaos Mesh

@@ -546,7 +568,7 @@ jobs:
 runs-on: ${{ matrix.os }}
 strategy:
 matrix:
-os: [ ubuntu-20.04 ]
+os: [ ubuntu-latest ]
 mode:
 - name: "Basic"
 opts: ""

@@ -554,9 +576,14 @@ jobs:
 - name: "Remote WAL"
 opts: "-w kafka -k 127.0.0.1:9092"
 kafka: true
+- name: "Pg Kvbackend"
+opts: "--setup-pg"
+kafka: false
 timeout-minutes: 60
 steps:
 - uses: actions/checkout@v4
+with:
+persist-credentials: false
 - if: matrix.mode.kafka
 name: Setup kafka server
 working-directory: tests-integration/fixtures

@@ -580,10 +607,12 @@ jobs:

 fmt:
 name: Rustfmt
-runs-on: ubuntu-20.04
+runs-on: ubuntu-latest
 timeout-minutes: 60
 steps:
 - uses: actions/checkout@v4
+with:
+persist-credentials: false
 - uses: arduino/setup-protoc@v3
 with:
 repo-token: ${{ secrets.GITHUB_TOKEN }}

@@ -595,10 +624,12 @@ jobs:

 clippy:
 name: Clippy
-runs-on: ubuntu-20.04
+runs-on: ubuntu-latest
 timeout-minutes: 60
 steps:
 - uses: actions/checkout@v4
+with:
+persist-credentials: false
 - uses: arduino/setup-protoc@v3
 with:
 repo-token: ${{ secrets.GITHUB_TOKEN }}

@@ -621,16 +652,20 @@ jobs:
 runs-on: ubuntu-latest
 steps:
 - uses: actions/checkout@v4
+with:
+persist-credentials: false
 - name: Merge Conflict Finder
 uses: olivernybroe/action-conflict-finder@v4.0

 test:
 if: github.event_name != 'merge_group'
-runs-on: ubuntu-24.04-arm
+runs-on: ubuntu-22.04-arm
 timeout-minutes: 60
 needs: [conflict-check, clippy, fmt]
 steps:
 - uses: actions/checkout@v4
+with:
+persist-credentials: false
 - uses: arduino/setup-protoc@v3
 with:
 repo-token: ${{ secrets.GITHUB_TOKEN }}

@@ -652,10 +687,11 @@ jobs:
 working-directory: tests-integration/fixtures
 run: docker compose up -d --wait
 - name: Run nextest cases
-run: cargo nextest run --workspace -F dashboard -F pg_kvbackend
+run: cargo nextest run --workspace -F dashboard -F pg_kvbackend -F mysql_kvbackend
 env:
 CARGO_BUILD_RUSTFLAGS: "-C link-arg=-fuse-ld=mold"
 RUST_BACKTRACE: 1
+RUST_MIN_STACK: 8388608 # 8MB
 CARGO_INCREMENTAL: 0
 GT_S3_BUCKET: ${{ vars.AWS_CI_TEST_BUCKET }}
 GT_S3_ACCESS_KEY_ID: ${{ secrets.AWS_CI_TEST_ACCESS_KEY_ID }}

@@ -668,16 +704,19 @@ jobs:
 GT_MINIO_ENDPOINT_URL: http://127.0.0.1:9000
 GT_ETCD_ENDPOINTS: http://127.0.0.1:2379
 GT_POSTGRES_ENDPOINTS: postgres://greptimedb:admin@127.0.0.1:5432/postgres
+GT_MYSQL_ENDPOINTS: mysql://greptimedb:admin@127.0.0.1:3306/mysql
 GT_KAFKA_ENDPOINTS: 127.0.0.1:9092
 GT_KAFKA_SASL_ENDPOINTS: 127.0.0.1:9093
 UNITTEST_LOG_DIR: "__unittest_logs"

 coverage:
 if: github.event_name == 'merge_group'
-runs-on: ubuntu-20.04-8-cores
+runs-on: ubuntu-22.04-8-cores
 timeout-minutes: 60
 steps:
 - uses: actions/checkout@v4
+with:
+persist-credentials: false
 - uses: arduino/setup-protoc@v3
 with:
 repo-token: ${{ secrets.GITHUB_TOKEN }}

@@ -701,7 +740,7 @@ jobs:
 working-directory: tests-integration/fixtures
 run: docker compose up -d --wait
 - name: Run nextest cases
-run: cargo llvm-cov nextest --workspace --lcov --output-path lcov.info -F dashboard -F pg_kvbackend
+run: cargo llvm-cov nextest --workspace --lcov --output-path lcov.info -F dashboard -F pg_kvbackend -F mysql_kvbackend
 env:
 CARGO_BUILD_RUSTFLAGS: "-C link-arg=-fuse-ld=mold"
 RUST_BACKTRACE: 1

@@ -717,6 +756,7 @@ jobs:
 GT_MINIO_ENDPOINT_URL: http://127.0.0.1:9000
 GT_ETCD_ENDPOINTS: http://127.0.0.1:2379
 GT_POSTGRES_ENDPOINTS: postgres://greptimedb:admin@127.0.0.1:5432/postgres
+GT_MYSQL_ENDPOINTS: mysql://greptimedb:admin@127.0.0.1:3306/mysql
 GT_KAFKA_ENDPOINTS: 127.0.0.1:9092
 GT_KAFKA_SASL_ENDPOINTS: 127.0.0.1:9093
 UNITTEST_LOG_DIR: "__unittest_logs"

@@ -732,7 +772,7 @@ jobs:
 # compat:
 # name: Compatibility Test
 # needs: build
-# runs-on: ubuntu-20.04
+# runs-on: ubuntu-22.04
 # timeout-minutes: 60
 # steps:
 # - uses: actions/checkout@v4
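The test and coverage jobs now also exercise the MySQL metadata backend alongside PostgreSQL. A local approximation of the new nextest invocation, assuming the fixture services from tests-integration/fixtures are already up via docker compose; every flag and endpoint below appears verbatim in this workflow:

$ GT_POSTGRES_ENDPOINTS=postgres://greptimedb:admin@127.0.0.1:5432/postgres \
  GT_MYSQL_ENDPOINTS=mysql://greptimedb:admin@127.0.0.1:3306/mysql \
  RUST_MIN_STACK=8388608 \
  cargo nextest run --workspace -F dashboard -F pg_kvbackend -F mysql_kvbackend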
.github/workflows/docbot.yml (vendored)

@@ -3,16 +3,21 @@ on:
 pull_request_target:
 types: [opened, edited]

-permissions:
-pull-requests: write
-contents: read
+concurrency:
+group: ${{ github.workflow }}-${{ github.head_ref || github.run_id }}
+cancel-in-progress: true

 jobs:
 docbot:
-runs-on: ubuntu-20.04
+runs-on: ubuntu-latest
+permissions:
+pull-requests: write
+contents: read
 timeout-minutes: 10
 steps:
 - uses: actions/checkout@v4
+with:
+persist-credentials: false
 - uses: ./.github/actions/setup-cyborg
 - name: Maybe Follow Up Docs Issue
 working-directory: cyborg
.github/workflows/docs.yml (vendored)

@@ -31,38 +31,47 @@ name: CI
 jobs:
 typos:
 name: Spell Check with Typos
-runs-on: ubuntu-20.04
+runs-on: ubuntu-latest
 steps:
 - uses: actions/checkout@v4
+with:
+persist-credentials: false
 - uses: crate-ci/typos@master

 license-header-check:
-runs-on: ubuntu-20.04
+runs-on: ubuntu-latest
 name: Check License Header
 steps:
 - uses: actions/checkout@v4
+with:
+persist-credentials: false
 - uses: korandoru/hawkeye@v5

 check:
 name: Check
-runs-on: ubuntu-20.04
+runs-on: ubuntu-latest
 steps:
 - run: 'echo "No action required"'

 fmt:
 name: Rustfmt
-runs-on: ubuntu-20.04
+runs-on: ubuntu-latest
 steps:
 - run: 'echo "No action required"'

 clippy:
 name: Clippy
-runs-on: ubuntu-20.04
+runs-on: ubuntu-latest
 steps:
 - run: 'echo "No action required"'

 coverage:
-runs-on: ubuntu-20.04
+runs-on: ubuntu-latest
+steps:
+- run: 'echo "No action required"'
+
+test:
+runs-on: ubuntu-latest
 steps:
 - run: 'echo "No action required"'

@@ -71,7 +80,7 @@ jobs:
 runs-on: ${{ matrix.os }}
 strategy:
 matrix:
-os: [ ubuntu-20.04 ]
+os: [ ubuntu-latest ]
 mode:
 - name: "Basic"
 - name: "Remote WAL"
.github/workflows/grafana.yml (vendored, new file)

@@ -0,0 +1,52 @@
+name: Check Grafana Panels
+
+on:
+pull_request:
+branches:
+- main
+paths:
+- 'grafana/**' # Trigger only when files under the grafana/ directory change
+
+jobs:
+check-panels:
+runs-on: ubuntu-latest
+
+steps:
+# Check out the repository
+- name: Checkout repository
+uses: actions/checkout@v4
+
+# Install jq (required for the script)
+- name: Install jq
+run: sudo apt-get install -y jq
+
+# Make the check.sh script executable
+- name: Make check.sh executable
+run: chmod +x grafana/check.sh
+
+# Run the check.sh script
+- name: Run check.sh
+run: ./grafana/check.sh
+
+# Only run summary.sh for pull_request events (not for merge queues or final pushes)
+- name: Check if this is a pull request
+id: check-pr
+run: |
+if [[ "${{ github.event_name }}" == "pull_request" ]]; then
+echo "is_pull_request=true" >> $GITHUB_OUTPUT
+else
+echo "is_pull_request=false" >> $GITHUB_OUTPUT
+fi
+
+# Make the summary.sh script executable
+- name: Make summary.sh executable
+if: steps.check-pr.outputs.is_pull_request == 'true'
+run: chmod +x grafana/summary.sh
+
+# Run the summary.sh script and add its output to the GitHub Job Summary
+- name: Run summary.sh and add to Job Summary
+if: steps.check-pr.outputs.is_pull_request == 'true'
+run: |
+SUMMARY=$(./grafana/summary.sh)
+echo "### Summary of Grafana Panels" >> $GITHUB_STEP_SUMMARY
+echo "$SUMMARY" >> $GITHUB_STEP_SUMMARY
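grafana/check.sh itself is not part of this diff, so the exact validation it performs is unknown; as a purely hypothetical illustration of the kind of jq-based panel check such a script might run (the dashboard path and the rule are assumptions, not taken from the repository):

$ jq -e '[.panels[] | select(.title == null or .title == "")] | length == 0' \
    grafana/dashboards/example-dashboard.json   # hypothetical: every panel must have a title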
.github/workflows/nightly-build.yml (vendored)

@@ -14,11 +14,11 @@ on:
 description: The runner uses to build linux-amd64 artifacts
 default: ec2-c6i.4xlarge-amd64
 options:
-- ubuntu-20.04
-- ubuntu-20.04-8-cores
-- ubuntu-20.04-16-cores
-- ubuntu-20.04-32-cores
-- ubuntu-20.04-64-cores
+- ubuntu-22.04
+- ubuntu-22.04-8-cores
+- ubuntu-22.04-16-cores
+- ubuntu-22.04-32-cores
+- ubuntu-22.04-64-cores
 - ec2-c6i.xlarge-amd64 # 4C8G
 - ec2-c6i.2xlarge-amd64 # 8C16G
 - ec2-c6i.4xlarge-amd64 # 16C32G

@@ -27,7 +27,7 @@ on:
 linux_arm64_runner:
 type: choice
 description: The runner uses to build linux-arm64 artifacts
-default: ec2-c6g.8xlarge-arm64
+default: ec2-c6g.4xlarge-arm64
 options:
 - ec2-c6g.xlarge-arm64 # 4C8G
 - ec2-c6g.2xlarge-arm64 # 8C16G

@@ -66,18 +66,11 @@ env:

 NIGHTLY_RELEASE_PREFIX: nightly

-# Use the different image name to avoid conflict with the release images.
-# The DockerHub image will be greptime/greptimedb-nightly.
-IMAGE_NAME: greptimedb-nightly
-
-permissions:
-issues: write
-
 jobs:
 allocate-runners:
 name: Allocate runners
 if: ${{ github.repository == 'GreptimeTeam/greptimedb' }}
-runs-on: ubuntu-20.04
+runs-on: ubuntu-latest
 outputs:
 linux-amd64-runner: ${{ steps.start-linux-amd64-runner.outputs.label }}
 linux-arm64-runner: ${{ steps.start-linux-arm64-runner.outputs.label }}

@@ -95,6 +88,7 @@ jobs:
 uses: actions/checkout@v4
 with:
 fetch-depth: 0
+persist-credentials: false

 - name: Create version
 id: create-version

@@ -147,6 +141,7 @@ jobs:
 - uses: actions/checkout@v4
 with:
 fetch-depth: 0
+persist-credentials: false

 - uses: ./.github/actions/build-linux-artifacts
 with:

@@ -168,6 +163,7 @@ jobs:
 - uses: actions/checkout@v4
 with:
 fetch-depth: 0
+persist-credentials: false

 - uses: ./.github/actions/build-linux-artifacts
 with:

@@ -186,24 +182,25 @@ jobs:
 build-linux-amd64-artifacts,
 build-linux-arm64-artifacts,
 ]
-runs-on: ubuntu-20.04
+runs-on: ubuntu-latest
 outputs:
 nightly-build-result: ${{ steps.set-nightly-build-result.outputs.nightly-build-result }}
 steps:
 - uses: actions/checkout@v4
 with:
 fetch-depth: 0
+persist-credentials: false

 - name: Build and push images to dockerhub
 uses: ./.github/actions/build-images
 with:
 image-registry: docker.io
 image-namespace: ${{ vars.IMAGE_NAMESPACE }}
-image-name: ${{ env.IMAGE_NAME }}
+image-name: ${{ vars.NIGHTLY_BUILD_IMAGE_NAME }}
 image-registry-username: ${{ secrets.DOCKERHUB_USERNAME }}
 image-registry-password: ${{ secrets.DOCKERHUB_TOKEN }}
 version: ${{ needs.allocate-runners.outputs.version }}
-push-latest-tag: true
+push-latest-tag: false

 - name: Set nightly build result
 id: set-nightly-build-result

@@ -217,7 +214,7 @@ jobs:
 allocate-runners,
 release-images-to-dockerhub,
 ]
-runs-on: ubuntu-20.04
+runs-on: ubuntu-latest
 # When we push to ACR, it's easy to fail due to some unknown network issues.
 # However, we don't want to fail the whole workflow because of this.
 # The ACR have daily sync with DockerHub, so don't worry about the image not being updated.
@@ -226,13 +223,14 @@ jobs:
|
|||||||
- uses: actions/checkout@v4
|
- uses: actions/checkout@v4
|
||||||
with:
|
with:
|
||||||
fetch-depth: 0
|
fetch-depth: 0
|
||||||
|
persist-credentials: false
|
||||||
|
|
||||||
- name: Release artifacts to CN region
|
- name: Release artifacts to CN region
|
||||||
uses: ./.github/actions/release-cn-artifacts
|
uses: ./.github/actions/release-cn-artifacts
|
||||||
with:
|
with:
|
||||||
src-image-registry: docker.io
|
src-image-registry: docker.io
|
||||||
src-image-namespace: ${{ vars.IMAGE_NAMESPACE }}
|
src-image-namespace: ${{ vars.IMAGE_NAMESPACE }}
|
||||||
src-image-name: ${{ env.IMAGE_NAME }}
|
src-image-name: ${{ vars.NIGHTLY_BUILD_IMAGE_NAME }}
|
||||||
dst-image-registry-username: ${{ secrets.ALICLOUD_USERNAME }}
|
dst-image-registry-username: ${{ secrets.ALICLOUD_USERNAME }}
|
||||||
dst-image-registry-password: ${{ secrets.ALICLOUD_PASSWORD }}
|
dst-image-registry-password: ${{ secrets.ALICLOUD_PASSWORD }}
|
||||||
dst-image-registry: ${{ vars.ACR_IMAGE_REGISTRY }}
|
dst-image-registry: ${{ vars.ACR_IMAGE_REGISTRY }}
|
||||||
@@ -242,15 +240,16 @@ jobs:
|
|||||||
aws-cn-access-key-id: ${{ secrets.AWS_CN_ACCESS_KEY_ID }}
|
aws-cn-access-key-id: ${{ secrets.AWS_CN_ACCESS_KEY_ID }}
|
||||||
aws-cn-secret-access-key: ${{ secrets.AWS_CN_SECRET_ACCESS_KEY }}
|
aws-cn-secret-access-key: ${{ secrets.AWS_CN_SECRET_ACCESS_KEY }}
|
||||||
aws-cn-region: ${{ vars.AWS_RELEASE_BUCKET_REGION }}
|
aws-cn-region: ${{ vars.AWS_RELEASE_BUCKET_REGION }}
|
||||||
|
upload-to-s3: false
|
||||||
dev-mode: false
|
dev-mode: false
|
||||||
update-version-info: false # Don't update version info in S3.
|
update-version-info: false # Don't update version info in S3.
|
||||||
push-latest-tag: true
|
push-latest-tag: false
|
||||||
|
|
||||||
stop-linux-amd64-runner: # It's always run as the last job in the workflow to make sure that the runner is released.
|
stop-linux-amd64-runner: # It's always run as the last job in the workflow to make sure that the runner is released.
|
||||||
name: Stop linux-amd64 runner
|
name: Stop linux-amd64 runner
|
||||||
# Only run this job when the runner is allocated.
|
# Only run this job when the runner is allocated.
|
||||||
if: ${{ always() }}
|
if: ${{ always() }}
|
||||||
runs-on: ubuntu-20.04
|
runs-on: ubuntu-latest
|
||||||
needs: [
|
needs: [
|
||||||
allocate-runners,
|
allocate-runners,
|
||||||
build-linux-amd64-artifacts,
|
build-linux-amd64-artifacts,
|
||||||
@@ -260,6 +259,7 @@ jobs:
|
|||||||
uses: actions/checkout@v4
|
uses: actions/checkout@v4
|
||||||
with:
|
with:
|
||||||
fetch-depth: 0
|
fetch-depth: 0
|
||||||
|
persist-credentials: false
|
||||||
|
|
||||||
- name: Stop EC2 runner
|
- name: Stop EC2 runner
|
||||||
uses: ./.github/actions/stop-runner
|
uses: ./.github/actions/stop-runner
|
||||||
@@ -275,7 +275,7 @@ jobs:
|
|||||||
name: Stop linux-arm64 runner
|
name: Stop linux-arm64 runner
|
||||||
# Only run this job when the runner is allocated.
|
# Only run this job when the runner is allocated.
|
||||||
if: ${{ always() }}
|
if: ${{ always() }}
|
||||||
runs-on: ubuntu-20.04
|
runs-on: ubuntu-latest
|
||||||
needs: [
|
needs: [
|
||||||
allocate-runners,
|
allocate-runners,
|
||||||
build-linux-arm64-artifacts,
|
build-linux-arm64-artifacts,
|
||||||
@@ -285,6 +285,7 @@ jobs:
|
|||||||
uses: actions/checkout@v4
|
uses: actions/checkout@v4
|
||||||
with:
|
with:
|
||||||
fetch-depth: 0
|
fetch-depth: 0
|
||||||
|
persist-credentials: false
|
||||||
|
|
||||||
- name: Stop EC2 runner
|
- name: Stop EC2 runner
|
||||||
uses: ./.github/actions/stop-runner
|
uses: ./.github/actions/stop-runner
|
||||||
@@ -302,11 +303,15 @@ jobs:
|
|||||||
needs: [
|
needs: [
|
||||||
release-images-to-dockerhub
|
release-images-to-dockerhub
|
||||||
]
|
]
|
||||||
runs-on: ubuntu-20.04
|
runs-on: ubuntu-latest
|
||||||
|
permissions:
|
||||||
|
issues: write
|
||||||
env:
|
env:
|
||||||
SLACK_WEBHOOK_URL: ${{ secrets.SLACK_WEBHOOK_URL_DEVELOP_CHANNEL }}
|
SLACK_WEBHOOK_URL: ${{ secrets.SLACK_WEBHOOK_URL_DEVELOP_CHANNEL }}
|
||||||
steps:
|
steps:
|
||||||
- uses: actions/checkout@v4
|
- uses: actions/checkout@v4
|
||||||
|
with:
|
||||||
|
persist-credentials: false
|
||||||
- uses: ./.github/actions/setup-cyborg
|
- uses: ./.github/actions/setup-cyborg
|
||||||
- name: Report CI status
|
- name: Report CI status
|
||||||
id: report-ci-status
|
id: report-ci-status
|
||||||
|
|||||||
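The hunks above drop the workflow-level permissions block (issues: write) and re-declare it only on the notification job that actually needs it, so every other job runs with the default read-only GITHUB_TOKEN. A minimal sketch of that shape, with made-up workflow and job names, purely to illustrate the scoping; it is not part of this repository:

    # Illustrative only: permissions scoped to a single job instead of the whole workflow.
    name: permissions-scope-demo
    on: workflow_dispatch

    # No workflow-level `permissions:` block; jobs default to a read-only token.
    jobs:
      notify:
        runs-on: ubuntu-latest
        permissions:
          issues: write   # Only this job's GITHUB_TOKEN may create or update issues.
        steps:
          - run: echo "This job has issues:write; all other jobs stay read-only."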
.github/workflows/nightly-ci.yml — 76 changed lines

@@ -9,19 +9,17 @@ concurrency:
   group: ${{ github.workflow }}-${{ github.head_ref || github.run_id }}
   cancel-in-progress: true

-permissions:
-  issues: write
-
 jobs:
   sqlness-test:
     name: Run sqlness test
     if: ${{ github.repository == 'GreptimeTeam/greptimedb' }}
-    runs-on: ubuntu-22.04
+    runs-on: ubuntu-latest
     steps:
       - name: Checkout
         uses: actions/checkout@v4
         with:
           fetch-depth: 0
+          persist-credentials: false

       - name: Check install.sh
         run: ./.github/scripts/check-install-script.sh
@@ -46,9 +44,14 @@ jobs:
     name: Sqlness tests on Windows
     if: ${{ github.repository == 'GreptimeTeam/greptimedb' }}
     runs-on: windows-2022-8-cores
+    permissions:
+      issues: write
     timeout-minutes: 60
     steps:
       - uses: actions/checkout@v4
+        with:
+          fetch-depth: 0
+          persist-credentials: false
       - uses: ./.github/actions/setup-cyborg
       - uses: arduino/setup-protoc@v3
         with:
@@ -76,6 +79,9 @@ jobs:
     steps:
       - run: git config --global core.autocrlf false
       - uses: actions/checkout@v4
+        with:
+          fetch-depth: 0
+          persist-credentials: false
       - uses: ./.github/actions/setup-cyborg
       - uses: arduino/setup-protoc@v3
         with:
@@ -108,67 +114,26 @@ jobs:
           GT_S3_REGION: ${{ vars.AWS_CI_TEST_BUCKET_REGION }}
           UNITTEST_LOG_DIR: "__unittest_logs"

-  ## this is designed for generating cache that usable for pull requests
-  test-on-linux:
-    name: Run tests on Linux
-    if: ${{ github.repository == 'GreptimeTeam/greptimedb' }}
-    runs-on: ubuntu-20.04-8-cores
-    timeout-minutes: 60
-    steps:
-      - uses: actions/checkout@v4
-      - uses: arduino/setup-protoc@v3
-        with:
-          repo-token: ${{ secrets.GITHUB_TOKEN }}
-      - uses: rui314/setup-mold@v1
-      - name: Install Rust toolchain
-        uses: actions-rust-lang/setup-rust-toolchain@v1
-      - name: Rust Cache
-        uses: Swatinem/rust-cache@v2
-        with:
-          # Shares cross multiple jobs
-          shared-key: "coverage-test"
-      - name: Install Cargo Nextest
-        uses: taiki-e/install-action@nextest
-      - name: Setup external services
-        working-directory: tests-integration/fixtures
-        run: docker compose up -d --wait
-      - name: Running tests
-        run: cargo nextest run -F dashboard -F pg_kvbackend
-        env:
-          CARGO_BUILD_RUSTFLAGS: "-C link-arg=-fuse-ld=mold"
-          RUST_BACKTRACE: 1
-          CARGO_INCREMENTAL: 0
-          GT_S3_BUCKET: ${{ vars.AWS_CI_TEST_BUCKET }}
-          GT_S3_ACCESS_KEY_ID: ${{ secrets.AWS_CI_TEST_ACCESS_KEY_ID }}
-          GT_S3_ACCESS_KEY: ${{ secrets.AWS_CI_TEST_SECRET_ACCESS_KEY }}
-          GT_S3_REGION: ${{ vars.AWS_CI_TEST_BUCKET_REGION }}
-          GT_MINIO_BUCKET: greptime
-          GT_MINIO_ACCESS_KEY_ID: superpower_ci_user
-          GT_MINIO_ACCESS_KEY: superpower_password
-          GT_MINIO_REGION: us-west-2
-          GT_MINIO_ENDPOINT_URL: http://127.0.0.1:9000
-          GT_ETCD_ENDPOINTS: http://127.0.0.1:2379
-          GT_POSTGRES_ENDPOINTS: postgres://greptimedb:admin@127.0.0.1:5432/postgres
-          GT_KAFKA_ENDPOINTS: 127.0.0.1:9092
-          GT_KAFKA_SASL_ENDPOINTS: 127.0.0.1:9093
-          UNITTEST_LOG_DIR: "__unittest_logs"
-
   cleanbuild-linux-nix:
     name: Run clean build on Linux
-    runs-on: ubuntu-latest-8-cores
+    runs-on: ubuntu-latest
+    if: ${{ github.repository == 'GreptimeTeam/greptimedb' }}
     timeout-minutes: 60
     steps:
       - uses: actions/checkout@v4
+        with:
+          fetch-depth: 0
+          persist-credentials: false
       - uses: cachix/install-nix-action@v27
         with:
-          nix_path: nixpkgs=channel:nixos-unstable
-      - run: nix-shell --pure --run "cargo build"
+          nix_path: nixpkgs=channel:nixos-24.11
+      - run: nix develop --command cargo build

   check-status:
     name: Check status
     needs: [sqlness-test, sqlness-windows, test-on-windows]
     if: ${{ github.repository == 'GreptimeTeam/greptimedb' }}
-    runs-on: ubuntu-20.04
+    runs-on: ubuntu-latest
     outputs:
       check-result: ${{ steps.set-check-result.outputs.check-result }}
     steps:
@@ -181,11 +146,14 @@ jobs:
     if: ${{ github.repository == 'GreptimeTeam/greptimedb' && always() }} # Not requiring successful dependent jobs, always run.
     name: Send notification to Greptime team
     needs: [check-status]
-    runs-on: ubuntu-20.04
+    runs-on: ubuntu-latest
     env:
       SLACK_WEBHOOK_URL: ${{ secrets.SLACK_WEBHOOK_URL_DEVELOP_CHANNEL }}
     steps:
       - uses: actions/checkout@v4
+        with:
+          fetch-depth: 0
+          persist-credentials: false
       - uses: ./.github/actions/setup-cyborg
       - name: Report CI status
         id: report-ci-status
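Most jobs in these nightly and scheduled workflows carry the guard `if: ${{ github.repository == 'GreptimeTeam/greptimedb' }}` so that scheduled triggers never fire on forks. A minimal sketch of that guard in isolation; the workflow and job names here are invented for illustration:

    # Illustrative sketch of the fork guard used throughout these workflows.
    name: fork-guard-demo
    on:
      schedule:
        - cron: '0 0 * * *'

    jobs:
      nightly-job:
        # Skip the job entirely when the workflow runs in a fork.
        if: ${{ github.repository == 'GreptimeTeam/greptimedb' }}
        runs-on: ubuntu-latest
        steps:
          - run: echo "Running in the upstream repository only"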
(The file header for the following hunks was not captured in this extract; they cover the dev-builder image release workflow.)

@@ -29,7 +29,7 @@ jobs:
   release-dev-builder-images:
     name: Release dev builder images
     if: ${{ inputs.release_dev_builder_ubuntu_image || inputs.release_dev_builder_centos_image || inputs.release_dev_builder_android_image }} # Only manually trigger this job.
-    runs-on: ubuntu-20.04-16-cores
+    runs-on: ubuntu-latest
     outputs:
       version: ${{ steps.set-version.outputs.version }}
     steps:
@@ -37,6 +37,7 @@ jobs:
         uses: actions/checkout@v4
         with:
           fetch-depth: 0
+          persist-credentials: false

       - name: Configure build image version
         id: set-version
@@ -62,7 +63,7 @@ jobs:

   release-dev-builder-images-ecr:
     name: Release dev builder images to AWS ECR
-    runs-on: ubuntu-20.04
+    runs-on: ubuntu-latest
     needs: [
       release-dev-builder-images
     ]
@@ -85,51 +86,69 @@ jobs:
       - name: Push dev-builder-ubuntu image
         shell: bash
         if: ${{ inputs.release_dev_builder_ubuntu_image }}
+        env:
+          IMAGE_VERSION: ${{ needs.release-dev-builder-images.outputs.version }}
+          IMAGE_NAMESPACE: ${{ vars.IMAGE_NAMESPACE }}
+          ECR_IMAGE_REGISTRY: ${{ vars.ECR_IMAGE_REGISTRY }}
+          ECR_IMAGE_NAMESPACE: ${{ vars.ECR_IMAGE_NAMESPACE }}
         run: |
           docker run -v "${DOCKER_CONFIG:-$HOME/.docker}:/root/.docker:ro" \
             -e "REGISTRY_AUTH_FILE=/root/.docker/config.json" \
             quay.io/skopeo/stable:latest \
-            copy -a docker://docker.io/${{ vars.IMAGE_NAMESPACE }}/dev-builder-ubuntu:${{ needs.release-dev-builder-images.outputs.version }} \
-            docker://${{ vars.ECR_IMAGE_REGISTRY }}/${{ vars.ECR_IMAGE_NAMESPACE }}/dev-builder-ubuntu:${{ needs.release-dev-builder-images.outputs.version }}
+            copy -a docker://docker.io/$IMAGE_NAMESPACE/dev-builder-ubuntu:$IMAGE_VERSION \
+            docker://$ECR_IMAGE_REGISTRY/$ECR_IMAGE_NAMESPACE/dev-builder-ubuntu:$IMAGE_VERSION

           docker run -v "${DOCKER_CONFIG:-$HOME/.docker}:/root/.docker:ro" \
             -e "REGISTRY_AUTH_FILE=/root/.docker/config.json" \
             quay.io/skopeo/stable:latest \
-            copy -a docker://docker.io/${{ vars.IMAGE_NAMESPACE }}/dev-builder-ubuntu:latest \
-            docker://${{ vars.ECR_IMAGE_REGISTRY }}/${{ vars.ECR_IMAGE_NAMESPACE }}/dev-builder-ubuntu:latest
+            copy -a docker://docker.io/$IMAGE_NAMESPACE/dev-builder-ubuntu:latest \
+            docker://$ECR_IMAGE_REGISTRY/$ECR_IMAGE_NAMESPACE/dev-builder-ubuntu:latest

       - name: Push dev-builder-centos image
         shell: bash
         if: ${{ inputs.release_dev_builder_centos_image }}
+        env:
+          IMAGE_VERSION: ${{ needs.release-dev-builder-images.outputs.version }}
+          IMAGE_NAMESPACE: ${{ vars.IMAGE_NAMESPACE }}
+          ECR_IMAGE_REGISTRY: ${{ vars.ECR_IMAGE_REGISTRY }}
+          ECR_IMAGE_NAMESPACE: ${{ vars.ECR_IMAGE_NAMESPACE }}
         run: |
           docker run -v "${DOCKER_CONFIG:-$HOME/.docker}:/root/.docker:ro" \
             -e "REGISTRY_AUTH_FILE=/root/.docker/config.json" \
             quay.io/skopeo/stable:latest \
-            copy -a docker://docker.io/${{ vars.IMAGE_NAMESPACE }}/dev-builder-centos:${{ needs.release-dev-builder-images.outputs.version }} \
-            docker://${{ vars.ECR_IMAGE_REGISTRY }}/${{ vars.ECR_IMAGE_NAMESPACE }}/dev-builder-centos:${{ needs.release-dev-builder-images.outputs.version }}
+            copy -a docker://docker.io/$IMAGE_NAMESPACE/dev-builder-centos:$IMAGE_VERSION \
+            docker://$ECR_IMAGE_REGISTRY/$ECR_IMAGE_NAMESPACE/dev-builder-centos:$IMAGE_VERSION

           docker run -v "${DOCKER_CONFIG:-$HOME/.docker}:/root/.docker:ro" \
             -e "REGISTRY_AUTH_FILE=/root/.docker/config.json" \
             quay.io/skopeo/stable:latest \
-            copy -a docker://docker.io/${{ vars.IMAGE_NAMESPACE }}/dev-builder-centos:latest \
-            docker://${{ vars.ECR_IMAGE_REGISTRY }}/${{ vars.ECR_IMAGE_NAMESPACE }}/dev-builder-centos:latest
+            copy -a docker://docker.io/$IMAGE_NAMESPACE/dev-builder-centos:latest \
+            docker://$ECR_IMAGE_REGISTRY/$ECR_IMAGE_NAMESPACE/dev-builder-centos:latest

       - name: Push dev-builder-android image
         shell: bash
         if: ${{ inputs.release_dev_builder_android_image }}
+        env:
+          IMAGE_VERSION: ${{ needs.release-dev-builder-images.outputs.version }}
+          IMAGE_NAMESPACE: ${{ vars.IMAGE_NAMESPACE }}
+          ECR_IMAGE_REGISTRY: ${{ vars.ECR_IMAGE_REGISTRY }}
+          ECR_IMAGE_NAMESPACE: ${{ vars.ECR_IMAGE_NAMESPACE }}
         run: |
           docker run -v "${DOCKER_CONFIG:-$HOME/.docker}:/root/.docker:ro" \
             -e "REGISTRY_AUTH_FILE=/root/.docker/config.json" \
             quay.io/skopeo/stable:latest \
-            copy -a docker://docker.io/${{ vars.IMAGE_NAMESPACE }}/dev-builder-android:${{ needs.release-dev-builder-images.outputs.version }} \
-            docker://${{ vars.ECR_IMAGE_REGISTRY }}/${{ vars.ECR_IMAGE_NAMESPACE }}/dev-builder-android:${{ needs.release-dev-builder-images.outputs.version }}
+            copy -a docker://docker.io/$IMAGE_NAMESPACE/dev-builder-android:$IMAGE_VERSION \
+            docker://$ECR_IMAGE_REGISTRY/$ECR_IMAGE_NAMESPACE/dev-builder-android:$IMAGE_VERSION

           docker run -v "${DOCKER_CONFIG:-$HOME/.docker}:/root/.docker:ro" \
             -e "REGISTRY_AUTH_FILE=/root/.docker/config.json" \
             quay.io/skopeo/stable:latest \
-            copy -a docker://docker.io/${{ vars.IMAGE_NAMESPACE }}/dev-builder-android:latest \
-            docker://${{ vars.ECR_IMAGE_REGISTRY }}/${{ vars.ECR_IMAGE_NAMESPACE }}/dev-builder-android:latest
+            copy -a docker://docker.io/$IMAGE_NAMESPACE/dev-builder-android:latest \
+            docker://$ECR_IMAGE_REGISTRY/$ECR_IMAGE_NAMESPACE/dev-builder-android:latest

   release-dev-builder-images-cn: # Note: Be careful issue: https://github.com/containers/skopeo/issues/1874 and we decide to use the latest stable skopeo container.
     name: Release dev builder images to CN region
-    runs-on: ubuntu-20.04
+    runs-on: ubuntu-latest
     needs: [
       release-dev-builder-images
     ]
@@ -144,29 +163,41 @@ jobs:
       - name: Push dev-builder-ubuntu image
         shell: bash
         if: ${{ inputs.release_dev_builder_ubuntu_image }}
+        env:
+          IMAGE_VERSION: ${{ needs.release-dev-builder-images.outputs.version }}
+          IMAGE_NAMESPACE: ${{ vars.IMAGE_NAMESPACE }}
+          ACR_IMAGE_REGISTRY: ${{ vars.ACR_IMAGE_REGISTRY }}
         run: |
           docker run -v "${DOCKER_CONFIG:-$HOME/.docker}:/root/.docker:ro" \
             -e "REGISTRY_AUTH_FILE=/root/.docker/config.json" \
             quay.io/skopeo/stable:latest \
-            copy -a docker://docker.io/${{ vars.IMAGE_NAMESPACE }}/dev-builder-ubuntu:${{ needs.release-dev-builder-images.outputs.version }} \
-            docker://${{ vars.ACR_IMAGE_REGISTRY }}/${{ vars.IMAGE_NAMESPACE }}/dev-builder-ubuntu:${{ needs.release-dev-builder-images.outputs.version }}
+            copy -a docker://docker.io/$IMAGE_NAMESPACE/dev-builder-ubuntu:$IMAGE_VERSION \
+            docker://$ACR_IMAGE_REGISTRY/$IMAGE_NAMESPACE/dev-builder-ubuntu:$IMAGE_VERSION

       - name: Push dev-builder-centos image
         shell: bash
         if: ${{ inputs.release_dev_builder_centos_image }}
+        env:
+          IMAGE_VERSION: ${{ needs.release-dev-builder-images.outputs.version }}
+          IMAGE_NAMESPACE: ${{ vars.IMAGE_NAMESPACE }}
+          ACR_IMAGE_REGISTRY: ${{ vars.ACR_IMAGE_REGISTRY }}
         run: |
           docker run -v "${DOCKER_CONFIG:-$HOME/.docker}:/root/.docker:ro" \
             -e "REGISTRY_AUTH_FILE=/root/.docker/config.json" \
             quay.io/skopeo/stable:latest \
-            copy -a docker://docker.io/${{ vars.IMAGE_NAMESPACE }}/dev-builder-centos:${{ needs.release-dev-builder-images.outputs.version }} \
-            docker://${{ vars.ACR_IMAGE_REGISTRY }}/${{ vars.IMAGE_NAMESPACE }}/dev-builder-centos:${{ needs.release-dev-builder-images.outputs.version }}
+            copy -a docker://docker.io/$IMAGE_NAMESPACE/dev-builder-centos:$IMAGE_VERSION \
+            docker://$ACR_IMAGE_REGISTRY/$IMAGE_NAMESPACE/dev-builder-centos:$IMAGE_VERSION

       - name: Push dev-builder-android image
         shell: bash
         if: ${{ inputs.release_dev_builder_android_image }}
+        env:
+          IMAGE_VERSION: ${{ needs.release-dev-builder-images.outputs.version }}
+          IMAGE_NAMESPACE: ${{ vars.IMAGE_NAMESPACE }}
+          ACR_IMAGE_REGISTRY: ${{ vars.ACR_IMAGE_REGISTRY }}
         run: |
           docker run -v "${DOCKER_CONFIG:-$HOME/.docker}:/root/.docker:ro" \
             -e "REGISTRY_AUTH_FILE=/root/.docker/config.json" \
             quay.io/skopeo/stable:latest \
-            copy -a docker://docker.io/${{ vars.IMAGE_NAMESPACE }}/dev-builder-android:${{ needs.release-dev-builder-images.outputs.version }} \
-            docker://${{ vars.ACR_IMAGE_REGISTRY }}/${{ vars.IMAGE_NAMESPACE }}/dev-builder-android:${{ needs.release-dev-builder-images.outputs.version }}
+            copy -a docker://docker.io/$IMAGE_NAMESPACE/dev-builder-android:$IMAGE_VERSION \
+            docker://$ACR_IMAGE_REGISTRY/$IMAGE_NAMESPACE/dev-builder-android:$IMAGE_VERSION
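The hunks above rewrite the skopeo copy commands so that workflow expressions such as ${{ vars.IMAGE_NAMESPACE }} are passed to the shell through an env: block instead of being interpolated directly into the run: script, keeping the script body static. A sketch of the resulting step shape, reduced to a single copy; the values mirror the diff above but the step is trimmed for brevity and is illustrative rather than the exact workflow text:

    # Sketch of one skopeo copy step after the env-var refactor (trimmed).
    - name: Push dev-builder-ubuntu image
      shell: bash
      env:
        IMAGE_VERSION: ${{ needs.release-dev-builder-images.outputs.version }}
        IMAGE_NAMESPACE: ${{ vars.IMAGE_NAMESPACE }}
        ECR_IMAGE_REGISTRY: ${{ vars.ECR_IMAGE_REGISTRY }}
        ECR_IMAGE_NAMESPACE: ${{ vars.ECR_IMAGE_NAMESPACE }}
      run: |
        # The script body references plain shell variables; no ${{ }} interpolation inside it.
        docker run -v "${DOCKER_CONFIG:-$HOME/.docker}:/root/.docker:ro" \
          -e "REGISTRY_AUTH_FILE=/root/.docker/config.json" \
          quay.io/skopeo/stable:latest \
          copy -a docker://docker.io/$IMAGE_NAMESPACE/dev-builder-ubuntu:$IMAGE_VERSION \
          docker://$ECR_IMAGE_REGISTRY/$ECR_IMAGE_NAMESPACE/dev-builder-ubuntu:$IMAGE_VERSION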
.github/workflows/release.yml — 62 changed lines

@@ -18,11 +18,11 @@ on:
       description: The runner uses to build linux-amd64 artifacts
       default: ec2-c6i.4xlarge-amd64
       options:
-        - ubuntu-20.04
-        - ubuntu-20.04-8-cores
-        - ubuntu-20.04-16-cores
-        - ubuntu-20.04-32-cores
-        - ubuntu-20.04-64-cores
+        - ubuntu-22.04
+        - ubuntu-22.04-8-cores
+        - ubuntu-22.04-16-cores
+        - ubuntu-22.04-32-cores
+        - ubuntu-22.04-64-cores
         - ec2-c6i.xlarge-amd64 # 4C8G
         - ec2-c6i.2xlarge-amd64 # 8C16G
         - ec2-c6i.4xlarge-amd64 # 16C32G
@@ -91,18 +91,13 @@ env:
   # The scheduled version is '${{ env.NEXT_RELEASE_VERSION }}-nightly-YYYYMMDD', like v0.2.0-nigthly-20230313;
   NIGHTLY_RELEASE_PREFIX: nightly
   # Note: The NEXT_RELEASE_VERSION should be modified manually by every formal release.
-  NEXT_RELEASE_VERSION: v0.11.0
+  NEXT_RELEASE_VERSION: v0.13.0

-# Permission reference: https://docs.github.com/en/actions/using-jobs/assigning-permissions-to-jobs
-permissions:
-  issues: write # Allows the action to create issues for cyborg.
-  contents: write # Allows the action to create a release.
-
 jobs:
   allocate-runners:
     name: Allocate runners
     if: ${{ github.repository == 'GreptimeTeam/greptimedb' }}
-    runs-on: ubuntu-20.04
+    runs-on: ubuntu-latest
     outputs:
       linux-amd64-runner: ${{ steps.start-linux-amd64-runner.outputs.label }}
       linux-arm64-runner: ${{ steps.start-linux-arm64-runner.outputs.label }}
@@ -122,6 +117,7 @@ jobs:
         uses: actions/checkout@v4
         with:
           fetch-depth: 0
+          persist-credentials: false

       - name: Check Rust toolchain version
         shell: bash
@@ -181,6 +177,7 @@ jobs:
       - uses: actions/checkout@v4
         with:
           fetch-depth: 0
+          persist-credentials: false

       - uses: ./.github/actions/build-linux-artifacts
         with:
@@ -202,6 +199,7 @@ jobs:
       - uses: actions/checkout@v4
         with:
           fetch-depth: 0
+          persist-credentials: false

       - uses: ./.github/actions/build-linux-artifacts
         with:
@@ -237,6 +235,7 @@ jobs:
       - uses: actions/checkout@v4
         with:
           fetch-depth: 0
+          persist-credentials: false

       - uses: ./.github/actions/build-macos-artifacts
         with:
@@ -276,6 +275,7 @@ jobs:
       - uses: actions/checkout@v4
         with:
           fetch-depth: 0
+          persist-credentials: false

       - uses: ./.github/actions/build-windows-artifacts
         with:
@@ -299,22 +299,25 @@ jobs:
       build-linux-amd64-artifacts,
       build-linux-arm64-artifacts,
     ]
-    runs-on: ubuntu-2004-16-cores
+    runs-on: ubuntu-latest
     outputs:
       build-image-result: ${{ steps.set-build-image-result.outputs.build-image-result }}
     steps:
       - uses: actions/checkout@v4
         with:
           fetch-depth: 0
+          persist-credentials: false

       - name: Build and push images to dockerhub
         uses: ./.github/actions/build-images
         with:
           image-registry: docker.io
           image-namespace: ${{ vars.IMAGE_NAMESPACE }}
+          image-name: ${{ vars.GREPTIMEDB_IMAGE_NAME }}
           image-registry-username: ${{ secrets.DOCKERHUB_USERNAME }}
           image-registry-password: ${{ secrets.DOCKERHUB_TOKEN }}
           version: ${{ needs.allocate-runners.outputs.version }}
+          push-latest-tag: true

       - name: Set build image result
         id: set-build-image-result
@@ -332,7 +335,7 @@ jobs:
       build-windows-artifacts,
       release-images-to-dockerhub,
     ]
-    runs-on: ubuntu-20.04
+    runs-on: ubuntu-latest
     # When we push to ACR, it's easy to fail due to some unknown network issues.
     # However, we don't want to fail the whole workflow because of this.
     # The ACR have daily sync with DockerHub, so don't worry about the image not being updated.
@@ -341,13 +344,14 @@ jobs:
      - uses: actions/checkout@v4
        with:
          fetch-depth: 0
+          persist-credentials: false

      - name: Release artifacts to CN region
        uses: ./.github/actions/release-cn-artifacts
        with:
          src-image-registry: docker.io
          src-image-namespace: ${{ vars.IMAGE_NAMESPACE }}
-          src-image-name: greptimedb
+          src-image-name: ${{ vars.GREPTIMEDB_IMAGE_NAME }}
          dst-image-registry-username: ${{ secrets.ALICLOUD_USERNAME }}
          dst-image-registry-password: ${{ secrets.ALICLOUD_PASSWORD }}
          dst-image-registry: ${{ vars.ACR_IMAGE_REGISTRY }}
@@ -358,6 +362,7 @@ jobs:
          aws-cn-secret-access-key: ${{ secrets.AWS_CN_SECRET_ACCESS_KEY }}
          aws-cn-region: ${{ vars.AWS_RELEASE_BUCKET_REGION }}
          dev-mode: false
+          upload-to-s3: true
          update-version-info: true
          push-latest-tag: true
@@ -372,11 +377,12 @@ jobs:
       build-windows-artifacts,
       release-images-to-dockerhub,
     ]
-    runs-on: ubuntu-20.04
+    runs-on: ubuntu-latest
     steps:
       - uses: actions/checkout@v4
         with:
           fetch-depth: 0
+          persist-credentials: false

       - name: Publish GitHub release
         uses: ./.github/actions/publish-github-release
@@ -390,7 +396,7 @@ jobs:
     name: Stop linux-amd64 runner
     # Only run this job when the runner is allocated.
     if: ${{ always() }}
-    runs-on: ubuntu-20.04
+    runs-on: ubuntu-latest
     needs: [
       allocate-runners,
       build-linux-amd64-artifacts,
@@ -400,6 +406,7 @@ jobs:
         uses: actions/checkout@v4
         with:
           fetch-depth: 0
+          persist-credentials: false

       - name: Stop EC2 runner
         uses: ./.github/actions/stop-runner
@@ -415,7 +422,7 @@ jobs:
     name: Stop linux-arm64 runner
     # Only run this job when the runner is allocated.
     if: ${{ always() }}
-    runs-on: ubuntu-20.04
+    runs-on: ubuntu-latest
     needs: [
       allocate-runners,
       build-linux-arm64-artifacts,
@@ -425,6 +432,7 @@ jobs:
         uses: actions/checkout@v4
         with:
           fetch-depth: 0
+          persist-credentials: false

       - name: Stop EC2 runner
         uses: ./.github/actions/stop-runner
@@ -440,9 +448,16 @@ jobs:
     name: Bump doc version
     if: ${{ github.event_name == 'push' || github.event_name == 'schedule' }}
     needs: [allocate-runners]
-    runs-on: ubuntu-20.04
+    runs-on: ubuntu-latest
+    # Permission reference: https://docs.github.com/en/actions/using-jobs/assigning-permissions-to-jobs
+    permissions:
+      issues: write # Allows the action to create issues for cyborg.
+      contents: write # Allows the action to create a release.
     steps:
       - uses: actions/checkout@v4
+        with:
+          fetch-depth: 0
+          persist-credentials: false
       - uses: ./.github/actions/setup-cyborg
       - name: Bump doc version
         working-directory: cyborg
@@ -460,11 +475,18 @@ jobs:
       build-macos-artifacts,
       build-windows-artifacts,
     ]
-    runs-on: ubuntu-20.04
+    runs-on: ubuntu-latest
+    # Permission reference: https://docs.github.com/en/actions/using-jobs/assigning-permissions-to-jobs
+    permissions:
+      issues: write # Allows the action to create issues for cyborg.
+      contents: write # Allows the action to create a release.
     env:
       SLACK_WEBHOOK_URL: ${{ secrets.SLACK_WEBHOOK_URL_DEVELOP_CHANNEL }}
     steps:
       - uses: actions/checkout@v4
+        with:
+          fetch-depth: 0
+          persist-credentials: false
       - uses: ./.github/actions/setup-cyborg
       - name: Report CI status
         id: report-ci-status
.github/workflows/schedule.yml — 10 changed lines

@@ -4,18 +4,20 @@ on:
     - cron: '4 2 * * *'
   workflow_dispatch:

-permissions:
-  contents: read
-  issues: write
-  pull-requests: write
-
 jobs:
   maintenance:
     name: Periodic Maintenance
     runs-on: ubuntu-latest
+    permissions:
+      contents: read
+      issues: write
+      pull-requests: write
     if: ${{ github.repository == 'GreptimeTeam/greptimedb' }}
     steps:
       - uses: actions/checkout@v4
+        with:
+          persist-credentials: false
       - uses: ./.github/actions/setup-cyborg
       - name: Do Maintenance
         working-directory: cyborg
.github/workflows/semantic-pull-request.yml — 10 changed lines

@@ -1,18 +1,24 @@
 name: "Semantic Pull Request"

 on:
-  pull_request_target:
+  pull_request:
     types:
       - opened
       - reopened
       - edited

+concurrency:
+  group: ${{ github.workflow }}-${{ github.head_ref || github.run_id }}
+  cancel-in-progress: true
+
 jobs:
   check:
-    runs-on: ubuntu-20.04
+    runs-on: ubuntu-latest
     timeout-minutes: 10
     steps:
       - uses: actions/checkout@v4
+        with:
+          persist-credentials: false
       - uses: ./.github/actions/setup-cyborg
       - name: Check Pull Request
         working-directory: cyborg
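The semantic-pull-request workflow above switches its trigger from pull_request_target to pull_request and adds a concurrency group, so that each edit of a PR cancels the previous in-flight check run instead of queueing behind it. A minimal sketch of that pattern; the workflow name and the echo step are invented for illustration:

    # Illustrative sketch of the concurrency pattern added above.
    name: pr-title-check-demo
    on:
      pull_request:
        types: [opened, reopened, edited]

    # One in-flight run per PR branch; a newer event cancels the older run.
    concurrency:
      group: ${{ github.workflow }}-${{ github.head_ref || github.run_id }}
      cancel-in-progress: true

    jobs:
      check:
        runs-on: ubuntu-latest
        steps:
          - run: echo "Validate the PR title here"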
AUTHOR.md — 21 changed lines

@@ -3,30 +3,28 @@
 ## Individual Committers (in alphabetical order)

 * [CookiePieWw](https://github.com/CookiePieWw)
-* [KKould](https://github.com/KKould)
-* [NiwakaDev](https://github.com/NiwakaDev)
 * [etolbakov](https://github.com/etolbakov)
 * [irenjj](https://github.com/irenjj)
-* [tisonkun](https://github.com/tisonkun)
+* [KKould](https://github.com/KKould)
 * [Lanqing Yang](https://github.com/lyang24)
+* [NiwakaDev](https://github.com/NiwakaDev)
+* [tisonkun](https://github.com/tisonkun)

 ## Team Members (in alphabetical order)

-* [Breeze-P](https://github.com/Breeze-P)
-* [GrepTime](https://github.com/GrepTime)
-* [MichaelScofield](https://github.com/MichaelScofield)
-* [Wenjie0329](https://github.com/Wenjie0329)
-* [WenyXu](https://github.com/WenyXu)
-* [ZonaHex](https://github.com/ZonaHex)
 * [apdong2022](https://github.com/apdong2022)
 * [beryl678](https://github.com/beryl678)
+* [Breeze-P](https://github.com/Breeze-P)
 * [daviderli614](https://github.com/daviderli614)
 * [discord9](https://github.com/discord9)
 * [evenyag](https://github.com/evenyag)
 * [fengjiachun](https://github.com/fengjiachun)
 * [fengys1996](https://github.com/fengys1996)
+* [GrepTime](https://github.com/GrepTime)
 * [holalengyu](https://github.com/holalengyu)
 * [killme2008](https://github.com/killme2008)
+* [MichaelScofield](https://github.com/MichaelScofield)
 * [nicecui](https://github.com/nicecui)
 * [paomian](https://github.com/paomian)
 * [shuiyisong](https://github.com/shuiyisong)
@@ -34,11 +32,14 @@
 * [sunng87](https://github.com/sunng87)
 * [v0y4g3r](https://github.com/v0y4g3r)
 * [waynexia](https://github.com/waynexia)
+* [Wenjie0329](https://github.com/Wenjie0329)
+* [WenyXu](https://github.com/WenyXu)
 * [xtang](https://github.com/xtang)
 * [zhaoyingnan01](https://github.com/zhaoyingnan01)
 * [zhongzc](https://github.com/zhongzc)
+* [ZonaHex](https://github.com/ZonaHex)
 * [zyy17](https://github.com/zyy17)

 ## All Contributors

-[](https://github.com/GreptimeTeam/greptimedb/graphs/contributors)
+To see the full list of contributors, please visit our [Contributors page](https://github.com/GreptimeTeam/greptimedb/graphs/contributors)
Cargo.lock (generated) — 4052 changed lines (file diff suppressed because it is too large)
Cargo.toml — 94 changed lines

@@ -55,7 +55,6 @@ members = [
     "src/promql",
     "src/puffin",
     "src/query",
-    "src/script",
     "src/servers",
     "src/session",
     "src/sql",
@@ -68,7 +67,7 @@ members = [
 resolver = "2"

 [workspace.package]
-version = "0.11.3"
+version = "0.13.0"
 edition = "2021"
 license = "Apache-2.0"

@@ -79,11 +78,10 @@ clippy.dbg_macro = "warn"
 clippy.implicit_clone = "warn"
 clippy.readonly_write_lock = "allow"
 rust.unknown_lints = "deny"
-# Remove this after https://github.com/PyO3/pyo3/issues/4094
-rust.non_local_definitions = "allow"
 rust.unexpected_cfgs = { level = "warn", check-cfg = ['cfg(tokio_unstable)'] }

 [workspace.dependencies]
+# DO_NOT_REMOVE_THIS: BEGIN_OF_EXTERNAL_DEPENDENCIES
 # We turn off default-features for some dependencies here so the workspaces which inherit them can
 # selectively turn them on if needed, since we can override default-features = true (from false)
 # for the inherited dependency but cannot do the reverse (override from true to false).
@@ -91,55 +89,67 @@ rust.unexpected_cfgs = { level = "warn", check-cfg = ['cfg(tokio_unstable)'] }
 # See for more detaiils: https://github.com/rust-lang/cargo/issues/11329
 ahash = { version = "0.8", features = ["compile-time-rng"] }
 aquamarine = "0.3"
-arrow = { version = "51.0.0", features = ["prettyprint"] }
-arrow-array = { version = "51.0.0", default-features = false, features = ["chrono-tz"] }
-arrow-flight = "51.0"
-arrow-ipc = { version = "51.0.0", default-features = false, features = ["lz4", "zstd"] }
-arrow-schema = { version = "51.0", features = ["serde"] }
+arrow = { version = "53.0.0", features = ["prettyprint"] }
+arrow-array = { version = "53.0.0", default-features = false, features = ["chrono-tz"] }
+arrow-flight = "53.0"
+arrow-ipc = { version = "53.0.0", default-features = false, features = ["lz4", "zstd"] }
+arrow-schema = { version = "53.0", features = ["serde"] }
 async-stream = "0.3"
 async-trait = "0.1"
-axum = { version = "0.6", features = ["headers"] }
+# Remember to update axum-extra, axum-macros when updating axum
+axum = "0.8"
+axum-extra = "0.10"
+axum-macros = "0.4"
+backon = "1"
 base64 = "0.21"
 bigdecimal = "0.4.2"
 bitflags = "2.4.1"
 bytemuck = "1.12"
 bytes = { version = "1.7", features = ["serde"] }
 chrono = { version = "0.4", features = ["serde"] }
+chrono-tz = "0.10.1"
 clap = { version = "4.4", features = ["derive"] }
 config = "0.13.0"
 crossbeam-utils = "0.8"
 dashmap = "5.4"
-datafusion = { git = "https://github.com/waynexia/arrow-datafusion.git", rev = "7823ef2f63663907edab46af0d51359900f608d6" }
-datafusion-common = { git = "https://github.com/waynexia/arrow-datafusion.git", rev = "7823ef2f63663907edab46af0d51359900f608d6" }
-datafusion-expr = { git = "https://github.com/waynexia/arrow-datafusion.git", rev = "7823ef2f63663907edab46af0d51359900f608d6" }
-datafusion-functions = { git = "https://github.com/waynexia/arrow-datafusion.git", rev = "7823ef2f63663907edab46af0d51359900f608d6" }
-datafusion-optimizer = { git = "https://github.com/waynexia/arrow-datafusion.git", rev = "7823ef2f63663907edab46af0d51359900f608d6" }
-datafusion-physical-expr = { git = "https://github.com/waynexia/arrow-datafusion.git", rev = "7823ef2f63663907edab46af0d51359900f608d6" }
-datafusion-physical-plan = { git = "https://github.com/waynexia/arrow-datafusion.git", rev = "7823ef2f63663907edab46af0d51359900f608d6" }
-datafusion-sql = { git = "https://github.com/waynexia/arrow-datafusion.git", rev = "7823ef2f63663907edab46af0d51359900f608d6" }
-datafusion-substrait = { git = "https://github.com/waynexia/arrow-datafusion.git", rev = "7823ef2f63663907edab46af0d51359900f608d6" }
+datafusion = { git = "https://github.com/apache/datafusion.git", rev = "2464703c84c400a09cc59277018813f0e797bb4e" }
+datafusion-common = { git = "https://github.com/apache/datafusion.git", rev = "2464703c84c400a09cc59277018813f0e797bb4e" }
+datafusion-expr = { git = "https://github.com/apache/datafusion.git", rev = "2464703c84c400a09cc59277018813f0e797bb4e" }
+datafusion-functions = { git = "https://github.com/apache/datafusion.git", rev = "2464703c84c400a09cc59277018813f0e797bb4e" }
+datafusion-optimizer = { git = "https://github.com/apache/datafusion.git", rev = "2464703c84c400a09cc59277018813f0e797bb4e" }
+datafusion-physical-expr = { git = "https://github.com/apache/datafusion.git", rev = "2464703c84c400a09cc59277018813f0e797bb4e" }
+datafusion-physical-plan = { git = "https://github.com/apache/datafusion.git", rev = "2464703c84c400a09cc59277018813f0e797bb4e" }
+datafusion-sql = { git = "https://github.com/apache/datafusion.git", rev = "2464703c84c400a09cc59277018813f0e797bb4e" }
+datafusion-substrait = { git = "https://github.com/apache/datafusion.git", rev = "2464703c84c400a09cc59277018813f0e797bb4e" }
+deadpool = "0.10"
+deadpool-postgres = "0.12"
 derive_builder = "0.12"
 dotenv = "0.15"
-etcd-client = "0.13"
+etcd-client = "0.14"
+flate2 = { version = "1.1.0", default-features = false, features = ["zlib-rs"] }
 fst = "0.4.7"
 futures = "0.3"
 futures-util = "0.3"
-greptime-proto = { git = "https://github.com/GreptimeTeam/greptime-proto.git", rev = "43ddd8dea69f4df0fe2e8b5cdc0044d2cfa35908" }
+greptime-proto = { git = "https://github.com/GreptimeTeam/greptime-proto.git", rev = "c5419bbd20cb42e568ec325a4d71a3c94cc327e1" }
 hex = "0.4"
-http = "0.2"
+http = "1"
 humantime = "2.1"
 humantime-serde = "1.1"
+hyper = "1.1"
+hyper-util = "0.1"
 itertools = "0.10"
 jsonb = { git = "https://github.com/databendlabs/jsonb.git", rev = "8c8d2fc294a39f3ff08909d60f718639cfba3875", default-features = false }
 lazy_static = "1.4"
-meter-core = { git = "https://github.com/GreptimeTeam/greptime-meter.git", rev = "a10facb353b41460eeb98578868ebf19c2084fac" }
+local-ip-address = "0.6"
+loki-proto = { git = "https://github.com/GreptimeTeam/loki-proto.git", rev = "1434ecf23a2654025d86188fb5205e7a74b225d3" }
+meter-core = { git = "https://github.com/GreptimeTeam/greptime-meter.git", rev = "5618e779cf2bb4755b499c630fba4c35e91898cb" }
 mockall = "0.11.4"
 moka = "0.12"
 nalgebra = "0.33"
 notify = "6.1"
 num_cpus = "1.16"
 once_cell = "1.18"
-opentelemetry-proto = { version = "0.5", features = [
+opentelemetry-proto = { version = "0.27", features = [
     "gen-tonic",
     "metrics",
     "trace",
@@ -147,12 +157,12 @@ opentelemetry-proto = { version = "0.5", features = [
     "logs",
 ] }
 parking_lot = "0.12"
-parquet = { version = "51.0.0", default-features = false, features = ["arrow", "async", "object_store"] }
+parquet = { version = "53.0.0", default-features = false, features = ["arrow", "async", "object_store"] }
 paste = "1.0"
 pin-project = "1.0"
 prometheus = { version = "0.13.3", features = ["process"] }
-promql-parser = { version = "0.4.3", features = ["ser"] }
-prost = "0.12"
+promql-parser = { version = "0.5", features = ["ser"] }
+prost = "0.13"
 raft-engine = { version = "0.4.1", default-features = false }
 rand = "0.8"
 ratelimit = "0.9"
@@ -171,33 +181,40 @@ rstest = "0.21"
 rstest_reuse = "0.7"
 rust_decimal = "1.33"
 rustc-hash = "2.0"
+rustls = { version = "0.23.20", default-features = false } # override by patch, see [patch.crates-io]
 serde = { version = "1.0", features = ["derive"] }
 serde_json = { version = "1.0", features = ["float_roundtrip"] }
 serde_with = "3"
-shadow-rs = "0.35"
+shadow-rs = "0.38"
 similar-asserts = "1.6.0"
 smallvec = { version = "1", features = ["serde"] }
 snafu = "0.8"
+sqlx = { version = "0.8", features = [
+    "runtime-tokio-rustls",
+    "mysql",
+] }
 sysinfo = "0.30"
-# on branch v0.44.x
-sqlparser = { git = "https://github.com/GreptimeTeam/sqlparser-rs.git", rev = "54a267ac89c09b11c0c88934690530807185d3e7", features = [
+# on branch v0.52.x
+sqlparser = { git = "https://github.com/GreptimeTeam/sqlparser-rs.git", rev = "71dd86058d2af97b9925093d40c4e03360403170", features = [
     "visitor",
     "serde",
-] }
+] } # on branch v0.44.x
 strum = { version = "0.25", features = ["derive"] }
 tempfile = "3"
 tokio = { version = "1.40", features = ["full"] }
 tokio-postgres = "0.7"
+tokio-rustls = { version = "0.26.0", default-features = false } # override by patch, see [patch.crates-io]
 tokio-stream = "0.1"
 tokio-util = { version = "0.7", features = ["io-util", "compat"] }
 toml = "0.8.8"
-tonic = { version = "0.11", features = ["tls", "gzip", "zstd"] }
-tower = "0.4"
+tonic = { version = "0.12", features = ["tls", "gzip", "zstd"] }
+tower = "0.5"
 tracing-appender = "0.2"
 tracing-subscriber = { version = "0.3", features = ["env-filter", "json", "fmt"] }
 typetag = "0.2"
 uuid = { version = "1.7", features = ["serde", "v4", "fast-rng"] }
 zstd = "0.13"
+# DO_NOT_REMOVE_THIS: END_OF_EXTERNAL_DEPENDENCIES

 ## workspaces members
 api = { path = "src/api" }
@@ -254,7 +271,6 @@ plugins = { path = "src/plugins" }
 promql = { path = "src/promql" }
 puffin = { path = "src/puffin" }
 query = { path = "src/query" }
-script = { path = "src/script" }
 servers = { path = "src/servers" }
 session = { path = "src/session" }
 sql = { path = "src/sql" }
@@ -264,18 +280,16 @@ table = { path = "src/table" }

 [patch.crates-io]
 # change all rustls dependencies to use our fork to default to `ring` to make it "just work"
-hyper-rustls = { git = "https://github.com/GreptimeTeam/hyper-rustls" }
-rustls = { git = "https://github.com/GreptimeTeam/rustls" }
-tokio-rustls = { git = "https://github.com/GreptimeTeam/tokio-rustls" }
+hyper-rustls = { git = "https://github.com/GreptimeTeam/hyper-rustls", rev = "a951e03" } # version = "0.27.5" with ring patch
+rustls = { git = "https://github.com/GreptimeTeam/rustls", rev = "34fd0c6" } # version = "0.23.20" with ring patch
+tokio-rustls = { git = "https://github.com/GreptimeTeam/tokio-rustls", rev = "4604ca6" } # version = "0.26.0" with ring patch
 # This is commented, since we are not using aws-lc-sys, if we need to use it, we need to uncomment this line or use a release after this commit, or it wouldn't compile with gcc < 8.1
 # see https://github.com/aws/aws-lc-rs/pull/526
|
||||||
# aws-lc-sys = { git ="https://github.com/aws/aws-lc-rs", rev = "556558441e3494af4b156ae95ebc07ebc2fd38aa" }
|
# aws-lc-sys = { git ="https://github.com/aws/aws-lc-rs", rev = "556558441e3494af4b156ae95ebc07ebc2fd38aa" }
|
||||||
# Apply a fix for pprof for unaligned pointer access
|
|
||||||
pprof = { git = "https://github.com/GreptimeTeam/pprof-rs", rev = "1bd1e21" }
|
|
||||||
|
|
||||||
[workspace.dependencies.meter-macros]
|
[workspace.dependencies.meter-macros]
|
||||||
git = "https://github.com/GreptimeTeam/greptime-meter.git"
|
git = "https://github.com/GreptimeTeam/greptime-meter.git"
|
||||||
rev = "a10facb353b41460eeb98578868ebf19c2084fac"
|
rev = "5618e779cf2bb4755b499c630fba4c35e91898cb"
|
||||||
|
|
||||||
[profile.release]
|
[profile.release]
|
||||||
debug = 1
|
debug = 1
|
||||||
|
|||||||
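For reference, workspace members consume these pinned versions through Cargo's workspace dependency inheritance; a minimal sketch (the member crate shown and the dependencies it pulls in are illustrative, only the version pins come from the table above):

```toml
# Hypothetical member crate manifest inheriting the workspace-pinned versions above.
[package]
name = "example-member"   # illustrative name, not an actual crate in this repo
version = "0.1.0"
edition = "2021"

[dependencies]
tonic = { workspace = true }   # resolves to the workspace's tonic 0.12
prost = { workspace = true }   # resolves to prost 0.13
sqlx = { workspace = true }    # the newly added workspace dependency
```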
@@ -1,3 +1,6 @@
+[target.aarch64-unknown-linux-gnu]
+image = "ghcr.io/cross-rs/aarch64-unknown-linux-gnu:0.2.5"
+
[build]
pre-build = [
"dpkg --add-architecture $CROSS_DEB_ARCH",
@@ -5,3 +8,8 @@ pre-build = [
"curl -LO https://github.com/protocolbuffers/protobuf/releases/download/v3.15.8/protoc-3.15.8-linux-x86_64.zip && unzip protoc-3.15.8-linux-x86_64.zip -d /usr/",
"chmod a+x /usr/bin/protoc && chmod -R a+rx /usr/include/google",
]
+
+[build.env]
+passthrough = [
+"JEMALLOC_SYS_WITH_LG_PAGE",
+]
Makefile (9 changes)
@@ -8,7 +8,7 @@ CARGO_BUILD_OPTS := --locked
IMAGE_REGISTRY ?= docker.io
IMAGE_NAMESPACE ?= greptime
IMAGE_TAG ?= latest
-DEV_BUILDER_IMAGE_TAG ?= 2024-10-19-a5c00e85-20241024184445
+DEV_BUILDER_IMAGE_TAG ?= 2024-12-25-a71b93dd-20250305072908
BUILDX_MULTI_PLATFORM_BUILD ?= false
BUILDX_BUILDER_NAME ?= gtbuilder
BASE_IMAGE ?= ubuntu
@@ -60,6 +60,8 @@ ifeq ($(BUILDX_MULTI_PLATFORM_BUILD), all)
BUILDX_MULTI_PLATFORM_BUILD_OPTS := --platform linux/amd64,linux/arm64 --push
else ifeq ($(BUILDX_MULTI_PLATFORM_BUILD), amd64)
BUILDX_MULTI_PLATFORM_BUILD_OPTS := --platform linux/amd64 --push
+else ifeq ($(BUILDX_MULTI_PLATFORM_BUILD), arm64)
+BUILDX_MULTI_PLATFORM_BUILD_OPTS := --platform linux/arm64 --push
else
BUILDX_MULTI_PLATFORM_BUILD_OPTS := -o type=docker
endif
@@ -165,15 +167,14 @@ nextest: ## Install nextest tools.
sqlness-test: ## Run sqlness test.
	cargo sqlness ${SQLNESS_OPTS}

-# Run fuzz test ${FUZZ_TARGET}.
RUNS ?= 1
FUZZ_TARGET ?= fuzz_alter_table
.PHONY: fuzz
-fuzz:
+fuzz: ## Run fuzz test ${FUZZ_TARGET}.
	cargo fuzz run ${FUZZ_TARGET} --fuzz-dir tests-fuzz -D -s none -- -runs=${RUNS}

.PHONY: fuzz-ls
-fuzz-ls:
+fuzz-ls: ## List all fuzz targets.
	cargo fuzz list --fuzz-dir tests-fuzz

.PHONY: check
@@ -13,7 +13,7 @@
<a href="https://greptime.com/product/cloud">GreptimeCloud</a> |
<a href="https://docs.greptime.com/">User Guide</a> |
<a href="https://greptimedb.rs/">API Docs</a> |
-<a href="https://github.com/GreptimeTeam/greptimedb/issues/3412">Roadmap 2024</a>
+<a href="https://github.com/GreptimeTeam/greptimedb/issues/5446">Roadmap 2025</a>
</h4>

<a href="https://github.com/GreptimeTeam/greptimedb/releases/latest">
@@ -116,7 +116,7 @@ docker run -p 127.0.0.1:4000-4003:4000-4003 \
--name greptime --rm \
greptime/greptimedb:latest standalone start \
--http-addr 0.0.0.0:4000 \
---rpc-addr 0.0.0.0:4001 \
+--rpc-bind-addr 0.0.0.0:4001 \
--mysql-addr 0.0.0.0:4002 \
--postgres-addr 0.0.0.0:4003
```
@@ -138,7 +138,8 @@ Check the prerequisite:

* [Rust toolchain](https://www.rust-lang.org/tools/install) (nightly)
* [Protobuf compiler](https://grpc.io/docs/protoc-installation/) (>= 3.15)
-* Python toolchain (optional): Required only if built with PyO3 backend. More details for compiling with PyO3 can be found in its [documentation](https://pyo3.rs/v0.18.1/building_and_distribution#configuring-the-python-version).
+* C/C++ building essentials, including `gcc`/`g++`/`autoconf` and glibc library (eg. `libc6-dev` on Ubuntu and `glibc-devel` on Fedora)
+* Python toolchain (optional): Required only if using some test scripts.

Build GreptimeDB binary:

@@ -228,4 +229,3 @@ Special thanks to all the contributors who have propelled GreptimeDB forward. Fo
- GreptimeDB's query engine is powered by [Apache Arrow DataFusion™](https://arrow.apache.org/datafusion/).
- [Apache OpenDAL™](https://opendal.apache.org) gives GreptimeDB a very general and elegant data access abstraction layer.
- GreptimeDB's meta service is based on [etcd](https://etcd.io/).
-- GreptimeDB uses [RustPython](https://github.com/RustPython/RustPython) for experimental embedded python scripting.
@@ -26,8 +26,10 @@
| `http.addr` | String | `127.0.0.1:4000` | The address to bind the HTTP server. |
| `http.timeout` | String | `30s` | HTTP request timeout. Set to 0 to disable timeout. |
| `http.body_limit` | String | `64MB` | HTTP request body limit.<br/>The following units are supported: `B`, `KB`, `KiB`, `MB`, `MiB`, `GB`, `GiB`, `TB`, `TiB`, `PB`, `PiB`.<br/>Set to 0 to disable limit. |
+| `http.enable_cors` | Bool | `true` | HTTP CORS support, it's turned on by default<br/>This allows browser to access http APIs without CORS restrictions |
+| `http.cors_allowed_origins` | Array | Unset | Customize allowed origins for HTTP CORS. |
| `grpc` | -- | -- | The gRPC server options. |
-| `grpc.addr` | String | `127.0.0.1:4001` | The address to bind the gRPC server. |
+| `grpc.bind_addr` | String | `127.0.0.1:4001` | The address to bind the gRPC server. |
| `grpc.runtime_size` | Integer | `8` | The number of server worker threads. |
| `grpc.tls` | -- | -- | gRPC server TLS options, see `mysql.tls` section. |
| `grpc.tls.mode` | String | `disable` | TLS mode. |
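Taken together, the options above map onto the standalone TOML configuration roughly as follows; a minimal sketch assuming the dotted names in the table correspond to TOML tables and keys in the usual way (values are examples only):

```toml
# Standalone HTTP/gRPC sections using the options documented above.
[http]
addr = "127.0.0.1:4000"
enable_cors = true
cors_allowed_origins = ["https://example.com"]  # illustrative origin

[grpc]
bind_addr = "127.0.0.1:4001"   # renamed from the former `grpc.addr`
runtime_size = 8
```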
@@ -38,6 +40,7 @@
| `mysql.enable` | Bool | `true` | Whether to enable. |
| `mysql.addr` | String | `127.0.0.1:4002` | The addr to bind the MySQL server. |
| `mysql.runtime_size` | Integer | `2` | The number of server worker threads. |
+| `mysql.keep_alive` | String | `0s` | Server-side keep-alive time.<br/>Set to 0 (default) to disable. |
| `mysql.tls` | -- | -- | -- |
| `mysql.tls.mode` | String | `disable` | TLS mode, refer to https://www.postgresql.org/docs/current/libpq-ssl.html<br/>- `disable` (default value)<br/>- `prefer`<br/>- `require`<br/>- `verify-ca`<br/>- `verify-full` |
| `mysql.tls.cert_path` | String | Unset | Certificate file path. |
@@ -47,6 +50,7 @@
| `postgres.enable` | Bool | `true` | Whether to enable |
| `postgres.addr` | String | `127.0.0.1:4003` | The addr to bind the PostgresSQL server. |
| `postgres.runtime_size` | Integer | `2` | The number of server worker threads. |
+| `postgres.keep_alive` | String | `0s` | Server-side keep-alive time.<br/>Set to 0 (default) to disable. |
| `postgres.tls` | -- | -- | PostgresSQL server TLS options, see `mysql.tls` section. |
| `postgres.tls.mode` | String | `disable` | TLS mode. |
| `postgres.tls.cert_path` | String | Unset | Certificate file path. |
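The new `keep_alive` options slot into the protocol sections; a minimal sketch (the `30s` value is only an example, the default `0s` disables keep-alive):

```toml
# Enabling server-side keep-alive for the MySQL and PostgreSQL frontends.
[mysql]
addr = "127.0.0.1:4002"
keep_alive = "30s"

[postgres]
addr = "127.0.0.1:4003"
keep_alive = "30s"
```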
@@ -56,6 +60,8 @@
| `opentsdb.enable` | Bool | `true` | Whether to enable OpenTSDB put in HTTP API. |
| `influxdb` | -- | -- | InfluxDB protocol options. |
| `influxdb.enable` | Bool | `true` | Whether to enable InfluxDB protocol in HTTP API. |
+| `jaeger` | -- | -- | Jaeger protocol options. |
+| `jaeger.enable` | Bool | `true` | Whether to enable Jaeger protocol in HTTP API. |
| `prom_store` | -- | -- | Prometheus remote storage options |
| `prom_store.enable` | Bool | `true` | Whether to enable Prometheus remote write and read in HTTP API. |
| `prom_store.with_metric_engine` | Bool | `true` | Whether to store the data from Prometheus remote write in metric engine. |
@@ -63,8 +69,8 @@
| `wal.provider` | String | `raft_engine` | The provider of the WAL.<br/>- `raft_engine`: the wal is stored in the local file system by raft-engine.<br/>- `kafka`: it's remote wal that data is stored in Kafka. |
| `wal.dir` | String | Unset | The directory to store the WAL files.<br/>**It's only used when the provider is `raft_engine`**. |
| `wal.file_size` | String | `128MB` | The size of the WAL segment file.<br/>**It's only used when the provider is `raft_engine`**. |
-| `wal.purge_threshold` | String | `1GB` | The threshold of the WAL size to trigger a flush.<br/>**It's only used when the provider is `raft_engine`**. |
+| `wal.purge_threshold` | String | `1GB` | The threshold of the WAL size to trigger a purge.<br/>**It's only used when the provider is `raft_engine`**. |
-| `wal.purge_interval` | String | `1m` | The interval to trigger a flush.<br/>**It's only used when the provider is `raft_engine`**. |
+| `wal.purge_interval` | String | `1m` | The interval to trigger a purge.<br/>**It's only used when the provider is `raft_engine`**. |
| `wal.read_batch_size` | Integer | `128` | The read batch size.<br/>**It's only used when the provider is `raft_engine`**. |
| `wal.sync_write` | Bool | `false` | Whether to use sync write.<br/>**It's only used when the provider is `raft_engine`**. |
| `wal.enable_log_recycle` | Bool | `true` | Whether to reuse logically truncated log files.<br/>**It's only used when the provider is `raft_engine`**. |
@@ -86,15 +92,18 @@
| `wal.backoff_deadline` | String | `5mins` | The deadline of retries.<br/>**It's only used when the provider is `kafka`**. |
| `wal.overwrite_entry_start_id` | Bool | `false` | Ignore missing entries during read WAL.<br/>**It's only used when the provider is `kafka`**.<br/><br/>This option ensures that when Kafka messages are deleted, the system<br/>can still successfully replay memtable data without throwing an<br/>out-of-range error.<br/>However, enabling this option might lead to unexpected data loss,<br/>as the system will skip over missing entries instead of treating<br/>them as critical errors. |
| `metadata_store` | -- | -- | Metadata storage options. |
-| `metadata_store.file_size` | String | `256MB` | Kv file size in bytes. |
+| `metadata_store.file_size` | String | `64MB` | The size of the metadata store log file. |
-| `metadata_store.purge_threshold` | String | `4GB` | Kv purge threshold. |
+| `metadata_store.purge_threshold` | String | `256MB` | The threshold of the metadata store size to trigger a purge. |
+| `metadata_store.purge_interval` | String | `1m` | The interval of the metadata store to trigger a purge. |
| `procedure` | -- | -- | Procedure storage options. |
| `procedure.max_retry_times` | Integer | `3` | Procedure max retry time. |
| `procedure.retry_delay` | String | `500ms` | Initial retry delay of procedures, increases exponentially |
+| `flow` | -- | -- | flow engine options. |
+| `flow.num_workers` | Integer | `0` | The number of flow worker in flownode.<br/>Not setting(or set to 0) this value will use the number of CPU cores divided by 2. |
| `storage` | -- | -- | The data storage options. |
| `storage.data_home` | String | `/tmp/greptimedb/` | The working home directory. |
| `storage.type` | String | `File` | The storage type used to store the data.<br/>- `File`: the data is stored in the local file system.<br/>- `S3`: the data is stored in the S3 object storage.<br/>- `Gcs`: the data is stored in the Google Cloud Storage.<br/>- `Azblob`: the data is stored in the Azure Blob Storage.<br/>- `Oss`: the data is stored in the Aliyun OSS. |
-| `storage.cache_path` | String | Unset | Read cache configuration for object storage such as 'S3' etc, it's configured by default when using object storage. It is recommended to configure it when using object storage for better performance.<br/>A local file directory, defaults to `{data_home}/object_cache/read`. An empty string means disabling. |
+| `storage.cache_path` | String | Unset | Read cache configuration for object storage such as 'S3' etc, it's configured by default when using object storage. It is recommended to configure it when using object storage for better performance.<br/>A local file directory, defaults to `{data_home}`. An empty string means disabling. |
| `storage.cache_capacity` | String | Unset | The local file cache capacity in bytes. If your disk space is sufficient, it is recommended to set it larger. |
| `storage.bucket` | String | Unset | The S3 bucket name.<br/>**It's only used when the storage type is `S3`, `Oss` and `Gcs`**. |
| `storage.root` | String | Unset | The S3 data will be stored in the specified prefix, for example, `s3://${bucket}/${root}`.<br/>**It's only used when the storage type is `S3`, `Oss` and `Azblob`**. |
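The reworked metadata store sizing above can be written out explicitly; a minimal sketch using the documented defaults:

```toml
# Standalone metadata store options with the defaults from the table above.
[metadata_store]
file_size = "64MB"         # size of each metadata store log file
purge_threshold = "256MB"  # total size that triggers a purge
purge_interval = "1m"      # how often a purge is attempted
```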
@@ -132,10 +141,10 @@
| `region_engine.mito.vector_cache_size` | String | Auto | Cache size for vectors and arrow arrays. Setting it to 0 to disable the cache.<br/>If not set, it's default to 1/16 of OS memory with a max limitation of 512MB. |
| `region_engine.mito.page_cache_size` | String | Auto | Cache size for pages of SST row groups. Setting it to 0 to disable the cache.<br/>If not set, it's default to 1/8 of OS memory. |
| `region_engine.mito.selector_result_cache_size` | String | Auto | Cache size for time series selector (e.g. `last_value()`). Setting it to 0 to disable the cache.<br/>If not set, it's default to 1/16 of OS memory with a max limitation of 512MB. |
-| `region_engine.mito.enable_experimental_write_cache` | Bool | `false` | Whether to enable the experimental write cache, it's enabled by default when using object storage. It is recommended to enable it when using object storage for better performance. |
+| `region_engine.mito.enable_write_cache` | Bool | `false` | Whether to enable the write cache, it's enabled by default when using object storage. It is recommended to enable it when using object storage for better performance. |
-| `region_engine.mito.experimental_write_cache_path` | String | `""` | File system path for write cache, defaults to `{data_home}/object_cache/write`. |
+| `region_engine.mito.write_cache_path` | String | `""` | File system path for write cache, defaults to `{data_home}`. |
-| `region_engine.mito.experimental_write_cache_size` | String | `5GiB` | Capacity for write cache. If your disk space is sufficient, it is recommended to set it larger. |
+| `region_engine.mito.write_cache_size` | String | `5GiB` | Capacity for write cache. If your disk space is sufficient, it is recommended to set it larger. |
-| `region_engine.mito.experimental_write_cache_ttl` | String | Unset | TTL for write cache. |
+| `region_engine.mito.write_cache_ttl` | String | Unset | TTL for write cache. |
| `region_engine.mito.sst_write_buffer_size` | String | `8MB` | Buffer size for SST writing. |
| `region_engine.mito.parallel_scan_channel_size` | Integer | `32` | Capacity of the channel to send data from parallel scan tasks to the main task. |
| `region_engine.mito.allow_stale_entries` | Bool | `false` | Whether to allow stale WAL entries read during replay. |
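With the `experimental_` prefix dropped, the write cache options read as follows; a minimal sketch, assuming the usual `region_engine` table layout in the config file (values are illustrative):

```toml
# Mito write cache with the renamed option names.
[[region_engine]]
[region_engine.mito]
enable_write_cache = true
write_cache_path = ""       # empty string keeps the default under {data_home}
write_cache_size = "5GiB"
write_cache_ttl = "8h"      # illustrative TTL; unset by default
```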
@@ -143,15 +152,16 @@
| `region_engine.mito.index` | -- | -- | The options for index in Mito engine. |
| `region_engine.mito.index.aux_path` | String | `""` | Auxiliary directory path for the index in filesystem, used to store intermediate files for<br/>creating the index and staging files for searching the index, defaults to `{data_home}/index_intermediate`.<br/>The default name for this directory is `index_intermediate` for backward compatibility.<br/><br/>This path contains two subdirectories:<br/>- `__intm`: for storing intermediate files used during creating index.<br/>- `staging`: for storing staging files used during searching index. |
| `region_engine.mito.index.staging_size` | String | `2GB` | The max capacity of the staging directory. |
+| `region_engine.mito.index.staging_ttl` | String | `7d` | The TTL of the staging directory.<br/>Defaults to 7 days.<br/>Setting it to "0s" to disable TTL. |
+| `region_engine.mito.index.metadata_cache_size` | String | `64MiB` | Cache size for inverted index metadata. |
+| `region_engine.mito.index.content_cache_size` | String | `128MiB` | Cache size for inverted index content. |
+| `region_engine.mito.index.content_cache_page_size` | String | `64KiB` | Page size for inverted index content cache. |
| `region_engine.mito.inverted_index` | -- | -- | The options for inverted index in Mito engine. |
| `region_engine.mito.inverted_index.create_on_flush` | String | `auto` | Whether to create the index on flush.<br/>- `auto`: automatically (default)<br/>- `disable`: never |
| `region_engine.mito.inverted_index.create_on_compaction` | String | `auto` | Whether to create the index on compaction.<br/>- `auto`: automatically (default)<br/>- `disable`: never |
| `region_engine.mito.inverted_index.apply_on_query` | String | `auto` | Whether to apply the index on query<br/>- `auto`: automatically (default)<br/>- `disable`: never |
| `region_engine.mito.inverted_index.mem_threshold_on_create` | String | `auto` | Memory threshold for performing an external sort during index creation.<br/>- `auto`: automatically determine the threshold based on the system memory size (default)<br/>- `unlimited`: no memory limit<br/>- `[size]` e.g. `64MB`: fixed memory threshold |
| `region_engine.mito.inverted_index.intermediate_path` | String | `""` | Deprecated, use `region_engine.mito.index.aux_path` instead. |
-| `region_engine.mito.inverted_index.metadata_cache_size` | String | `64MiB` | Cache size for inverted index metadata. |
-| `region_engine.mito.inverted_index.content_cache_size` | String | `128MiB` | Cache size for inverted index content. |
-| `region_engine.mito.inverted_index.content_cache_page_size` | String | `64KiB` | Page size for inverted index content cache. |
| `region_engine.mito.fulltext_index` | -- | -- | The options for full-text index in Mito engine. |
| `region_engine.mito.fulltext_index.create_on_flush` | String | `auto` | Whether to create the index on flush.<br/>- `auto`: automatically (default)<br/>- `disable`: never |
| `region_engine.mito.fulltext_index.create_on_compaction` | String | `auto` | Whether to create the index on compaction.<br/>- `auto`: automatically (default)<br/>- `disable`: never |
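In short, the cache settings move from the `inverted_index` table up to the shared `index` table; a minimal sketch of the new layout (same assumption as above about the `region_engine` table structure):

```toml
# Index cache options now live under the shared index table
# instead of [region_engine.mito.inverted_index].
[[region_engine]]
[region_engine.mito.index]
staging_size = "2GB"
staging_ttl = "7d"
metadata_cache_size = "64MiB"
content_cache_size = "128MiB"
content_cache_page_size = "64KiB"
```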
@@ -168,6 +178,8 @@
| `region_engine.mito.memtable.data_freeze_threshold` | Integer | `32768` | The max rows of data inside the actively writing buffer in one shard.<br/>Only available for `partition_tree` memtable. |
| `region_engine.mito.memtable.fork_dictionary_bytes` | String | `1GiB` | Max dictionary bytes.<br/>Only available for `partition_tree` memtable. |
| `region_engine.file` | -- | -- | Enable the file engine. |
+| `region_engine.metric` | -- | -- | Metric engine options. |
+| `region_engine.metric.experimental_sparse_primary_key_encoding` | Bool | `false` | Whether to enable the experimental sparse primary key encoding. |
| `logging` | -- | -- | The logging options. |
| `logging.dir` | String | `/tmp/greptimedb/logs` | The directory to store the log files. If set to empty, logs will not be written to files. |
| `logging.level` | String | Unset | The log level. Can be `info`/`debug`/`warn`/`error`. |
@@ -212,9 +224,11 @@
| `http.addr` | String | `127.0.0.1:4000` | The address to bind the HTTP server. |
| `http.timeout` | String | `30s` | HTTP request timeout. Set to 0 to disable timeout. |
| `http.body_limit` | String | `64MB` | HTTP request body limit.<br/>The following units are supported: `B`, `KB`, `KiB`, `MB`, `MiB`, `GB`, `GiB`, `TB`, `TiB`, `PB`, `PiB`.<br/>Set to 0 to disable limit. |
+| `http.enable_cors` | Bool | `true` | HTTP CORS support, it's turned on by default<br/>This allows browser to access http APIs without CORS restrictions |
+| `http.cors_allowed_origins` | Array | Unset | Customize allowed origins for HTTP CORS. |
| `grpc` | -- | -- | The gRPC server options. |
-| `grpc.addr` | String | `127.0.0.1:4001` | The address to bind the gRPC server. |
+| `grpc.bind_addr` | String | `127.0.0.1:4001` | The address to bind the gRPC server. |
-| `grpc.hostname` | String | `127.0.0.1` | The hostname advertised to the metasrv,<br/>and used for connections from outside the host |
+| `grpc.server_addr` | String | `127.0.0.1:4001` | The address advertised to the metasrv, and used for connections from outside the host.<br/>If left empty or unset, the server will automatically use the IP address of the first network interface<br/>on the host, with the same port number as the one specified in `grpc.bind_addr`. |
| `grpc.runtime_size` | Integer | `8` | The number of server worker threads. |
| `grpc.tls` | -- | -- | gRPC server TLS options, see `mysql.tls` section. |
| `grpc.tls.mode` | String | `disable` | TLS mode. |
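For the frontend, the old `addr`/`hostname` pair becomes `bind_addr`/`server_addr`; a minimal sketch with an illustrative externally reachable address:

```toml
# Frontend gRPC addressing after the rename.
[grpc]
bind_addr = "0.0.0.0:4001"       # was `grpc.addr`: the listen address
server_addr = "10.0.0.12:4001"   # was `grpc.hostname`: address advertised to metasrv
                                 # (illustrative IP; when unset it is auto-detected)
```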
@@ -225,6 +239,7 @@
| `mysql.enable` | Bool | `true` | Whether to enable. |
| `mysql.addr` | String | `127.0.0.1:4002` | The addr to bind the MySQL server. |
| `mysql.runtime_size` | Integer | `2` | The number of server worker threads. |
+| `mysql.keep_alive` | String | `0s` | Server-side keep-alive time.<br/>Set to 0 (default) to disable. |
| `mysql.tls` | -- | -- | -- |
| `mysql.tls.mode` | String | `disable` | TLS mode, refer to https://www.postgresql.org/docs/current/libpq-ssl.html<br/>- `disable` (default value)<br/>- `prefer`<br/>- `require`<br/>- `verify-ca`<br/>- `verify-full` |
| `mysql.tls.cert_path` | String | Unset | Certificate file path. |
@@ -234,6 +249,7 @@
| `postgres.enable` | Bool | `true` | Whether to enable |
| `postgres.addr` | String | `127.0.0.1:4003` | The addr to bind the PostgresSQL server. |
| `postgres.runtime_size` | Integer | `2` | The number of server worker threads. |
+| `postgres.keep_alive` | String | `0s` | Server-side keep-alive time.<br/>Set to 0 (default) to disable. |
| `postgres.tls` | -- | -- | PostgresSQL server TLS options, see `mysql.tls` section. |
| `postgres.tls.mode` | String | `disable` | TLS mode. |
| `postgres.tls.cert_path` | String | Unset | Certificate file path. |
@@ -243,6 +259,8 @@
| `opentsdb.enable` | Bool | `true` | Whether to enable OpenTSDB put in HTTP API. |
| `influxdb` | -- | -- | InfluxDB protocol options. |
| `influxdb.enable` | Bool | `true` | Whether to enable InfluxDB protocol in HTTP API. |
+| `jaeger` | -- | -- | Jaeger protocol options. |
+| `jaeger.enable` | Bool | `true` | Whether to enable Jaeger protocol in HTTP API. |
| `prom_store` | -- | -- | Prometheus remote storage options |
| `prom_store.enable` | Bool | `true` | Whether to enable Prometheus remote write and read in HTTP API. |
| `prom_store.with_metric_engine` | Bool | `true` | Whether to store the data from Prometheus remote write in metric engine. |
@@ -292,13 +310,16 @@
| --- | -----| ------- | ----------- |
| `data_home` | String | `/tmp/metasrv/` | The working home directory. |
| `bind_addr` | String | `127.0.0.1:3002` | The bind address of metasrv. |
-| `server_addr` | String | `127.0.0.1:3002` | The communication server address for frontend and datanode to connect to metasrv, "127.0.0.1:3002" by default for localhost. |
+| `server_addr` | String | `127.0.0.1:3002` | The communication server address for the frontend and datanode to connect to metasrv.<br/>If left empty or unset, the server will automatically use the IP address of the first network interface<br/>on the host, with the same port number as the one specified in `bind_addr`. |
-| `store_addrs` | Array | -- | Store server address default to etcd store. |
+| `store_addrs` | Array | -- | Store server address default to etcd store.<br/>For postgres store, the format is:<br/>"password=password dbname=postgres user=postgres host=localhost port=5432"<br/>For etcd store, the format is:<br/>"127.0.0.1:2379" |
| `store_key_prefix` | String | `""` | If it's not empty, the metasrv will store all data with this key prefix. |
-| `backend` | String | `EtcdStore` | The datastore for meta server. |
+| `backend` | String | `etcd_store` | The datastore for meta server.<br/>Available values:<br/>- `etcd_store` (default value)<br/>- `memory_store`<br/>- `postgres_store` |
+| `meta_table_name` | String | `greptime_metakv` | Table name in RDS to store metadata. Effect when using a RDS kvbackend.<br/>**Only used when backend is `postgres_store`.** |
+| `meta_election_lock_id` | Integer | `1` | Advisory lock id in PostgreSQL for election. Effect when using PostgreSQL as kvbackend<br/>Only used when backend is `postgres_store`. |
| `selector` | String | `round_robin` | Datanode selector type.<br/>- `round_robin` (default value)<br/>- `lease_based`<br/>- `load_based`<br/>For details, please see "https://docs.greptime.com/developer-guide/metasrv/selector". |
| `use_memory_store` | Bool | `false` | Store data in memory. |
| `enable_region_failover` | Bool | `false` | Whether to enable region failover.<br/>This feature is only available on GreptimeDB running on cluster mode and<br/>- Using Remote WAL<br/>- Using shared storage (e.g., s3). |
+| `node_max_idle_time` | String | `24hours` | Max allowed idle time before removing node info from metasrv memory. |
| `enable_telemetry` | Bool | `true` | Whether to enable greptimedb telemetry. Enabled by default. |
| `runtime` | -- | -- | The runtime options. |
| `runtime.global_rt_size` | Integer | `8` | The number of threads to execute the runtime for global read operations. |
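Based on the table above, a PostgreSQL-backed metasrv would be configured roughly like this; a minimal sketch with placeholder connection details:

```toml
# Metasrv using the postgres_store backend documented above.
# Host, credentials and database name are placeholders.
bind_addr = "127.0.0.1:3002"
backend = "postgres_store"
store_addrs = ["password=secret dbname=postgres user=postgres host=localhost port=5432"]
meta_table_name = "greptime_metakv"   # table used as the metadata KV store
meta_election_lock_id = 1             # PostgreSQL advisory lock id for leader election
```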
@@ -323,7 +344,7 @@
| `wal.auto_create_topics` | Bool | `true` | Automatically create topics for WAL.<br/>Set to `true` to automatically create topics for WAL.<br/>Otherwise, use topics named `topic_name_prefix_[0..num_topics)` |
| `wal.num_topics` | Integer | `64` | Number of topics. |
| `wal.selector_type` | String | `round_robin` | Topic selector type.<br/>Available selector types:<br/>- `round_robin` (default) |
-| `wal.topic_name_prefix` | String | `greptimedb_wal_topic` | A Kafka topic is constructed by concatenating `topic_name_prefix` and `topic_id`.<br/>i.g., greptimedb_wal_topic_0, greptimedb_wal_topic_1. |
+| `wal.topic_name_prefix` | String | `greptimedb_wal_topic` | A Kafka topic is constructed by concatenating `topic_name_prefix` and `topic_id`.<br/>Only accepts strings that match the following regular expression pattern:<br/>[a-zA-Z_:-][a-zA-Z0-9_:\-\.@#]*<br/>i.g., greptimedb_wal_topic_0, greptimedb_wal_topic_1. |
| `wal.replication_factor` | Integer | `1` | Expected number of replicas of each partition. |
| `wal.create_topic_timeout` | String | `30s` | Above which a topic creation operation will be cancelled. |
| `wal.backoff_init` | String | `500ms` | The initial backoff for kafka clients. |
@@ -366,19 +387,14 @@
| `init_regions_in_background` | Bool | `false` | Initialize all regions in the background during the startup.<br/>By default, it provides services after all regions have been initialized. |
| `init_regions_parallelism` | Integer | `16` | Parallelism of initializing regions. |
| `max_concurrent_queries` | Integer | `0` | The maximum current queries allowed to be executed. Zero means unlimited. |
-| `rpc_addr` | String | Unset | Deprecated, use `grpc.addr` instead. |
-| `rpc_hostname` | String | Unset | Deprecated, use `grpc.hostname` instead. |
-| `rpc_runtime_size` | Integer | Unset | Deprecated, use `grpc.runtime_size` instead. |
-| `rpc_max_recv_message_size` | String | Unset | Deprecated, use `grpc.rpc_max_recv_message_size` instead. |
-| `rpc_max_send_message_size` | String | Unset | Deprecated, use `grpc.rpc_max_send_message_size` instead. |
| `enable_telemetry` | Bool | `true` | Enable telemetry to collect anonymous usage data. Enabled by default. |
| `http` | -- | -- | The HTTP server options. |
| `http.addr` | String | `127.0.0.1:4000` | The address to bind the HTTP server. |
| `http.timeout` | String | `30s` | HTTP request timeout. Set to 0 to disable timeout. |
| `http.body_limit` | String | `64MB` | HTTP request body limit.<br/>The following units are supported: `B`, `KB`, `KiB`, `MB`, `MiB`, `GB`, `GiB`, `TB`, `TiB`, `PB`, `PiB`.<br/>Set to 0 to disable limit. |
| `grpc` | -- | -- | The gRPC server options. |
-| `grpc.addr` | String | `127.0.0.1:3001` | The address to bind the gRPC server. |
+| `grpc.bind_addr` | String | `127.0.0.1:3001` | The address to bind the gRPC server. |
-| `grpc.hostname` | String | `127.0.0.1` | The hostname advertised to the metasrv,<br/>and used for connections from outside the host |
+| `grpc.server_addr` | String | `127.0.0.1:3001` | The address advertised to the metasrv, and used for connections from outside the host.<br/>If left empty or unset, the server will automatically use the IP address of the first network interface<br/>on the host, with the same port number as the one specified in `grpc.bind_addr`. |
| `grpc.runtime_size` | Integer | `8` | The number of server worker threads. |
| `grpc.max_recv_message_size` | String | `512MB` | The maximum receive message size for gRPC server. |
| `grpc.max_send_message_size` | String | `512MB` | The maximum send message size for gRPC server. |
@@ -466,10 +482,10 @@
| `region_engine.mito.vector_cache_size` | String | Auto | Cache size for vectors and arrow arrays. Setting it to 0 to disable the cache.<br/>If not set, it's default to 1/16 of OS memory with a max limitation of 512MB. |
| `region_engine.mito.page_cache_size` | String | Auto | Cache size for pages of SST row groups. Setting it to 0 to disable the cache.<br/>If not set, it's default to 1/8 of OS memory. |
| `region_engine.mito.selector_result_cache_size` | String | Auto | Cache size for time series selector (e.g. `last_value()`). Setting it to 0 to disable the cache.<br/>If not set, it's default to 1/16 of OS memory with a max limitation of 512MB. |
-| `region_engine.mito.enable_experimental_write_cache` | Bool | `false` | Whether to enable the experimental write cache, it's enabled by default when using object storage. It is recommended to enable it when using object storage for better performance. |
+| `region_engine.mito.enable_write_cache` | Bool | `false` | Whether to enable the write cache, it's enabled by default when using object storage. It is recommended to enable it when using object storage for better performance. |
-| `region_engine.mito.experimental_write_cache_path` | String | `""` | File system path for write cache, defaults to `{data_home}`. |
+| `region_engine.mito.write_cache_path` | String | `""` | File system path for write cache, defaults to `{data_home}`. |
-| `region_engine.mito.experimental_write_cache_size` | String | `5GiB` | Capacity for write cache. If your disk space is sufficient, it is recommended to set it larger. |
+| `region_engine.mito.write_cache_size` | String | `5GiB` | Capacity for write cache. If your disk space is sufficient, it is recommended to set it larger. |
-| `region_engine.mito.experimental_write_cache_ttl` | String | Unset | TTL for write cache. |
+| `region_engine.mito.write_cache_ttl` | String | Unset | TTL for write cache. |
| `region_engine.mito.sst_write_buffer_size` | String | `8MB` | Buffer size for SST writing. |
| `region_engine.mito.parallel_scan_channel_size` | Integer | `32` | Capacity of the channel to send data from parallel scan tasks to the main task. |
| `region_engine.mito.allow_stale_entries` | Bool | `false` | Whether to allow stale WAL entries read during replay. |
@@ -477,15 +493,16 @@
| `region_engine.mito.index` | -- | -- | The options for index in Mito engine. |
| `region_engine.mito.index.aux_path` | String | `""` | Auxiliary directory path for the index in filesystem, used to store intermediate files for<br/>creating the index and staging files for searching the index, defaults to `{data_home}/index_intermediate`.<br/>The default name for this directory is `index_intermediate` for backward compatibility.<br/><br/>This path contains two subdirectories:<br/>- `__intm`: for storing intermediate files used during creating index.<br/>- `staging`: for storing staging files used during searching index. |
| `region_engine.mito.index.staging_size` | String | `2GB` | The max capacity of the staging directory. |
+| `region_engine.mito.index.staging_ttl` | String | `7d` | The TTL of the staging directory.<br/>Defaults to 7 days.<br/>Setting it to "0s" to disable TTL. |
+| `region_engine.mito.index.metadata_cache_size` | String | `64MiB` | Cache size for inverted index metadata. |
+| `region_engine.mito.index.content_cache_size` | String | `128MiB` | Cache size for inverted index content. |
+| `region_engine.mito.index.content_cache_page_size` | String | `64KiB` | Page size for inverted index content cache. |
| `region_engine.mito.inverted_index` | -- | -- | The options for inverted index in Mito engine. |
| `region_engine.mito.inverted_index.create_on_flush` | String | `auto` | Whether to create the index on flush.<br/>- `auto`: automatically (default)<br/>- `disable`: never |
| `region_engine.mito.inverted_index.create_on_compaction` | String | `auto` | Whether to create the index on compaction.<br/>- `auto`: automatically (default)<br/>- `disable`: never |
| `region_engine.mito.inverted_index.apply_on_query` | String | `auto` | Whether to apply the index on query<br/>- `auto`: automatically (default)<br/>- `disable`: never |
| `region_engine.mito.inverted_index.mem_threshold_on_create` | String | `auto` | Memory threshold for performing an external sort during index creation.<br/>- `auto`: automatically determine the threshold based on the system memory size (default)<br/>- `unlimited`: no memory limit<br/>- `[size]` e.g. `64MB`: fixed memory threshold |
| `region_engine.mito.inverted_index.intermediate_path` | String | `""` | Deprecated, use `region_engine.mito.index.aux_path` instead. |
-| `region_engine.mito.inverted_index.metadata_cache_size` | String | `64MiB` | Cache size for inverted index metadata. |
-| `region_engine.mito.inverted_index.content_cache_size` | String | `128MiB` | Cache size for inverted index content. |
-| `region_engine.mito.inverted_index.content_cache_page_size` | String | `64KiB` | Page size for inverted index content cache. |
| `region_engine.mito.fulltext_index` | -- | -- | The options for full-text index in Mito engine. |
| `region_engine.mito.fulltext_index.create_on_flush` | String | `auto` | Whether to create the index on flush.<br/>- `auto`: automatically (default)<br/>- `disable`: never |
| `region_engine.mito.fulltext_index.create_on_compaction` | String | `auto` | Whether to create the index on compaction.<br/>- `auto`: automatically (default)<br/>- `disable`: never |
@@ -502,6 +519,8 @@
| `region_engine.mito.memtable.data_freeze_threshold` | Integer | `32768` | The max rows of data inside the actively writing buffer in one shard.<br/>Only available for `partition_tree` memtable. |
| `region_engine.mito.memtable.fork_dictionary_bytes` | String | `1GiB` | Max dictionary bytes.<br/>Only available for `partition_tree` memtable. |
|
| `region_engine.mito.memtable.fork_dictionary_bytes` | String | `1GiB` | Max dictionary bytes.<br/>Only available for `partition_tree` memtable. |
|
||||||
| `region_engine.file` | -- | -- | Enable the file engine. |
|
| `region_engine.file` | -- | -- | Enable the file engine. |
|
||||||
|
| `region_engine.metric` | -- | -- | Metric engine options. |
|
||||||
|
| `region_engine.metric.experimental_sparse_primary_key_encoding` | Bool | `false` | Whether to enable the experimental sparse primary key encoding. |
|
||||||
| `logging` | -- | -- | The logging options. |
|
| `logging` | -- | -- | The logging options. |
|
||||||
| `logging.dir` | String | `/tmp/greptimedb/logs` | The directory to store the log files. If set to empty, logs will not be written to files. |
|
| `logging.dir` | String | `/tmp/greptimedb/logs` | The directory to store the log files. If set to empty, logs will not be written to files. |
|
||||||
| `logging.level` | String | Unset | The log level. Can be `info`/`debug`/`warn`/`error`. |
|
| `logging.level` | String | Unset | The log level. Can be `info`/`debug`/`warn`/`error`. |
|
||||||
@@ -534,12 +553,18 @@
|
|||||||
| --- | -----| ------- | ----------- |
|
| --- | -----| ------- | ----------- |
|
||||||
| `mode` | String | `distributed` | The running mode of the flownode. It can be `standalone` or `distributed`. |
|
| `mode` | String | `distributed` | The running mode of the flownode. It can be `standalone` or `distributed`. |
|
||||||
| `node_id` | Integer | Unset | The flownode identifier and should be unique in the cluster. |
|
| `node_id` | Integer | Unset | The flownode identifier and should be unique in the cluster. |
|
||||||
|
| `flow` | -- | -- | The flow engine options. |
|
||||||
|
| `flow.num_workers` | Integer | `0` | The number of flow workers in the flownode.<br/>Not setting this value (or setting it to 0) will use half the number of CPU cores. |
|
||||||
| `grpc` | -- | -- | The gRPC server options. |
|
| `grpc` | -- | -- | The gRPC server options. |
|
||||||
| `grpc.addr` | String | `127.0.0.1:6800` | The address to bind the gRPC server. |
|
| `grpc.bind_addr` | String | `127.0.0.1:6800` | The address to bind the gRPC server. |
|
||||||
| `grpc.hostname` | String | `127.0.0.1` | The hostname advertised to the metasrv,<br/>and used for connections from outside the host |
|
| `grpc.server_addr` | String | `127.0.0.1:6800` | The address advertised to the metasrv,<br/>and used for connections from outside the host |
|
||||||
| `grpc.runtime_size` | Integer | `2` | The number of server worker threads. |
|
| `grpc.runtime_size` | Integer | `2` | The number of server worker threads. |
|
||||||
| `grpc.max_recv_message_size` | String | `512MB` | The maximum receive message size for gRPC server. |
|
| `grpc.max_recv_message_size` | String | `512MB` | The maximum receive message size for gRPC server. |
|
||||||
| `grpc.max_send_message_size` | String | `512MB` | The maximum send message size for gRPC server. |
|
| `grpc.max_send_message_size` | String | `512MB` | The maximum send message size for gRPC server. |
|
||||||
|
| `http` | -- | -- | The HTTP server options. |
|
||||||
|
| `http.addr` | String | `127.0.0.1:4000` | The address to bind the HTTP server. |
|
||||||
|
| `http.timeout` | String | `30s` | HTTP request timeout. Set to 0 to disable timeout. |
|
||||||
|
| `http.body_limit` | String | `64MB` | HTTP request body limit.<br/>The following units are supported: `B`, `KB`, `KiB`, `MB`, `MiB`, `GB`, `GiB`, `TB`, `TiB`, `PB`, `PiB`.<br/>Set to 0 to disable limit. |
|
||||||
| `meta_client` | -- | -- | The metasrv client options. |
|
| `meta_client` | -- | -- | The metasrv client options. |
|
||||||
| `meta_client.metasrv_addrs` | Array | -- | The addresses of the metasrv. |
|
| `meta_client.metasrv_addrs` | Array | -- | The addresses of the metasrv. |
|
||||||
| `meta_client.timeout` | String | `3s` | Operation timeout. |
|
| `meta_client.timeout` | String | `3s` | Operation timeout. |
|
||||||
|
|||||||
@@ -19,26 +19,6 @@ init_regions_parallelism = 16
|
|||||||
## The maximum concurrent queries allowed to be executed. Zero means unlimited.
|
## The maximum concurrent queries allowed to be executed. Zero means unlimited.
|
||||||
max_concurrent_queries = 0
|
max_concurrent_queries = 0
|
||||||
|
|
||||||
## Deprecated, use `grpc.addr` instead.
|
|
||||||
## @toml2docs:none-default
|
|
||||||
rpc_addr = "127.0.0.1:3001"
|
|
||||||
|
|
||||||
## Deprecated, use `grpc.hostname` instead.
|
|
||||||
## @toml2docs:none-default
|
|
||||||
rpc_hostname = "127.0.0.1"
|
|
||||||
|
|
||||||
## Deprecated, use `grpc.runtime_size` instead.
|
|
||||||
## @toml2docs:none-default
|
|
||||||
rpc_runtime_size = 8
|
|
||||||
|
|
||||||
## Deprecated, use `grpc.rpc_max_recv_message_size` instead.
|
|
||||||
## @toml2docs:none-default
|
|
||||||
rpc_max_recv_message_size = "512MB"
|
|
||||||
|
|
||||||
## Deprecated, use `grpc.rpc_max_send_message_size` instead.
|
|
||||||
## @toml2docs:none-default
|
|
||||||
rpc_max_send_message_size = "512MB"
|
|
||||||
|
|
||||||
## Enable telemetry to collect anonymous usage data. Enabled by default.
|
## Enable telemetry to collect anonymous usage data. Enabled by default.
|
||||||
#+ enable_telemetry = true
|
#+ enable_telemetry = true
|
||||||
|
|
||||||
@@ -56,10 +36,11 @@ body_limit = "64MB"
|
|||||||
## The gRPC server options.
|
## The gRPC server options.
|
||||||
[grpc]
|
[grpc]
|
||||||
## The address to bind the gRPC server.
|
## The address to bind the gRPC server.
|
||||||
addr = "127.0.0.1:3001"
|
bind_addr = "127.0.0.1:3001"
|
||||||
## The hostname advertised to the metasrv,
|
## The address advertised to the metasrv, and used for connections from outside the host.
|
||||||
## and used for connections from outside the host
|
## If left empty or unset, the server will automatically use the IP address of the first network interface
|
||||||
hostname = "127.0.0.1"
|
## on the host, with the same port number as the one specified in `grpc.bind_addr`.
|
||||||
|
server_addr = "127.0.0.1:3001"
|
||||||
## The number of server worker threads.
|
## The number of server worker threads.
|
||||||
runtime_size = 8
|
runtime_size = 8
|
||||||
## The maximum receive message size for gRPC server.
|
## The maximum receive message size for gRPC server.
|
||||||
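As a hedged illustration of the renamed `bind_addr`/`server_addr` pair above (not part of this change), a datanode that binds on all interfaces while advertising a routable address to the metasrv might look like the sketch below; the IP is a placeholder, mirroring the docker-compose usage later in this compare.

```toml
## A minimal sketch, assuming this datanode is reachable by other nodes at 192.0.2.10 (placeholder IP).
[grpc]
## Listen on every interface inside the host/container.
bind_addr = "0.0.0.0:3001"
## Address registered with the metasrv and used by peers to reach this datanode.
server_addr = "192.0.2.10:3001"
```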
@@ -250,6 +231,7 @@ overwrite_entry_start_id = false
|
|||||||
# secret_access_key = "123456"
|
# secret_access_key = "123456"
|
||||||
# endpoint = "https://s3.amazonaws.com"
|
# endpoint = "https://s3.amazonaws.com"
|
||||||
# region = "us-west-2"
|
# region = "us-west-2"
|
||||||
|
# enable_virtual_host_style = false
|
||||||
|
|
||||||
# Example of using Oss as the storage.
|
# Example of using Oss as the storage.
|
||||||
# [storage]
|
# [storage]
|
||||||
@@ -475,18 +457,18 @@ auto_flush_interval = "1h"
|
|||||||
## @toml2docs:none-default="Auto"
|
## @toml2docs:none-default="Auto"
|
||||||
#+ selector_result_cache_size = "512MB"
|
#+ selector_result_cache_size = "512MB"
|
||||||
|
|
||||||
## Whether to enable the experimental write cache; it's enabled by default when using object storage, and enabling it is recommended for better performance.
|
## Whether to enable the write cache; it's enabled by default when using object storage, and enabling it is recommended for better performance.
|
||||||
enable_experimental_write_cache = false
|
enable_write_cache = false
|
||||||
|
|
||||||
## File system path for write cache, defaults to `{data_home}`.
|
## File system path for write cache, defaults to `{data_home}`.
|
||||||
experimental_write_cache_path = ""
|
write_cache_path = ""
|
||||||
|
|
||||||
## Capacity for write cache. If your disk space is sufficient, it is recommended to set it larger.
|
## Capacity for write cache. If your disk space is sufficient, it is recommended to set it larger.
|
||||||
experimental_write_cache_size = "5GiB"
|
write_cache_size = "5GiB"
|
||||||
|
|
||||||
## TTL for write cache.
|
## TTL for write cache.
|
||||||
## @toml2docs:none-default
|
## @toml2docs:none-default
|
||||||
experimental_write_cache_ttl = "8h"
|
write_cache_ttl = "8h"
|
||||||
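Taken together, the renamed (no longer `experimental_`) write cache options above can be set as in this hedged sketch; the path, size, and TTL values simply restate the defaults shown in this diff.

```toml
## A minimal sketch enabling the write cache; path/size/TTL mirror the values shown above.
enable_write_cache = true
## Defaults to `{data_home}` when left empty.
write_cache_path = ""
write_cache_size = "5GiB"
write_cache_ttl = "8h"
```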
|
|
||||||
## Buffer size for SST writing.
|
## Buffer size for SST writing.
|
||||||
sst_write_buffer_size = "8MB"
|
sst_write_buffer_size = "8MB"
|
||||||
@@ -516,6 +498,20 @@ aux_path = ""
|
|||||||
## The max capacity of the staging directory.
|
## The max capacity of the staging directory.
|
||||||
staging_size = "2GB"
|
staging_size = "2GB"
|
||||||
|
|
||||||
|
## The TTL of the staging directory.
|
||||||
|
## Defaults to 7 days.
|
||||||
|
## Setting it to "0s" to disable TTL.
|
||||||
|
staging_ttl = "7d"
|
||||||
|
|
||||||
|
## Cache size for inverted index metadata.
|
||||||
|
metadata_cache_size = "64MiB"
|
||||||
|
|
||||||
|
## Cache size for inverted index content.
|
||||||
|
content_cache_size = "128MiB"
|
||||||
|
|
||||||
|
## Page size for inverted index content cache.
|
||||||
|
content_cache_page_size = "64KiB"
|
||||||
|
|
||||||
## The options for inverted index in Mito engine.
|
## The options for inverted index in Mito engine.
|
||||||
[region_engine.mito.inverted_index]
|
[region_engine.mito.inverted_index]
|
||||||
|
|
||||||
@@ -543,15 +539,6 @@ mem_threshold_on_create = "auto"
|
|||||||
## Deprecated, use `region_engine.mito.index.aux_path` instead.
|
## Deprecated, use `region_engine.mito.index.aux_path` instead.
|
||||||
intermediate_path = ""
|
intermediate_path = ""
|
||||||
|
|
||||||
## Cache size for inverted index metadata.
|
|
||||||
metadata_cache_size = "64MiB"
|
|
||||||
|
|
||||||
## Cache size for inverted index content.
|
|
||||||
content_cache_size = "128MiB"
|
|
||||||
|
|
||||||
## Page size for inverted index content cache.
|
|
||||||
content_cache_page_size = "64KiB"
|
|
||||||
|
|
||||||
## The options for full-text index in Mito engine.
|
## The options for full-text index in Mito engine.
|
||||||
[region_engine.mito.fulltext_index]
|
[region_engine.mito.fulltext_index]
|
||||||
|
|
||||||
@@ -622,6 +609,12 @@ fork_dictionary_bytes = "1GiB"
|
|||||||
## Enable the file engine.
|
## Enable the file engine.
|
||||||
[region_engine.file]
|
[region_engine.file]
|
||||||
|
|
||||||
|
[[region_engine]]
|
||||||
|
## Metric engine options.
|
||||||
|
[region_engine.metric]
|
||||||
|
## Whether to enable the experimental sparse primary key encoding.
|
||||||
|
experimental_sparse_primary_key_encoding = false
|
||||||
|
|
||||||
## The logging options.
|
## The logging options.
|
||||||
[logging]
|
[logging]
|
||||||
## The directory to store the log files. If set to empty, logs will not be written to files.
|
## The directory to store the log files. If set to empty, logs will not be written to files.
|
||||||
|
|||||||
@@ -5,13 +5,19 @@ mode = "distributed"
|
|||||||
## @toml2docs:none-default
|
## @toml2docs:none-default
|
||||||
node_id = 14
|
node_id = 14
|
||||||
|
|
||||||
|
## The flow engine options.
|
||||||
|
[flow]
|
||||||
|
## The number of flow workers in the flownode.
|
||||||
|
## Not setting this value (or setting it to 0) will use half the number of CPU cores.
|
||||||
|
#+num_workers=0
|
||||||
|
|
||||||
## The gRPC server options.
|
## The gRPC server options.
|
||||||
[grpc]
|
[grpc]
|
||||||
## The address to bind the gRPC server.
|
## The address to bind the gRPC server.
|
||||||
addr = "127.0.0.1:6800"
|
bind_addr = "127.0.0.1:6800"
|
||||||
## The hostname advertised to the metasrv,
|
## The address advertised to the metasrv,
|
||||||
## and used for connections from outside the host
|
## and used for connections from outside the host
|
||||||
hostname = "127.0.0.1"
|
server_addr = "127.0.0.1:6800"
|
||||||
## The number of server worker threads.
|
## The number of server worker threads.
|
||||||
runtime_size = 2
|
runtime_size = 2
|
||||||
## The maximum receive message size for gRPC server.
|
## The maximum receive message size for gRPC server.
|
||||||
@@ -19,6 +25,16 @@ max_recv_message_size = "512MB"
|
|||||||
## The maximum send message size for gRPC server.
|
## The maximum send message size for gRPC server.
|
||||||
max_send_message_size = "512MB"
|
max_send_message_size = "512MB"
|
||||||
|
|
||||||
|
## The HTTP server options.
|
||||||
|
[http]
|
||||||
|
## The address to bind the HTTP server.
|
||||||
|
addr = "127.0.0.1:4000"
|
||||||
|
## HTTP request timeout. Set to 0 to disable timeout.
|
||||||
|
timeout = "30s"
|
||||||
|
## HTTP request body limit.
|
||||||
|
## The following units are supported: `B`, `KB`, `KiB`, `MB`, `MiB`, `GB`, `GiB`, `TB`, `TiB`, `PB`, `PiB`.
|
||||||
|
## Set to 0 to disable limit.
|
||||||
|
body_limit = "64MB"
|
||||||
|
|
||||||
## The metasrv client options.
|
## The metasrv client options.
|
||||||
[meta_client]
|
[meta_client]
|
||||||
|
|||||||
@@ -31,14 +31,21 @@ timeout = "30s"
|
|||||||
## The following units are supported: `B`, `KB`, `KiB`, `MB`, `MiB`, `GB`, `GiB`, `TB`, `TiB`, `PB`, `PiB`.
|
## The following units are supported: `B`, `KB`, `KiB`, `MB`, `MiB`, `GB`, `GiB`, `TB`, `TiB`, `PB`, `PiB`.
|
||||||
## Set to 0 to disable limit.
|
## Set to 0 to disable limit.
|
||||||
body_limit = "64MB"
|
body_limit = "64MB"
|
||||||
|
## HTTP CORS support; it's turned on by default.
|
||||||
|
## This allows browsers to access HTTP APIs without CORS restrictions.
|
||||||
|
enable_cors = true
|
||||||
|
## Customize allowed origins for HTTP CORS.
|
||||||
|
## @toml2docs:none-default
|
||||||
|
cors_allowed_origins = ["https://example.com"]
|
||||||
|
|
||||||
## The gRPC server options.
|
## The gRPC server options.
|
||||||
[grpc]
|
[grpc]
|
||||||
## The address to bind the gRPC server.
|
## The address to bind the gRPC server.
|
||||||
addr = "127.0.0.1:4001"
|
bind_addr = "127.0.0.1:4001"
|
||||||
## The hostname advertised to the metasrv,
|
## The address advertised to the metasrv, and used for connections from outside the host.
|
||||||
## and used for connections from outside the host
|
## If left empty or unset, the server will automatically use the IP address of the first network interface
|
||||||
hostname = "127.0.0.1"
|
## on the host, with the same port number as the one specified in `grpc.bind_addr`.
|
||||||
|
server_addr = "127.0.0.1:4001"
|
||||||
## The number of server worker threads.
|
## The number of server worker threads.
|
||||||
runtime_size = 8
|
runtime_size = 8
|
||||||
|
|
||||||
@@ -67,6 +74,9 @@ enable = true
|
|||||||
addr = "127.0.0.1:4002"
|
addr = "127.0.0.1:4002"
|
||||||
## The number of server worker threads.
|
## The number of server worker threads.
|
||||||
runtime_size = 2
|
runtime_size = 2
|
||||||
|
## Server-side keep-alive time.
|
||||||
|
## Set to 0 (default) to disable.
|
||||||
|
keep_alive = "0s"
|
||||||
|
|
||||||
# MySQL server TLS options.
|
# MySQL server TLS options.
|
||||||
[mysql.tls]
|
[mysql.tls]
|
||||||
@@ -98,6 +108,9 @@ enable = true
|
|||||||
addr = "127.0.0.1:4003"
|
addr = "127.0.0.1:4003"
|
||||||
## The number of server worker threads.
|
## The number of server worker threads.
|
||||||
runtime_size = 2
|
runtime_size = 2
|
||||||
|
## Server-side keep-alive time.
|
||||||
|
## Set to 0 (default) to disable.
|
||||||
|
keep_alive = "0s"
|
||||||
|
|
||||||
## PostgreSQL server TLS options, see `mysql.tls` section.
|
## PostgreSQL server TLS options, see `mysql.tls` section.
|
||||||
[postgres.tls]
|
[postgres.tls]
|
||||||
@@ -125,6 +138,11 @@ enable = true
|
|||||||
## Whether to enable InfluxDB protocol in HTTP API.
|
## Whether to enable InfluxDB protocol in HTTP API.
|
||||||
enable = true
|
enable = true
|
||||||
|
|
||||||
|
## Jaeger protocol options.
|
||||||
|
[jaeger]
|
||||||
|
## Whether to enable Jaeger protocol in HTTP API.
|
||||||
|
enable = true
|
||||||
|
|
||||||
## Prometheus remote storage options
|
## Prometheus remote storage options
|
||||||
[prom_store]
|
[prom_store]
|
||||||
## Whether to enable Prometheus remote write and read in HTTP API.
|
## Whether to enable Prometheus remote write and read in HTTP API.
|
||||||
|
|||||||
@@ -4,17 +4,35 @@ data_home = "/tmp/metasrv/"
|
|||||||
## The bind address of metasrv.
|
## The bind address of metasrv.
|
||||||
bind_addr = "127.0.0.1:3002"
|
bind_addr = "127.0.0.1:3002"
|
||||||
|
|
||||||
## The communication server address for frontend and datanode to connect to metasrv, "127.0.0.1:3002" by default for localhost.
|
## The communication server address for the frontend and datanode to connect to metasrv.
|
||||||
|
## If left empty or unset, the server will automatically use the IP address of the first network interface
|
||||||
|
## on the host, with the same port number as the one specified in `bind_addr`.
|
||||||
server_addr = "127.0.0.1:3002"
|
server_addr = "127.0.0.1:3002"
|
||||||
|
|
||||||
## Store server address default to etcd store.
|
## Store server address default to etcd store.
|
||||||
|
## For postgres store, the format is:
|
||||||
|
## "password=password dbname=postgres user=postgres host=localhost port=5432"
|
||||||
|
## For etcd store, the format is:
|
||||||
|
## "127.0.0.1:2379"
|
||||||
store_addrs = ["127.0.0.1:2379"]
|
store_addrs = ["127.0.0.1:2379"]
|
||||||
|
|
||||||
## If it's not empty, the metasrv will store all data with this key prefix.
|
## If it's not empty, the metasrv will store all data with this key prefix.
|
||||||
store_key_prefix = ""
|
store_key_prefix = ""
|
||||||
|
|
||||||
## The datastore for meta server.
|
## The datastore for meta server.
|
||||||
backend = "EtcdStore"
|
## Available values:
|
||||||
|
## - `etcd_store` (default value)
|
||||||
|
## - `memory_store`
|
||||||
|
## - `postgres_store`
|
||||||
|
backend = "etcd_store"
|
||||||
|
|
||||||
|
## Table name in RDS to store metadata. Takes effect when using an RDS kvbackend.
|
||||||
|
## **Only used when backend is `postgres_store`.**
|
||||||
|
meta_table_name = "greptime_metakv"
|
||||||
|
|
||||||
|
## Advisory lock id in PostgreSQL for election. Takes effect when using PostgreSQL as the kvbackend.
|
||||||
|
## Only used when backend is `postgres_store`.
|
||||||
|
meta_election_lock_id = 1
|
||||||
|
|
||||||
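For illustration only, a hedged sketch of how the options above combine when switching the metasrv metadata store to PostgreSQL; the connection string is the documented placeholder format from this file, not a value introduced by this change.

```toml
## A minimal sketch, assuming a local PostgreSQL instance with the documented placeholder credentials.
store_addrs = ["password=password dbname=postgres user=postgres host=localhost port=5432"]
backend = "postgres_store"
## Only used when backend is `postgres_store`.
meta_table_name = "greptime_metakv"
meta_election_lock_id = 1
```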
## Datanode selector type.
|
## Datanode selector type.
|
||||||
## - `round_robin` (default value)
|
## - `round_robin` (default value)
|
||||||
@@ -32,6 +50,9 @@ use_memory_store = false
|
|||||||
## - Using shared storage (e.g., s3).
|
## - Using shared storage (e.g., s3).
|
||||||
enable_region_failover = false
|
enable_region_failover = false
|
||||||
|
|
||||||
|
## Max allowed idle time before removing node info from metasrv memory.
|
||||||
|
node_max_idle_time = "24hours"
|
||||||
|
|
||||||
## Whether to enable greptimedb telemetry. Enabled by default.
|
## Whether to enable greptimedb telemetry. Enabled by default.
|
||||||
#+ enable_telemetry = true
|
#+ enable_telemetry = true
|
||||||
|
|
||||||
@@ -113,6 +134,8 @@ num_topics = 64
|
|||||||
selector_type = "round_robin"
|
selector_type = "round_robin"
|
||||||
|
|
||||||
## A Kafka topic is constructed by concatenating `topic_name_prefix` and `topic_id`.
|
## A Kafka topic is constructed by concatenating `topic_name_prefix` and `topic_id`.
|
||||||
|
## Only accepts strings that match the following regular expression pattern:
|
||||||
|
## [a-zA-Z_:-][a-zA-Z0-9_:\-\.@#]*
|
||||||
## e.g., greptimedb_wal_topic_0, greptimedb_wal_topic_1.
|
## e.g., greptimedb_wal_topic_0, greptimedb_wal_topic_1.
|
||||||
topic_name_prefix = "greptimedb_wal_topic"
|
topic_name_prefix = "greptimedb_wal_topic"
|
||||||
|
|
||||||
|
|||||||
@@ -39,11 +39,17 @@ timeout = "30s"
|
|||||||
## The following units are supported: `B`, `KB`, `KiB`, `MB`, `MiB`, `GB`, `GiB`, `TB`, `TiB`, `PB`, `PiB`.
|
## The following units are supported: `B`, `KB`, `KiB`, `MB`, `MiB`, `GB`, `GiB`, `TB`, `TiB`, `PB`, `PiB`.
|
||||||
## Set to 0 to disable limit.
|
## Set to 0 to disable limit.
|
||||||
body_limit = "64MB"
|
body_limit = "64MB"
|
||||||
|
## HTTP CORS support; it's turned on by default.
|
||||||
|
## This allows browsers to access HTTP APIs without CORS restrictions.
|
||||||
|
enable_cors = true
|
||||||
|
## Customize allowed origins for HTTP CORS.
|
||||||
|
## @toml2docs:none-default
|
||||||
|
cors_allowed_origins = ["https://example.com"]
|
||||||
|
|
||||||
## The gRPC server options.
|
## The gRPC server options.
|
||||||
[grpc]
|
[grpc]
|
||||||
## The address to bind the gRPC server.
|
## The address to bind the gRPC server.
|
||||||
addr = "127.0.0.1:4001"
|
bind_addr = "127.0.0.1:4001"
|
||||||
## The number of server worker threads.
|
## The number of server worker threads.
|
||||||
runtime_size = 8
|
runtime_size = 8
|
||||||
|
|
||||||
@@ -72,6 +78,9 @@ enable = true
|
|||||||
addr = "127.0.0.1:4002"
|
addr = "127.0.0.1:4002"
|
||||||
## The number of server worker threads.
|
## The number of server worker threads.
|
||||||
runtime_size = 2
|
runtime_size = 2
|
||||||
|
## Server-side keep-alive time.
|
||||||
|
## Set to 0 (default) to disable.
|
||||||
|
keep_alive = "0s"
|
||||||
|
|
||||||
# MySQL server TLS options.
|
# MySQL server TLS options.
|
||||||
[mysql.tls]
|
[mysql.tls]
|
||||||
@@ -103,6 +112,9 @@ enable = true
|
|||||||
addr = "127.0.0.1:4003"
|
addr = "127.0.0.1:4003"
|
||||||
## The number of server worker threads.
|
## The number of server worker threads.
|
||||||
runtime_size = 2
|
runtime_size = 2
|
||||||
|
## Server-side keep-alive time.
|
||||||
|
## Set to 0 (default) to disable.
|
||||||
|
keep_alive = "0s"
|
||||||
|
|
||||||
## PostgreSQL server TLS options, see `mysql.tls` section.
|
## PostgreSQL server TLS options, see `mysql.tls` section.
|
||||||
[postgres.tls]
|
[postgres.tls]
|
||||||
@@ -130,6 +142,11 @@ enable = true
|
|||||||
## Whether to enable InfluxDB protocol in HTTP API.
|
## Whether to enable InfluxDB protocol in HTTP API.
|
||||||
enable = true
|
enable = true
|
||||||
|
|
||||||
|
## Jaeger protocol options.
|
||||||
|
[jaeger]
|
||||||
|
## Whether to enable Jaeger protocol in HTTP API.
|
||||||
|
enable = true
|
||||||
|
|
||||||
## Prometheus remote storage options
|
## Prometheus remote storage options
|
||||||
[prom_store]
|
[prom_store]
|
||||||
## Whether to enable Prometheus remote write and read in HTTP API.
|
## Whether to enable Prometheus remote write and read in HTTP API.
|
||||||
@@ -153,11 +170,11 @@ dir = "/tmp/greptimedb/wal"
|
|||||||
## **It's only used when the provider is `raft_engine`**.
|
## **It's only used when the provider is `raft_engine`**.
|
||||||
file_size = "128MB"
|
file_size = "128MB"
|
||||||
|
|
||||||
## The threshold of the WAL size to trigger a flush.
|
## The threshold of the WAL size to trigger a purge.
|
||||||
## **It's only used when the provider is `raft_engine`**.
|
## **It's only used when the provider is `raft_engine`**.
|
||||||
purge_threshold = "1GB"
|
purge_threshold = "1GB"
|
||||||
|
|
||||||
## The interval to trigger a flush.
|
## The interval to trigger a purge.
|
||||||
## **It's only used when the provider is `raft_engine`**.
|
## **It's only used when the provider is `raft_engine`**.
|
||||||
purge_interval = "1m"
|
purge_interval = "1m"
|
||||||
|
|
||||||
@@ -272,10 +289,12 @@ overwrite_entry_start_id = false
|
|||||||
|
|
||||||
## Metadata storage options.
|
## Metadata storage options.
|
||||||
[metadata_store]
|
[metadata_store]
|
||||||
## Kv file size in bytes.
|
## The size of the metadata store log file.
|
||||||
file_size = "256MB"
|
file_size = "64MB"
|
||||||
## Kv purge threshold.
|
## The threshold of the metadata store size to trigger a purge.
|
||||||
purge_threshold = "4GB"
|
purge_threshold = "256MB"
|
||||||
|
## The interval of the metadata store to trigger a purge.
|
||||||
|
purge_interval = "1m"
|
||||||
|
|
||||||
## Procedure storage options.
|
## Procedure storage options.
|
||||||
[procedure]
|
[procedure]
|
||||||
@@ -284,6 +303,12 @@ max_retry_times = 3
|
|||||||
## Initial retry delay of procedures, increases exponentially
|
## Initial retry delay of procedures, increases exponentially
|
||||||
retry_delay = "500ms"
|
retry_delay = "500ms"
|
||||||
|
|
||||||
|
## The flow engine options.
|
||||||
|
[flow]
|
||||||
|
## The number of flow workers in the flownode.
|
||||||
|
## Not setting this value (or setting it to 0) will use half the number of CPU cores.
|
||||||
|
#+num_workers=0
|
||||||
|
|
||||||
# Example of using S3 as the storage.
|
# Example of using S3 as the storage.
|
||||||
# [storage]
|
# [storage]
|
||||||
# type = "S3"
|
# type = "S3"
|
||||||
@@ -293,6 +318,7 @@ retry_delay = "500ms"
|
|||||||
# secret_access_key = "123456"
|
# secret_access_key = "123456"
|
||||||
# endpoint = "https://s3.amazonaws.com"
|
# endpoint = "https://s3.amazonaws.com"
|
||||||
# region = "us-west-2"
|
# region = "us-west-2"
|
||||||
|
# enable_virtual_host_style = false
|
||||||
|
|
||||||
# Example of using Oss as the storage.
|
# Example of using Oss as the storage.
|
||||||
# [storage]
|
# [storage]
|
||||||
@@ -337,7 +363,7 @@ data_home = "/tmp/greptimedb/"
|
|||||||
type = "File"
|
type = "File"
|
||||||
|
|
||||||
## Read cache configuration for object storage such as 'S3'. It's configured by default when using object storage; configuring it is recommended for better performance.
|
## Read cache configuration for object storage such as 'S3'. It's configured by default when using object storage; configuring it is recommended for better performance.
|
||||||
## A local file directory, defaults to `{data_home}/object_cache/read`. An empty string means disabling.
|
## A local file directory, defaults to `{data_home}`. An empty string means disabling.
|
||||||
## @toml2docs:none-default
|
## @toml2docs:none-default
|
||||||
#+ cache_path = ""
|
#+ cache_path = ""
|
||||||
|
|
||||||
@@ -518,18 +544,18 @@ auto_flush_interval = "1h"
|
|||||||
## @toml2docs:none-default="Auto"
|
## @toml2docs:none-default="Auto"
|
||||||
#+ selector_result_cache_size = "512MB"
|
#+ selector_result_cache_size = "512MB"
|
||||||
|
|
||||||
## Whether to enable the experimental write cache; it's enabled by default when using object storage, and enabling it is recommended for better performance.
|
## Whether to enable the write cache; it's enabled by default when using object storage, and enabling it is recommended for better performance.
|
||||||
enable_experimental_write_cache = false
|
enable_write_cache = false
|
||||||
|
|
||||||
## File system path for write cache, defaults to `{data_home}/object_cache/write`.
|
## File system path for write cache, defaults to `{data_home}`.
|
||||||
experimental_write_cache_path = ""
|
write_cache_path = ""
|
||||||
|
|
||||||
## Capacity for write cache. If your disk space is sufficient, it is recommended to set it larger.
|
## Capacity for write cache. If your disk space is sufficient, it is recommended to set it larger.
|
||||||
experimental_write_cache_size = "5GiB"
|
write_cache_size = "5GiB"
|
||||||
|
|
||||||
## TTL for write cache.
|
## TTL for write cache.
|
||||||
## @toml2docs:none-default
|
## @toml2docs:none-default
|
||||||
experimental_write_cache_ttl = "8h"
|
write_cache_ttl = "8h"
|
||||||
|
|
||||||
## Buffer size for SST writing.
|
## Buffer size for SST writing.
|
||||||
sst_write_buffer_size = "8MB"
|
sst_write_buffer_size = "8MB"
|
||||||
@@ -559,6 +585,20 @@ aux_path = ""
|
|||||||
## The max capacity of the staging directory.
|
## The max capacity of the staging directory.
|
||||||
staging_size = "2GB"
|
staging_size = "2GB"
|
||||||
|
|
||||||
|
## The TTL of the staging directory.
|
||||||
|
## Defaults to 7 days.
|
||||||
|
## Setting it to "0s" to disable TTL.
|
||||||
|
staging_ttl = "7d"
|
||||||
|
|
||||||
|
## Cache size for inverted index metadata.
|
||||||
|
metadata_cache_size = "64MiB"
|
||||||
|
|
||||||
|
## Cache size for inverted index content.
|
||||||
|
content_cache_size = "128MiB"
|
||||||
|
|
||||||
|
## Page size for inverted index content cache.
|
||||||
|
content_cache_page_size = "64KiB"
|
||||||
|
|
||||||
## The options for inverted index in Mito engine.
|
## The options for inverted index in Mito engine.
|
||||||
[region_engine.mito.inverted_index]
|
[region_engine.mito.inverted_index]
|
||||||
|
|
||||||
@@ -586,15 +626,6 @@ mem_threshold_on_create = "auto"
|
|||||||
## Deprecated, use `region_engine.mito.index.aux_path` instead.
|
## Deprecated, use `region_engine.mito.index.aux_path` instead.
|
||||||
intermediate_path = ""
|
intermediate_path = ""
|
||||||
|
|
||||||
## Cache size for inverted index metadata.
|
|
||||||
metadata_cache_size = "64MiB"
|
|
||||||
|
|
||||||
## Cache size for inverted index content.
|
|
||||||
content_cache_size = "128MiB"
|
|
||||||
|
|
||||||
## Page size for inverted index content cache.
|
|
||||||
content_cache_page_size = "64KiB"
|
|
||||||
|
|
||||||
## The options for full-text index in Mito engine.
|
## The options for full-text index in Mito engine.
|
||||||
[region_engine.mito.fulltext_index]
|
[region_engine.mito.fulltext_index]
|
||||||
|
|
||||||
@@ -665,6 +696,12 @@ fork_dictionary_bytes = "1GiB"
|
|||||||
## Enable the file engine.
|
## Enable the file engine.
|
||||||
[region_engine.file]
|
[region_engine.file]
|
||||||
|
|
||||||
|
[[region_engine]]
|
||||||
|
## Metric engine options.
|
||||||
|
[region_engine.metric]
|
||||||
|
## Whether to enable the experimental sparse primary key encoding.
|
||||||
|
experimental_sparse_primary_key_encoding = false
|
||||||
|
|
||||||
## The logging options.
|
## The logging options.
|
||||||
[logging]
|
[logging]
|
||||||
## The directory to store the log files. If set to empty, logs will not be written to files.
|
## The directory to store the log files. If set to empty, logs will not be written to files.
|
||||||
|
|||||||
@@ -22,7 +22,7 @@ RUN unzip protoc-3.15.8-linux-x86_64.zip -d /usr/local/
|
|||||||
# Install Rust
|
# Install Rust
|
||||||
SHELL ["/bin/bash", "-c"]
|
SHELL ["/bin/bash", "-c"]
|
||||||
RUN curl --proto '=https' --tlsv1.2 -sSf https://sh.rustup.rs | sh -s -- --no-modify-path --default-toolchain none -y
|
RUN curl --proto '=https' --tlsv1.2 -sSf https://sh.rustup.rs | sh -s -- --no-modify-path --default-toolchain none -y
|
||||||
ENV PATH /opt/rh/rh-python38/root/usr/bin:/usr/local/bin:/root/.cargo/bin/:$PATH
|
ENV PATH /usr/local/bin:/root/.cargo/bin/:$PATH
|
||||||
|
|
||||||
# Build the project in release mode.
|
# Build the project in release mode.
|
||||||
RUN --mount=target=.,rw \
|
RUN --mount=target=.,rw \
|
||||||
|
|||||||
@@ -1,4 +1,4 @@
|
|||||||
FROM ubuntu:20.04 as builder
|
FROM ubuntu:22.04 as builder
|
||||||
|
|
||||||
ARG CARGO_PROFILE
|
ARG CARGO_PROFILE
|
||||||
ARG FEATURES
|
ARG FEATURES
|
||||||
@@ -7,10 +7,8 @@ ARG OUTPUT_DIR
|
|||||||
ENV LANG en_US.utf8
|
ENV LANG en_US.utf8
|
||||||
WORKDIR /greptimedb
|
WORKDIR /greptimedb
|
||||||
|
|
||||||
# Add PPA for Python 3.10.
|
|
||||||
RUN apt-get update && \
|
RUN apt-get update && \
|
||||||
DEBIAN_FRONTEND=noninteractive apt-get install -y software-properties-common && \
|
DEBIAN_FRONTEND=noninteractive apt-get install -y software-properties-common
|
||||||
add-apt-repository ppa:deadsnakes/ppa -y
|
|
||||||
|
|
||||||
# Install dependencies.
|
# Install dependencies.
|
||||||
RUN --mount=type=cache,target=/var/cache/apt \
|
RUN --mount=type=cache,target=/var/cache/apt \
|
||||||
|
|||||||
@@ -1,4 +1,4 @@
|
|||||||
FROM ubuntu:22.04
|
FROM ubuntu:latest
|
||||||
|
|
||||||
# The binary name of GreptimeDB executable.
|
# The binary name of GreptimeDB executable.
|
||||||
# Defaults to "greptime", but sometimes in other projects it might be different.
|
# Defaults to "greptime", but sometimes in other projects it might be different.
|
||||||
|
|||||||
@@ -9,16 +9,20 @@ RUN cp ${NDK_ROOT}/toolchains/llvm/prebuilt/linux-x86_64/lib64/clang/14.0.7/lib/
|
|||||||
# Install dependencies.
|
# Install dependencies.
|
||||||
RUN apt-get update && apt-get install -y \
|
RUN apt-get update && apt-get install -y \
|
||||||
libssl-dev \
|
libssl-dev \
|
||||||
protobuf-compiler \
|
|
||||||
curl \
|
curl \
|
||||||
git \
|
git \
|
||||||
|
unzip \
|
||||||
build-essential \
|
build-essential \
|
||||||
pkg-config \
|
pkg-config
|
||||||
python3 \
|
|
||||||
python3-dev \
|
# Install protoc
|
||||||
python3-pip \
|
ARG PROTOBUF_VERSION=29.3
|
||||||
&& pip3 install --upgrade pip \
|
|
||||||
&& pip3 install pyarrow
|
RUN curl -OL https://github.com/protocolbuffers/protobuf/releases/download/v${PROTOBUF_VERSION}/protoc-${PROTOBUF_VERSION}-linux-x86_64.zip && \
|
||||||
|
unzip protoc-${PROTOBUF_VERSION}-linux-x86_64.zip -d protoc3;
|
||||||
|
|
||||||
|
RUN mv protoc3/bin/* /usr/local/bin/
|
||||||
|
RUN mv protoc3/include/* /usr/local/include/
|
||||||
|
|
||||||
# Trust workdir
|
# Trust workdir
|
||||||
RUN git config --global --add safe.directory /greptimedb
|
RUN git config --global --add safe.directory /greptimedb
|
||||||
|
|||||||
@@ -12,18 +12,21 @@ RUN yum install -y epel-release \
|
|||||||
openssl \
|
openssl \
|
||||||
openssl-devel \
|
openssl-devel \
|
||||||
centos-release-scl \
|
centos-release-scl \
|
||||||
rh-python38 \
|
|
||||||
rh-python38-python-devel \
|
|
||||||
which
|
which
|
||||||
|
|
||||||
# Install protoc
|
# Install protoc
|
||||||
RUN curl -LO https://github.com/protocolbuffers/protobuf/releases/download/v3.15.8/protoc-3.15.8-linux-x86_64.zip
|
ARG PROTOBUF_VERSION=29.3
|
||||||
RUN unzip protoc-3.15.8-linux-x86_64.zip -d /usr/local/
|
|
||||||
|
RUN curl -OL https://github.com/protocolbuffers/protobuf/releases/download/v${PROTOBUF_VERSION}/protoc-${PROTOBUF_VERSION}-linux-x86_64.zip && \
|
||||||
|
unzip protoc-${PROTOBUF_VERSION}-linux-x86_64.zip -d protoc3;
|
||||||
|
|
||||||
|
RUN mv protoc3/bin/* /usr/local/bin/
|
||||||
|
RUN mv protoc3/include/* /usr/local/include/
|
||||||
|
|
||||||
# Install Rust
|
# Install Rust
|
||||||
SHELL ["/bin/bash", "-c"]
|
SHELL ["/bin/bash", "-c"]
|
||||||
RUN curl --proto '=https' --tlsv1.2 -sSf https://sh.rustup.rs | sh -s -- --no-modify-path --default-toolchain none -y
|
RUN curl --proto '=https' --tlsv1.2 -sSf https://sh.rustup.rs | sh -s -- --no-modify-path --default-toolchain none -y
|
||||||
ENV PATH /opt/rh/rh-python38/root/usr/bin:/usr/local/bin:/root/.cargo/bin/:$PATH
|
ENV PATH /usr/local/bin:/root/.cargo/bin/:$PATH
|
||||||
|
|
||||||
# Install Rust toolchains.
|
# Install Rust toolchains.
|
||||||
ARG RUST_TOOLCHAIN
|
ARG RUST_TOOLCHAIN
|
||||||
|
|||||||
@@ -1,4 +1,4 @@
|
|||||||
FROM ubuntu:20.04
|
FROM ubuntu:22.04
|
||||||
|
|
||||||
# The root path under which contains all the dependencies to build this Dockerfile.
|
# The root path under which contains all the dependencies to build this Dockerfile.
|
||||||
ARG DOCKER_BUILD_ROOT=.
|
ARG DOCKER_BUILD_ROOT=.
|
||||||
@@ -6,11 +6,8 @@ ARG DOCKER_BUILD_ROOT=.
|
|||||||
ENV LANG en_US.utf8
|
ENV LANG en_US.utf8
|
||||||
WORKDIR /greptimedb
|
WORKDIR /greptimedb
|
||||||
|
|
||||||
# Add PPA for Python 3.10.
|
|
||||||
RUN apt-get update && \
|
RUN apt-get update && \
|
||||||
DEBIAN_FRONTEND=noninteractive apt-get install -y software-properties-common && \
|
DEBIAN_FRONTEND=noninteractive apt-get install -y software-properties-common
|
||||||
add-apt-repository ppa:deadsnakes/ppa -y
|
|
||||||
|
|
||||||
# Install dependencies.
|
# Install dependencies.
|
||||||
RUN apt-get update && DEBIAN_FRONTEND=noninteractive apt-get install -y \
|
RUN apt-get update && DEBIAN_FRONTEND=noninteractive apt-get install -y \
|
||||||
libssl-dev \
|
libssl-dev \
|
||||||
@@ -20,55 +17,36 @@ RUN apt-get update && DEBIAN_FRONTEND=noninteractive apt-get install -y \
|
|||||||
ca-certificates \
|
ca-certificates \
|
||||||
git \
|
git \
|
||||||
build-essential \
|
build-essential \
|
||||||
pkg-config \
|
pkg-config
|
||||||
python3.10 \
|
|
||||||
python3.10-dev
|
|
||||||
|
|
||||||
ARG TARGETPLATFORM
|
ARG TARGETPLATFORM
|
||||||
RUN echo "target platform: $TARGETPLATFORM"
|
RUN echo "target platform: $TARGETPLATFORM"
|
||||||
|
|
||||||
|
ARG PROTOBUF_VERSION=29.3
|
||||||
|
|
||||||
# Install protobuf, because the one in the apt is too old (v3.12).
|
# Install protobuf, because the one in the apt is too old (v3.12).
|
||||||
RUN if [ "$TARGETPLATFORM" = "linux/arm64" ]; then \
|
RUN if [ "$TARGETPLATFORM" = "linux/arm64" ]; then \
|
||||||
curl -OL https://github.com/protocolbuffers/protobuf/releases/download/v29.1/protoc-29.1-linux-aarch_64.zip && \
|
curl -OL https://github.com/protocolbuffers/protobuf/releases/download/v${PROTOBUF_VERSION}/protoc-${PROTOBUF_VERSION}-linux-aarch_64.zip && \
|
||||||
unzip protoc-29.1-linux-aarch_64.zip -d protoc3; \
|
unzip protoc-${PROTOBUF_VERSION}-linux-aarch_64.zip -d protoc3; \
|
||||||
elif [ "$TARGETPLATFORM" = "linux/amd64" ]; then \
|
elif [ "$TARGETPLATFORM" = "linux/amd64" ]; then \
|
||||||
curl -OL https://github.com/protocolbuffers/protobuf/releases/download/v29.1/protoc-29.1-linux-x86_64.zip && \
|
curl -OL https://github.com/protocolbuffers/protobuf/releases/download/v${PROTOBUF_VERSION}/protoc-${PROTOBUF_VERSION}-linux-x86_64.zip && \
|
||||||
unzip protoc-29.1-linux-x86_64.zip -d protoc3; \
|
unzip protoc-${PROTOBUF_VERSION}-linux-x86_64.zip -d protoc3; \
|
||||||
fi
|
fi
|
||||||
RUN mv protoc3/bin/* /usr/local/bin/
|
RUN mv protoc3/bin/* /usr/local/bin/
|
||||||
RUN mv protoc3/include/* /usr/local/include/
|
RUN mv protoc3/include/* /usr/local/include/
|
||||||
|
|
||||||
# https://github.com/GreptimeTeam/greptimedb/actions/runs/10935485852/job/30357457188#step:3:7106
|
|
||||||
# `aws-lc-sys` require gcc >= 10.3.0 to work, hence alias to use gcc-10
|
|
||||||
RUN apt-get remove -y gcc-9 g++-9 cpp-9 && \
|
|
||||||
apt-get install -y gcc-10 g++-10 cpp-10 make cmake && \
|
|
||||||
ln -sf /usr/bin/gcc-10 /usr/bin/gcc && ln -sf /usr/bin/g++-10 /usr/bin/g++ && \
|
|
||||||
ln -sf /usr/bin/gcc-10 /usr/bin/cc && \
|
|
||||||
ln -sf /usr/bin/g++-10 /usr/bin/cpp && ln -sf /usr/bin/g++-10 /usr/bin/c++ && \
|
|
||||||
cc --version && gcc --version && g++ --version && cpp --version && c++ --version
|
|
||||||
|
|
||||||
# Remove Python 3.8 and install pip.
|
|
||||||
RUN apt-get -y purge python3.8 && \
|
|
||||||
apt-get -y autoremove && \
|
|
||||||
ln -s /usr/bin/python3.10 /usr/bin/python3 && \
|
|
||||||
curl -sS https://bootstrap.pypa.io/get-pip.py | python3.10
|
|
||||||
|
|
||||||
# Silence all `safe.directory` warnings, to avoid the "detect dubious repository" error when building with submodules.
|
# Silence all `safe.directory` warnings, to avoid the "detect dubious repository" error when building with submodules.
|
||||||
# Disabling the safe directory check here won't pose extra security issues, because in our usage for this dev build
|
# Disabling the safe directory check here won't pose extra security issues, because in our usage for this dev build
|
||||||
# image, we use it solely on our own environment (that github action's VM, or ECS created dynamically by ourselves),
|
# image, we use it solely on our own environment (that github action's VM, or ECS created dynamically by ourselves),
|
||||||
# and the repositories are pulled from trusted sources (still us, of course). Doing so does not violate the intention
|
# and the repositories are pulled from trusted sources (still us, of course). Doing so does not violate the intention
|
||||||
# of the Git's addition to the "safe.directory" at the first place (see the commit message here:
|
# of the Git's addition to the "safe.directory" at the first place (see the commit message here:
|
||||||
# https://github.com/git/git/commit/8959555cee7ec045958f9b6dd62e541affb7e7d9).
|
# https://github.com/git/git/commit/8959555cee7ec045958f9b6dd62e541affb7e7d9).
|
||||||
# There's also another solution to this, that we add the desired submodules to the safe directory, instead of using
|
# There's also another solution to this, that we add the desired submodules to the safe directory, instead of using
|
||||||
# wildcard here. However, that requires the git's config files and the submodules all owned by the very same user.
|
# wildcard here. However, that requires the git's config files and the submodules all owned by the very same user.
|
||||||
# It's troublesome to do this since the dev build runs in Docker, which is under user "root"; while outside the Docker,
|
# It's troublesome to do this since the dev build runs in Docker, which is under user "root"; while outside the Docker,
|
||||||
# it can be a different user that have prepared the submodules.
|
# it can be a different user that have prepared the submodules.
|
||||||
RUN git config --global --add safe.directory '*'
|
RUN git config --global --add safe.directory '*'
|
||||||
|
|
||||||
# Install Python dependencies.
|
|
||||||
COPY $DOCKER_BUILD_ROOT/docker/python/requirements.txt /etc/greptime/requirements.txt
|
|
||||||
RUN python3 -m pip install -r /etc/greptime/requirements.txt
|
|
||||||
|
|
||||||
# Install Rust.
|
# Install Rust.
|
||||||
SHELL ["/bin/bash", "-c"]
|
SHELL ["/bin/bash", "-c"]
|
||||||
RUN curl --proto '=https' --tlsv1.2 -sSf https://sh.rustup.rs | sh -s -- --no-modify-path --default-toolchain none -y
|
RUN curl --proto '=https' --tlsv1.2 -sSf https://sh.rustup.rs | sh -s -- --no-modify-path --default-toolchain none -y
|
||||||
|
|||||||
@@ -1,51 +0,0 @@
|
|||||||
# Use the legacy glibc 2.28.
|
|
||||||
FROM ubuntu:18.10
|
|
||||||
|
|
||||||
ENV LANG en_US.utf8
|
|
||||||
WORKDIR /greptimedb
|
|
||||||
|
|
||||||
# Use old-releases.ubuntu.com to avoid 404s: https://help.ubuntu.com/community/EOLUpgrades.
|
|
||||||
RUN echo "deb http://old-releases.ubuntu.com/ubuntu/ cosmic main restricted universe multiverse\n\
|
|
||||||
deb http://old-releases.ubuntu.com/ubuntu/ cosmic-updates main restricted universe multiverse\n\
|
|
||||||
deb http://old-releases.ubuntu.com/ubuntu/ cosmic-security main restricted universe multiverse" > /etc/apt/sources.list
|
|
||||||
|
|
||||||
# Install dependencies.
|
|
||||||
RUN apt-get update && DEBIAN_FRONTEND=noninteractive apt-get install -y \
|
|
||||||
libssl-dev \
|
|
||||||
tzdata \
|
|
||||||
curl \
|
|
||||||
ca-certificates \
|
|
||||||
git \
|
|
||||||
build-essential \
|
|
||||||
unzip \
|
|
||||||
pkg-config
|
|
||||||
|
|
||||||
# Install protoc.
|
|
||||||
ENV PROTOC_VERSION=25.1
|
|
||||||
RUN if [ "$(uname -m)" = "x86_64" ]; then \
|
|
||||||
PROTOC_ZIP=protoc-${PROTOC_VERSION}-linux-x86_64.zip; \
|
|
||||||
elif [ "$(uname -m)" = "aarch64" ]; then \
|
|
||||||
PROTOC_ZIP=protoc-${PROTOC_VERSION}-linux-aarch_64.zip; \
|
|
||||||
else \
|
|
||||||
echo "Unsupported architecture"; exit 1; \
|
|
||||||
fi && \
|
|
||||||
curl -OL https://github.com/protocolbuffers/protobuf/releases/download/v${PROTOC_VERSION}/${PROTOC_ZIP} && \
|
|
||||||
unzip -o ${PROTOC_ZIP} -d /usr/local bin/protoc && \
|
|
||||||
unzip -o ${PROTOC_ZIP} -d /usr/local 'include/*' && \
|
|
||||||
rm -f ${PROTOC_ZIP}
|
|
||||||
|
|
||||||
# Install Rust.
|
|
||||||
SHELL ["/bin/bash", "-c"]
|
|
||||||
RUN curl --proto '=https' --tlsv1.2 -sSf https://sh.rustup.rs | sh -s -- --no-modify-path --default-toolchain none -y
|
|
||||||
ENV PATH /root/.cargo/bin/:$PATH
|
|
||||||
|
|
||||||
# Install Rust toolchains.
|
|
||||||
ARG RUST_TOOLCHAIN
|
|
||||||
RUN rustup toolchain install ${RUST_TOOLCHAIN}
|
|
||||||
|
|
||||||
# Install cargo-binstall with a specific version to adapt the current rust toolchain.
|
|
||||||
# Note: if we use the latest version, we may encounter the following `use of unstable library feature 'io_error_downcast'` error.
|
|
||||||
RUN cargo install cargo-binstall --version 1.6.6 --locked
|
|
||||||
|
|
||||||
# Install nextest.
|
|
||||||
RUN cargo binstall cargo-nextest --no-confirm
|
|
||||||
docker/dev-builder/ubuntu/Dockerfile-20.04 (new file, 66 lines)
@@ -0,0 +1,66 @@
|
|||||||
|
FROM ubuntu:20.04
|
||||||
|
|
||||||
|
# The root path under which contains all the dependencies to build this Dockerfile.
|
||||||
|
ARG DOCKER_BUILD_ROOT=.
|
||||||
|
|
||||||
|
ENV LANG en_US.utf8
|
||||||
|
WORKDIR /greptimedb
|
||||||
|
|
||||||
|
RUN apt-get update && \
|
||||||
|
DEBIAN_FRONTEND=noninteractive apt-get install -y software-properties-common
|
||||||
|
# Install dependencies.
|
||||||
|
RUN apt-get update && DEBIAN_FRONTEND=noninteractive apt-get install -y \
|
||||||
|
libssl-dev \
|
||||||
|
tzdata \
|
||||||
|
curl \
|
||||||
|
unzip \
|
||||||
|
ca-certificates \
|
||||||
|
git \
|
||||||
|
build-essential \
|
||||||
|
pkg-config
|
||||||
|
|
||||||
|
ARG TARGETPLATFORM
|
||||||
|
RUN echo "target platform: $TARGETPLATFORM"
|
||||||
|
|
||||||
|
ARG PROTOBUF_VERSION=29.3
|
||||||
|
|
||||||
|
# Install protobuf, because the one in the apt is too old (v3.12).
|
||||||
|
RUN if [ "$TARGETPLATFORM" = "linux/arm64" ]; then \
|
||||||
|
curl -OL https://github.com/protocolbuffers/protobuf/releases/download/v${PROTOBUF_VERSION}/protoc-${PROTOBUF_VERSION}-linux-aarch_64.zip && \
|
||||||
|
unzip protoc-${PROTOBUF_VERSION}-linux-aarch_64.zip -d protoc3; \
|
||||||
|
elif [ "$TARGETPLATFORM" = "linux/amd64" ]; then \
|
||||||
|
curl -OL https://github.com/protocolbuffers/protobuf/releases/download/v${PROTOBUF_VERSION}/protoc-${PROTOBUF_VERSION}-linux-x86_64.zip && \
|
||||||
|
unzip protoc-${PROTOBUF_VERSION}-linux-x86_64.zip -d protoc3; \
|
||||||
|
fi
|
||||||
|
RUN mv protoc3/bin/* /usr/local/bin/
|
||||||
|
RUN mv protoc3/include/* /usr/local/include/
|
||||||
|
|
||||||
|
# Silence all `safe.directory` warnings, to avoid the "detect dubious repository" error when building with submodules.
|
||||||
|
# Disabling the safe directory check here won't pose extra security issues, because in our usage for this dev build
|
||||||
|
# image, we use it solely on our own environment (that github action's VM, or ECS created dynamically by ourselves),
|
||||||
|
# and the repositories are pulled from trusted sources (still us, of course). Doing so does not violate the intention
|
||||||
|
# of the Git's addition to the "safe.directory" at the first place (see the commit message here:
|
||||||
|
# https://github.com/git/git/commit/8959555cee7ec045958f9b6dd62e541affb7e7d9).
|
||||||
|
# There's also another solution to this, that we add the desired submodules to the safe directory, instead of using
|
||||||
|
# wildcard here. However, that requires the git's config files and the submodules all owned by the very same user.
|
||||||
|
# It's troublesome to do this since the dev build runs in Docker, which is under user "root"; while outside the Docker,
|
||||||
|
# it can be a different user that have prepared the submodules.
|
||||||
|
RUN git config --global --add safe.directory '*'
|
||||||
|
|
||||||
|
# Install Rust.
|
||||||
|
SHELL ["/bin/bash", "-c"]
|
||||||
|
RUN curl --proto '=https' --tlsv1.2 -sSf https://sh.rustup.rs | sh -s -- --no-modify-path --default-toolchain none -y
|
||||||
|
ENV PATH /root/.cargo/bin/:$PATH
|
||||||
|
|
||||||
|
# Install Rust toolchains.
|
||||||
|
ARG RUST_TOOLCHAIN
|
||||||
|
RUN rustup toolchain install ${RUST_TOOLCHAIN}
|
||||||
|
|
||||||
|
# Install cargo-binstall with a specific version to adapt the current rust toolchain.
|
||||||
|
# Note: if we use the latest version, we may encounter the following `use of unstable library feature 'io_error_downcast'` error.
|
||||||
|
# Compiling from source takes too long, so we use the precompiled binary instead.
|
||||||
|
COPY $DOCKER_BUILD_ROOT/docker/dev-builder/binstall/pull_binstall.sh /usr/local/bin/pull_binstall.sh
|
||||||
|
RUN chmod +x /usr/local/bin/pull_binstall.sh && /usr/local/bin/pull_binstall.sh
|
||||||
|
|
||||||
|
# Install nextest.
|
||||||
|
RUN cargo binstall cargo-nextest --no-confirm
|
||||||
@@ -39,14 +39,16 @@ services:
|
|||||||
container_name: metasrv
|
container_name: metasrv
|
||||||
ports:
|
ports:
|
||||||
- 3002:3002
|
- 3002:3002
|
||||||
|
- 3000:3000
|
||||||
command:
|
command:
|
||||||
- metasrv
|
- metasrv
|
||||||
- start
|
- start
|
||||||
- --bind-addr=0.0.0.0:3002
|
- --rpc-bind-addr=0.0.0.0:3002
|
||||||
- --server-addr=metasrv:3002
|
- --rpc-server-addr=metasrv:3002
|
||||||
- --store-addrs=etcd0:2379
|
- --store-addrs=etcd0:2379
|
||||||
|
- --http-addr=0.0.0.0:3000
|
||||||
healthcheck:
|
healthcheck:
|
||||||
test: [ "CMD", "curl", "-f", "http://metasrv:3002/health" ]
|
test: [ "CMD", "curl", "-f", "http://metasrv:3000/health" ]
|
||||||
interval: 5s
|
interval: 5s
|
||||||
timeout: 3s
|
timeout: 3s
|
||||||
retries: 5
|
retries: 5
|
||||||
@@ -66,17 +68,17 @@ services:
|
|||||||
- datanode
|
- datanode
|
||||||
- start
|
- start
|
||||||
- --node-id=0
|
- --node-id=0
|
||||||
- --rpc-addr=0.0.0.0:3001
|
- --rpc-bind-addr=0.0.0.0:3001
|
||||||
- --rpc-hostname=datanode0:3001
|
- --rpc-server-addr=datanode0:3001
|
||||||
- --metasrv-addrs=metasrv:3002
|
- --metasrv-addrs=metasrv:3002
|
||||||
- --http-addr=0.0.0.0:5000
|
- --http-addr=0.0.0.0:5000
|
||||||
volumes:
|
volumes:
|
||||||
- /tmp/greptimedb-cluster-docker-compose/datanode0:/tmp/greptimedb
|
- /tmp/greptimedb-cluster-docker-compose/datanode0:/tmp/greptimedb
|
||||||
healthcheck:
|
healthcheck:
|
||||||
test: [ "CMD", "curl", "-f", "http://datanode0:5000/health" ]
|
test: [ "CMD", "curl", "-fv", "http://datanode0:5000/health" ]
|
||||||
interval: 5s
|
interval: 5s
|
||||||
timeout: 3s
|
timeout: 3s
|
||||||
retries: 5
|
retries: 10
|
||||||
depends_on:
|
depends_on:
|
||||||
metasrv:
|
metasrv:
|
||||||
condition: service_healthy
|
condition: service_healthy
|
||||||
@@ -96,7 +98,7 @@ services:
|
|||||||
- start
|
- start
|
||||||
- --metasrv-addrs=metasrv:3002
|
- --metasrv-addrs=metasrv:3002
|
||||||
- --http-addr=0.0.0.0:4000
|
- --http-addr=0.0.0.0:4000
|
||||||
- --rpc-addr=0.0.0.0:4001
|
- --rpc-bind-addr=0.0.0.0:4001
|
||||||
- --mysql-addr=0.0.0.0:4002
|
- --mysql-addr=0.0.0.0:4002
|
||||||
- --postgres-addr=0.0.0.0:4003
|
- --postgres-addr=0.0.0.0:4003
|
||||||
healthcheck:
|
healthcheck:
|
||||||
@@ -115,16 +117,23 @@ services:
|
|||||||
container_name: flownode0
|
container_name: flownode0
|
||||||
ports:
|
ports:
|
||||||
- 4004:4004
|
- 4004:4004
|
||||||
|
- 4005:4005
|
||||||
command:
|
command:
|
||||||
- flownode
|
- flownode
|
||||||
- start
|
- start
|
||||||
- --node-id=0
|
- --node-id=0
|
||||||
- --metasrv-addrs=metasrv:3002
|
- --metasrv-addrs=metasrv:3002
|
||||||
- --rpc-addr=0.0.0.0:4004
|
- --rpc-bind-addr=0.0.0.0:4004
|
||||||
- --rpc-hostname=flownode0:4004
|
- --rpc-server-addr=flownode0:4004
|
||||||
|
- --http-addr=0.0.0.0:4005
|
||||||
depends_on:
|
depends_on:
|
||||||
frontend0:
|
frontend0:
|
||||||
condition: service_healthy
|
condition: service_healthy
|
||||||
|
healthcheck:
|
||||||
|
test: [ "CMD", "curl", "-f", "http://flownode0:4005/health" ]
|
||||||
|
interval: 5s
|
||||||
|
timeout: 3s
|
||||||
|
retries: 5
|
||||||
networks:
|
networks:
|
||||||
- greptimedb
|
- greptimedb
|
||||||
|
|
||||||
|
|||||||
@@ -1,5 +0,0 @@
|
|||||||
numpy>=1.24.2
|
|
||||||
pandas>=1.5.3
|
|
||||||
pyarrow>=11.0.0
|
|
||||||
requests>=2.28.2
|
|
||||||
scipy>=1.10.1
|
|
||||||
docs/benchmarks/tsbs/v0.12.0.md (new file, 40 lines)
@@ -0,0 +1,40 @@
|
|||||||
|
# TSBS benchmark - v0.12.0
|
||||||
|
|
||||||
|
## Environment
|
||||||
|
|
||||||
|
### Amazon EC2
|
||||||
|
|
||||||
|
| Item | Value |
|
||||||
|
|---------|-------------------------|
|
||||||
|
| Machine | c5d.2xlarge |
|
||||||
|
| CPU | 8 core |
|
||||||
|
| Memory | 16GB |
|
||||||
|
| Disk | 100GB (GP3) |
|
||||||
|
| OS | Ubuntu Server 24.04 LTS |
|
||||||
|
|
||||||
|
## Write performance
|
||||||
|
|
||||||
|
| Environment | Ingest rate (rows/s) |
|
||||||
|
|-----------------|----------------------|
|
||||||
|
| EC2 c5d.2xlarge | 326839.28 |
|
||||||
|
|
||||||
|
## Query performance
|
||||||
|
|
||||||
|
| Query type | EC2 c5d.2xlarge (ms) |
|
||||||
|
|-----------------------|----------------------|
|
||||||
|
| cpu-max-all-1 | 12.46 |
|
||||||
|
| cpu-max-all-8 | 24.20 |
|
||||||
|
| double-groupby-1 | 673.08 |
|
||||||
|
| double-groupby-5 | 963.99 |
|
||||||
|
| double-groupby-all | 1330.05 |
|
||||||
|
| groupby-orderby-limit | 952.46 |
|
||||||
|
| high-cpu-1 | 5.08 |
|
||||||
|
| high-cpu-all | 4638.57 |
|
||||||
|
| lastpoint | 591.02 |
|
||||||
|
| single-groupby-1-1-1 | 4.06 |
|
||||||
|
| single-groupby-1-1-12 | 4.73 |
|
||||||
|
| single-groupby-1-8-1 | 8.23 |
|
||||||
|
| single-groupby-5-1-1 | 4.61 |
|
||||||
|
| single-groupby-5-1-12 | 5.61 |
|
||||||
|
| single-groupby-5-8-1 | 9.74 |
|
||||||
|
|
||||||
@@ -4,6 +4,16 @@ This crate provides an easy approach to dump memory profiling info.
 ## Prerequisites
 ### jemalloc
+jeprof is already compiled in the target directory of GreptimeDB. You can find the binary and use it.
+```
+# find jeprof binary
+find . -name 'jeprof'
+# add executable permission
+chmod +x <path_to_jeprof>
+```
+The path is usually under `./target/${PROFILE}/build/tikv-jemalloc-sys-${HASH}/out/build/bin/jeprof`.
+The default version of jemalloc installed from the package manager may not have the `--collapsed` option.
+You may need to check whether the `jeprof` version is >= `5.3.0` if you want to install it from the package manager.
 ```bash
 # for macOS
 brew install jemalloc
@@ -23,7 +33,11 @@ curl https://raw.githubusercontent.com/brendangregg/FlameGraph/master/flamegraph
 Start GreptimeDB instance with environment variables:

 ```bash
+# for Linux
 MALLOC_CONF=prof:true ./target/debug/greptime standalone start
+
+# for macOS
+_RJEM_MALLOC_CONF=prof:true ./target/debug/greptime standalone start
 ```

 Dump memory profiling data through HTTP API:
(binary image changed, file not shown; before: 36 KiB, after: 25 KiB)
docs/logo-text-padding.png (binary, executable file changed to normal file, not shown; before: 25 KiB, after: 21 KiB)
flake.lock (generated, new file, 100 lines)
@@ -0,0 +1,100 @@
{
  "nodes": {
    "fenix": {
      "inputs": {
        "nixpkgs": [
          "nixpkgs"
        ],
        "rust-analyzer-src": "rust-analyzer-src"
      },
      "locked": {
        "lastModified": 1737613896,
        "narHash": "sha256-ldqXIglq74C7yKMFUzrS9xMT/EVs26vZpOD68Sh7OcU=",
        "owner": "nix-community",
        "repo": "fenix",
        "rev": "303a062fdd8e89f233db05868468975d17855d80",
        "type": "github"
      },
      "original": {
        "owner": "nix-community",
        "repo": "fenix",
        "type": "github"
      }
    },
    "flake-utils": {
      "inputs": {
        "systems": "systems"
      },
      "locked": {
        "lastModified": 1731533236,
        "narHash": "sha256-l0KFg5HjrsfsO/JpG+r7fRrqm12kzFHyUHqHCVpMMbI=",
        "owner": "numtide",
        "repo": "flake-utils",
        "rev": "11707dc2f618dd54ca8739b309ec4fc024de578b",
        "type": "github"
      },
      "original": {
        "owner": "numtide",
        "repo": "flake-utils",
        "type": "github"
      }
    },
    "nixpkgs": {
      "locked": {
        "lastModified": 1737569578,
        "narHash": "sha256-6qY0pk2QmUtBT9Mywdvif0i/CLVgpCjMUn6g9vB+f3M=",
        "owner": "NixOS",
        "repo": "nixpkgs",
        "rev": "47addd76727f42d351590c905d9d1905ca895b82",
        "type": "github"
      },
      "original": {
        "owner": "NixOS",
        "ref": "nixos-24.11",
        "repo": "nixpkgs",
        "type": "github"
      }
    },
    "root": {
      "inputs": {
        "fenix": "fenix",
        "flake-utils": "flake-utils",
        "nixpkgs": "nixpkgs"
      }
    },
    "rust-analyzer-src": {
      "flake": false,
      "locked": {
        "lastModified": 1737581772,
        "narHash": "sha256-t1P2Pe3FAX9TlJsCZbmJ3wn+C4qr6aSMypAOu8WNsN0=",
        "owner": "rust-lang",
        "repo": "rust-analyzer",
        "rev": "582af7ee9c8d84f5d534272fc7de9f292bd849be",
        "type": "github"
      },
      "original": {
        "owner": "rust-lang",
        "ref": "nightly",
        "repo": "rust-analyzer",
        "type": "github"
      }
    },
    "systems": {
      "locked": {
        "lastModified": 1681028828,
        "narHash": "sha256-Vy1rq5AaRuLzOxct8nz4T6wlgyUR7zLU309k9mBC768=",
        "owner": "nix-systems",
        "repo": "default",
        "rev": "da67096a3b9bf56a91d16901293e51ba5b49a27e",
        "type": "github"
      },
      "original": {
        "owner": "nix-systems",
        "repo": "default",
        "type": "github"
      }
    }
  },
  "root": "root",
  "version": 7
}
flake.nix (new file, 56 lines)
@@ -0,0 +1,56 @@
{
  description = "Development environment flake";

  inputs = {
    nixpkgs.url = "github:NixOS/nixpkgs/nixos-24.11";
    fenix = {
      url = "github:nix-community/fenix";
      inputs.nixpkgs.follows = "nixpkgs";
    };
    flake-utils.url = "github:numtide/flake-utils";
  };

  outputs = { self, nixpkgs, fenix, flake-utils }:
    flake-utils.lib.eachDefaultSystem (system:
      let
        pkgs = nixpkgs.legacyPackages.${system};
        buildInputs = with pkgs; [
          libgit2
          libz
        ];
        lib = nixpkgs.lib;
        rustToolchain = fenix.packages.${system}.fromToolchainName {
          name = (lib.importTOML ./rust-toolchain.toml).toolchain.channel;
          sha256 = "sha256-f/CVA1EC61EWbh0SjaRNhLL0Ypx2ObupbzigZp8NmL4=";
        };
      in
      {
        devShells.default = pkgs.mkShell {
          nativeBuildInputs = with pkgs; [
            pkg-config
            git
            clang
            gcc
            protobuf
            gnumake
            mold
            (rustToolchain.withComponents [
              "cargo"
              "clippy"
              "rust-src"
              "rustc"
              "rustfmt"
              "rust-analyzer"
              "llvm-tools"
            ])
            cargo-nextest
            cargo-llvm-cov
            taplo
            curl
            gnuplot ## for cargo bench
          ];

          LD_LIBRARY_PATH = pkgs.lib.makeLibraryPath buildInputs;
        };
      });
}
grafana/check.sh (new executable file, 19 lines)
@@ -0,0 +1,19 @@
#!/usr/bin/env bash

BASEDIR=$(dirname "$0")

# Use jq to check for panels with empty or missing descriptions
invalid_panels=$(cat $BASEDIR/greptimedb-cluster.json | jq -r '
  .panels[]
  | select((.type == "stats" or .type == "timeseries") and (.description == "" or .description == null))
')

# Check if any invalid panels were found
if [[ -n "$invalid_panels" ]]; then
  echo "Error: The following panels have empty or missing descriptions:"
  echo "$invalid_panels"
  exit 1
else
  echo "All panels with type 'stats' or 'timeseries' have valid descriptions."
  exit 0
fi
(file diff suppressed because it is too large)
grafana/summary.sh (new executable file, 11 lines)
@@ -0,0 +1,11 @@
#!/usr/bin/env bash

BASEDIR=$(dirname "$0")
echo '| Title | Description | Expressions |
|---|---|---|'

cat $BASEDIR/greptimedb-cluster.json | jq -r '
  .panels |
  map(select(.type == "stat" or .type == "timeseries")) |
  .[] | "| \(.title) | \(.description | gsub("\n"; "<br>")) | \(.targets | map(.expr // .rawSql | "`\(.|gsub("\n"; "<br>"))`") | join("<br>")) |"
'
@@ -1,3 +1,2 @@
 [toolchain]
-channel = "nightly-2024-10-19"
+channel = "nightly-2024-12-25"
-components = ["rust-analyzer", "llvm-tools"]
shell.nix (deleted, 31 lines)
@@ -1,31 +0,0 @@
let
  nixpkgs = fetchTarball "https://github.com/NixOS/nixpkgs/tarball/nixos-24.11";
  fenix = import (fetchTarball "https://github.com/nix-community/fenix/archive/main.tar.gz") {};
  pkgs = import nixpkgs { config = {}; overlays = []; };
in

pkgs.mkShell rec {
  nativeBuildInputs = with pkgs; [
    pkg-config
    git
    clang
    gcc
    protobuf
    gnumake
    mold
    (fenix.fromToolchainFile {
      dir = ./.;
    })
    cargo-nextest
    cargo-llvm-cov
    taplo
    curl
  ];

  buildInputs = with pkgs; [
    libgit2
    libz
  ];

  LD_LIBRARY_PATH = pkgs.lib.makeLibraryPath buildInputs;
}
@@ -15,13 +15,10 @@ common-macro.workspace = true
 common-time.workspace = true
 datatypes.workspace = true
 greptime-proto.workspace = true
-paste = "1.0"
+paste.workspace = true
 prost.workspace = true
 serde_json.workspace = true
 snafu.workspace = true

 [build-dependencies]
 tonic-build = "0.11"
-
-[dev-dependencies]
-paste = "1.0"
@@ -33,7 +33,7 @@ pub enum Error {
         #[snafu(implicit)]
         location: Location,
         #[snafu(source)]
-        error: prost::DecodeError,
+        error: prost::UnknownEnumValue,
     },

     #[snafu(display("Failed to create column datatype from {:?}", from))]
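A minimal sketch of where that error type comes from (the standalone function and the `api::v1` import path are illustrative assumptions; only `prost::UnknownEnumValue` and the generated `TryFrom<i32>` conversion are the point):

```rust
use api::v1::ColumnDataType; // generated protobuf enum; import path assumed for illustration

// Recent prost releases return `prost::UnknownEnumValue` (rather than a generic
// `DecodeError`) from the generated `TryFrom<i32>` impls on protobuf enums,
// which is what the snafu `source` field above now has to match.
fn parse_datatype(raw: i32) -> Result<ColumnDataType, prost::UnknownEnumValue> {
    ColumnDataType::try_from(raw)
}
```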
@@ -19,9 +19,7 @@ use common_decimal::decimal128::{DECIMAL128_DEFAULT_SCALE, DECIMAL128_MAX_PRECIS
 use common_decimal::Decimal128;
 use common_time::time::Time;
 use common_time::timestamp::TimeUnit;
-use common_time::{
-    Date, DateTime, IntervalDayTime, IntervalMonthDayNano, IntervalYearMonth, Timestamp,
-};
+use common_time::{Date, IntervalDayTime, IntervalMonthDayNano, IntervalYearMonth, Timestamp};
 use datatypes::prelude::{ConcreteDataType, ValueRef};
 use datatypes::scalars::ScalarVector;
 use datatypes::types::{
@@ -29,8 +27,8 @@ use datatypes::types::{
 };
 use datatypes::value::{OrderedF32, OrderedF64, Value};
 use datatypes::vectors::{
-    BinaryVector, BooleanVector, DateTimeVector, DateVector, Decimal128Vector, Float32Vector,
-    Float64Vector, Int32Vector, Int64Vector, IntervalDayTimeVector, IntervalMonthDayNanoVector,
+    BinaryVector, BooleanVector, DateVector, Decimal128Vector, Float32Vector, Float64Vector,
+    Int32Vector, Int64Vector, IntervalDayTimeVector, IntervalMonthDayNanoVector,
     IntervalYearMonthVector, PrimitiveVector, StringVector, TimeMicrosecondVector,
     TimeMillisecondVector, TimeNanosecondVector, TimeSecondVector, TimestampMicrosecondVector,
     TimestampMillisecondVector, TimestampNanosecondVector, TimestampSecondVector, UInt32Vector,
@@ -86,7 +84,7 @@ impl ColumnDataTypeWrapper {

     /// Get a tuple of ColumnDataType and ColumnDataTypeExtension.
     pub fn to_parts(&self) -> (ColumnDataType, Option<ColumnDataTypeExtension>) {
-        (self.datatype, self.datatype_ext.clone())
+        (self.datatype, self.datatype_ext)
     }
 }
@@ -118,7 +116,7 @@ impl From<ColumnDataTypeWrapper> for ConcreteDataType {
             ColumnDataType::Json => ConcreteDataType::json_datatype(),
             ColumnDataType::String => ConcreteDataType::string_datatype(),
             ColumnDataType::Date => ConcreteDataType::date_datatype(),
-            ColumnDataType::Datetime => ConcreteDataType::datetime_datatype(),
+            ColumnDataType::Datetime => ConcreteDataType::timestamp_microsecond_datatype(),
             ColumnDataType::TimestampSecond => ConcreteDataType::timestamp_second_datatype(),
             ColumnDataType::TimestampMillisecond => {
                 ConcreteDataType::timestamp_millisecond_datatype()
@@ -271,7 +269,6 @@ impl TryFrom<ConcreteDataType> for ColumnDataTypeWrapper {
             ConcreteDataType::Binary(_) => ColumnDataType::Binary,
             ConcreteDataType::String(_) => ColumnDataType::String,
             ConcreteDataType::Date(_) => ColumnDataType::Date,
-            ConcreteDataType::DateTime(_) => ColumnDataType::Datetime,
             ConcreteDataType::Timestamp(t) => match t {
                 TimestampType::Second(_) => ColumnDataType::TimestampSecond,
                 TimestampType::Millisecond(_) => ColumnDataType::TimestampMillisecond,
@@ -476,7 +473,6 @@ pub fn push_vals(column: &mut Column, origin_count: usize, vector: VectorRef) {
             Value::String(val) => values.string_values.push(val.as_utf8().to_string()),
             Value::Binary(val) => values.binary_values.push(val.to_vec()),
             Value::Date(val) => values.date_values.push(val.val()),
-            Value::DateTime(val) => values.datetime_values.push(val.val()),
             Value::Timestamp(val) => match val.unit() {
                 TimeUnit::Second => values.timestamp_second_values.push(val.value()),
                 TimeUnit::Millisecond => values.timestamp_millisecond_values.push(val.value()),
@@ -577,12 +573,11 @@ pub fn pb_value_to_value_ref<'a>(
         ValueData::BinaryValue(bytes) => ValueRef::Binary(bytes.as_slice()),
         ValueData::StringValue(string) => ValueRef::String(string.as_str()),
         ValueData::DateValue(d) => ValueRef::Date(Date::from(*d)),
-        ValueData::DatetimeValue(d) => ValueRef::DateTime(DateTime::new(*d)),
         ValueData::TimestampSecondValue(t) => ValueRef::Timestamp(Timestamp::new_second(*t)),
         ValueData::TimestampMillisecondValue(t) => {
             ValueRef::Timestamp(Timestamp::new_millisecond(*t))
         }
-        ValueData::TimestampMicrosecondValue(t) => {
+        ValueData::DatetimeValue(t) | ValueData::TimestampMicrosecondValue(t) => {
             ValueRef::Timestamp(Timestamp::new_microsecond(*t))
         }
         ValueData::TimestampNanosecondValue(t) => {
@@ -651,7 +646,6 @@ pub fn pb_values_to_vector_ref(data_type: &ConcreteDataType, values: Values) ->
         ConcreteDataType::Binary(_) => Arc::new(BinaryVector::from(values.binary_values)),
         ConcreteDataType::String(_) => Arc::new(StringVector::from_vec(values.string_values)),
         ConcreteDataType::Date(_) => Arc::new(DateVector::from_vec(values.date_values)),
-        ConcreteDataType::DateTime(_) => Arc::new(DateTimeVector::from_vec(values.datetime_values)),
         ConcreteDataType::Timestamp(unit) => match unit {
             TimestampType::Second(_) => Arc::new(TimestampSecondVector::from_vec(
                 values.timestamp_second_values,
@@ -685,14 +679,18 @@ pub fn pb_values_to_vector_ref(data_type: &ConcreteDataType, values: Values) ->
             IntervalType::YearMonth(_) => Arc::new(IntervalYearMonthVector::from_vec(
                 values.interval_year_month_values,
             )),
-            IntervalType::DayTime(_) => Arc::new(IntervalDayTimeVector::from_vec(
-                values.interval_day_time_values,
-            )),
+            IntervalType::DayTime(_) => Arc::new(IntervalDayTimeVector::from_iter_values(
+                values
+                    .interval_day_time_values
+                    .iter()
+                    .map(|x| IntervalDayTime::from_i64(*x).into()),
+            )),
             IntervalType::MonthDayNano(_) => {
                 Arc::new(IntervalMonthDayNanoVector::from_iter_values(
-                    values.interval_month_day_nano_values.iter().map(|x| {
-                        IntervalMonthDayNano::new(x.months, x.days, x.nanoseconds).to_i128()
-                    }),
+                    values
+                        .interval_month_day_nano_values
+                        .iter()
+                        .map(|x| IntervalMonthDayNano::new(x.months, x.days, x.nanoseconds).into()),
                 ))
             }
         },
@@ -783,11 +781,6 @@ pub fn pb_values_to_values(data_type: &ConcreteDataType, values: Values) -> Vec<
             .into_iter()
             .map(|val| val.into())
             .collect(),
-        ConcreteDataType::DateTime(_) => values
-            .datetime_values
-            .into_iter()
-            .map(|v| Value::DateTime(v.into()))
-            .collect(),
         ConcreteDataType::Date(_) => values
             .date_values
             .into_iter()
@@ -943,9 +936,6 @@ pub fn to_proto_value(value: Value) -> Option<v1::Value> {
         Value::Date(v) => v1::Value {
             value_data: Some(ValueData::DateValue(v.val())),
         },
-        Value::DateTime(v) => v1::Value {
-            value_data: Some(ValueData::DatetimeValue(v.val())),
-        },
         Value::Timestamp(v) => match v.unit() {
             TimeUnit::Second => v1::Value {
                 value_data: Some(ValueData::TimestampSecondValue(v.value())),
@@ -1062,7 +1052,6 @@ pub fn value_to_grpc_value(value: Value) -> GrpcValue {
         Value::String(v) => Some(ValueData::StringValue(v.as_utf8().to_string())),
         Value::Binary(v) => Some(ValueData::BinaryValue(v.to_vec())),
         Value::Date(v) => Some(ValueData::DateValue(v.val())),
-        Value::DateTime(v) => Some(ValueData::DatetimeValue(v.val())),
         Value::Timestamp(v) => Some(match v.unit() {
             TimeUnit::Second => ValueData::TimestampSecondValue(v.value()),
             TimeUnit::Millisecond => ValueData::TimestampMillisecondValue(v.value()),
@@ -1244,7 +1233,7 @@ mod tests {
             ColumnDataTypeWrapper::date_datatype().into()
         );
         assert_eq!(
-            ConcreteDataType::datetime_datatype(),
+            ConcreteDataType::timestamp_microsecond_datatype(),
             ColumnDataTypeWrapper::datetime_datatype().into()
         );
         assert_eq!(
@@ -1335,10 +1324,6 @@ mod tests {
             ColumnDataTypeWrapper::date_datatype(),
             ConcreteDataType::date_datatype().try_into().unwrap()
         );
-        assert_eq!(
-            ColumnDataTypeWrapper::datetime_datatype(),
-            ConcreteDataType::datetime_datatype().try_into().unwrap()
-        );
         assert_eq!(
             ColumnDataTypeWrapper::timestamp_millisecond_datatype(),
             ConcreteDataType::timestamp_millisecond_datatype()
@@ -1495,14 +1480,22 @@ mod tests {
             column.values.as_ref().unwrap().interval_year_month_values
         );

-        let vector = Arc::new(IntervalDayTimeVector::from_vec(vec![4, 5, 6]));
+        let vector = Arc::new(IntervalDayTimeVector::from_vec(vec![
+            IntervalDayTime::new(0, 4).into(),
+            IntervalDayTime::new(0, 5).into(),
+            IntervalDayTime::new(0, 6).into(),
+        ]));
         push_vals(&mut column, 3, vector);
         assert_eq!(
             vec![4, 5, 6],
             column.values.as_ref().unwrap().interval_day_time_values
         );

-        let vector = Arc::new(IntervalMonthDayNanoVector::from_vec(vec![7, 8, 9]));
+        let vector = Arc::new(IntervalMonthDayNanoVector::from_vec(vec![
+            IntervalMonthDayNano::new(0, 0, 7).into(),
+            IntervalMonthDayNano::new(0, 0, 8).into(),
+            IntervalMonthDayNano::new(0, 0, 9).into(),
+        ]));
         let len = vector.len();
         push_vals(&mut column, 3, vector);
         (0..len).for_each(|i| {
@@ -1818,17 +1811,6 @@ mod tests {
             ]
         );

-        test_convert_values!(
-            datetime,
-            vec![1.into(), 2.into(), 3.into()],
-            datetime,
-            vec![
-                Value::DateTime(1.into()),
-                Value::DateTime(2.into()),
-                Value::DateTime(3.into())
-            ]
-        );
-
     #[test]
     fn test_vectors_to_rows_for_different_types() {
         let boolean_vec = BooleanVector::from_vec(vec![true, false, true]);
@@ -15,10 +15,10 @@
 use std::collections::HashMap;

 use datatypes::schema::{
-    ColumnDefaultConstraint, ColumnSchema, FulltextAnalyzer, FulltextOptions, COMMENT_KEY,
-    FULLTEXT_KEY, INVERTED_INDEX_KEY, SKIPPING_INDEX_KEY,
+    ColumnDefaultConstraint, ColumnSchema, FulltextAnalyzer, FulltextOptions, SkippingIndexOptions,
+    SkippingIndexType, COMMENT_KEY, FULLTEXT_KEY, INVERTED_INDEX_KEY, SKIPPING_INDEX_KEY,
 };
-use greptime_proto::v1::Analyzer;
+use greptime_proto::v1::{Analyzer, SkippingIndexType as PbSkippingIndexType};
 use snafu::ResultExt;

 use crate::error::{self, Result};
@@ -34,10 +34,8 @@ const SKIPPING_INDEX_GRPC_KEY: &str = "skipping_index";

 /// Tries to construct a `ColumnSchema` from the given `ColumnDef`.
 pub fn try_as_column_schema(column_def: &ColumnDef) -> Result<ColumnSchema> {
-    let data_type = ColumnDataTypeWrapper::try_new(
-        column_def.data_type,
-        column_def.datatype_extension.clone(),
-    )?;
+    let data_type =
+        ColumnDataTypeWrapper::try_new(column_def.data_type, column_def.datatype_extension)?;

     let constraint = if column_def.default_constraint.is_empty() {
         None
@@ -57,13 +55,13 @@ pub fn try_as_column_schema(column_def: &ColumnDef) -> Result<ColumnSchema> {
     }
     if let Some(options) = column_def.options.as_ref() {
         if let Some(fulltext) = options.options.get(FULLTEXT_GRPC_KEY) {
-            metadata.insert(FULLTEXT_KEY.to_string(), fulltext.clone());
+            metadata.insert(FULLTEXT_KEY.to_string(), fulltext.to_owned());
         }
         if let Some(inverted_index) = options.options.get(INVERTED_INDEX_GRPC_KEY) {
-            metadata.insert(INVERTED_INDEX_KEY.to_string(), inverted_index.clone());
+            metadata.insert(INVERTED_INDEX_KEY.to_string(), inverted_index.to_owned());
         }
         if let Some(skipping_index) = options.options.get(SKIPPING_INDEX_GRPC_KEY) {
-            metadata.insert(SKIPPING_INDEX_KEY.to_string(), skipping_index.clone());
+            metadata.insert(SKIPPING_INDEX_KEY.to_string(), skipping_index.to_owned());
         }
     }

@@ -82,7 +80,7 @@ pub fn options_from_column_schema(column_schema: &ColumnSchema) -> Option<Column
     if let Some(fulltext) = column_schema.metadata().get(FULLTEXT_KEY) {
         options
             .options
-            .insert(FULLTEXT_GRPC_KEY.to_string(), fulltext.clone());
+            .insert(FULLTEXT_GRPC_KEY.to_string(), fulltext.to_owned());
     }
     if let Some(inverted_index) = column_schema.metadata().get(INVERTED_INDEX_KEY) {
         options
@@ -102,7 +100,14 @@ pub fn options_from_column_schema(column_schema: &ColumnSchema) -> Option<Column
 pub fn contains_fulltext(options: &Option<ColumnOptions>) -> bool {
     options
         .as_ref()
-        .map_or(false, |o| o.options.contains_key(FULLTEXT_GRPC_KEY))
+        .is_some_and(|o| o.options.contains_key(FULLTEXT_GRPC_KEY))
+}
+
+/// Checks if the `ColumnOptions` contains skipping index options.
+pub fn contains_skipping(options: &Option<ColumnOptions>) -> bool {
+    options
+        .as_ref()
+        .is_some_and(|o| o.options.contains_key(SKIPPING_INDEX_GRPC_KEY))
 }

 /// Tries to construct a `ColumnOptions` from the given `FulltextOptions`.
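Several hunks in this change set (here and in `CachedKvBackend` further down) replace `map_or(false, ..)` with `is_some_and`. The two are equivalent; a tiny standalone illustration with made-up values:

```rust
fn main() {
    let some_version: Option<u32> = Some(3);
    let no_version: Option<u32> = None;

    // `is_some_and` is the clippy-preferred spelling of `map_or(false, ..)`:
    // both return false for `None` and apply the predicate otherwise.
    assert_eq!(
        some_version.map_or(false, |v| v > 2),
        some_version.is_some_and(|v| v > 2)
    );
    assert_eq!(
        no_version.map_or(false, |v| v > 2),
        no_version.is_some_and(|v| v > 2)
    );
}
```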
@@ -115,6 +120,27 @@ pub fn options_from_fulltext(fulltext: &FulltextOptions) -> Result<Option<Column
     Ok((!options.options.is_empty()).then_some(options))
 }

+/// Tries to construct a `ColumnOptions` from the given `SkippingIndexOptions`.
+pub fn options_from_skipping(skipping: &SkippingIndexOptions) -> Result<Option<ColumnOptions>> {
+    let mut options = ColumnOptions::default();
+
+    let v = serde_json::to_string(skipping).context(error::SerializeJsonSnafu)?;
+    options
+        .options
+        .insert(SKIPPING_INDEX_GRPC_KEY.to_string(), v);
+
+    Ok((!options.options.is_empty()).then_some(options))
+}
+
+/// Tries to construct a `ColumnOptions` for inverted index.
+pub fn options_from_inverted() -> ColumnOptions {
+    let mut options = ColumnOptions::default();
+    options
+        .options
+        .insert(INVERTED_INDEX_GRPC_KEY.to_string(), "true".to_string());
+    options
+}
+
 /// Tries to construct a `FulltextAnalyzer` from the given analyzer.
 pub fn as_fulltext_option(analyzer: Analyzer) -> FulltextAnalyzer {
     match analyzer {
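A hedged round-trip sketch of how the two new skipping-index helpers compose (the wrapper function is illustrative and assumes it lives in the same module, so the crate-local `Result` and the helpers are in scope; only `options_from_skipping` and `contains_skipping` come from this change):

```rust
use datatypes::schema::SkippingIndexOptions;

// Serialize skipping-index options into gRPC column options, then confirm
// `contains_skipping` can detect them again. Purely a usage sketch.
fn skipping_options_round_trip(opts: &SkippingIndexOptions) -> Result<bool> {
    let column_options = options_from_skipping(opts)?;
    Ok(contains_skipping(&column_options))
}
```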
@@ -123,6 +149,13 @@ pub fn as_fulltext_option(analyzer: Analyzer) -> FulltextAnalyzer {
     }
 }

+/// Tries to construct a `SkippingIndexType` from the given skipping index type.
+pub fn as_skipping_index_type(skipping_index_type: PbSkippingIndexType) -> SkippingIndexType {
+    match skipping_index_type {
+        PbSkippingIndexType::BloomFilter => SkippingIndexType::BloomFilter,
+    }
+}
+
 #[cfg(test)]
 mod tests {

@@ -181,14 +214,14 @@ mod tests {
         let options = options_from_column_schema(&schema);
         assert!(options.is_none());

-        let schema = ColumnSchema::new("test", ConcreteDataType::string_datatype(), true)
+        let mut schema = ColumnSchema::new("test", ConcreteDataType::string_datatype(), true)
             .with_fulltext_options(FulltextOptions {
                 enable: true,
                 analyzer: FulltextAnalyzer::English,
                 case_sensitive: false,
             })
-            .unwrap()
-            .set_inverted_index(true);
+            .unwrap();
+        schema.set_inverted_index(true);
         let options = options_from_column_schema(&schema).unwrap();
         assert_eq!(
             options.options.get(FULLTEXT_GRPC_KEY).unwrap(),
@@ -15,7 +15,7 @@ api.workspace = true
 arrow.workspace = true
 arrow-schema.workspace = true
 async-stream.workspace = true
-async-trait = "0.1"
+async-trait.workspace = true
 bytes.workspace = true
 common-catalog.workspace = true
 common-error.workspace = true
@@ -31,7 +31,7 @@ common-version.workspace = true
 dashmap.workspace = true
 datafusion.workspace = true
 datatypes.workspace = true
-futures = "0.3"
+futures.workspace = true
 futures-util.workspace = true
 humantime.workspace = true
 itertools.workspace = true
@@ -39,7 +39,7 @@ lazy_static.workspace = true
 meta-client.workspace = true
 moka = { workspace = true, features = ["future", "sync"] }
 partition.workspace = true
-paste = "1.0"
+paste.workspace = true
 prometheus.workspace = true
 rustc-hash.workspace = true
 serde_json.workspace = true
@@ -49,7 +49,7 @@ sql.workspace = true
 store-api.workspace = true
 table.workspace = true
 tokio.workspace = true
-tokio-stream = "0.1"
+tokio-stream.workspace = true

 [dev-dependencies]
 cache.workspace = true
@@ -122,13 +122,6 @@ pub enum Error {
         source: BoxedError,
     },

-    #[snafu(display("Failed to re-compile script due to internal error"))]
-    CompileScriptInternal {
-        #[snafu(implicit)]
-        location: Location,
-        source: BoxedError,
-    },
-
     #[snafu(display("Failed to create table, table info: {}", table_info))]
     CreateTable {
         table_info: String,
@@ -343,9 +336,7 @@ impl ErrorExt for Error {
             Error::DecodePlan { source, .. } => source.status_code(),
             Error::InvalidTableInfoInCatalog { source, .. } => source.status_code(),

-            Error::CompileScriptInternal { source, .. } | Error::Internal { source, .. } => {
-                source.status_code()
-            }
+            Error::Internal { source, .. } => source.status_code(),

             Error::QueryAccessDenied { .. } => StatusCode::AccessDenied,
             Error::Datafusion { error, .. } => datafusion_status_code::<Self>(error, None),
@@ -303,7 +303,7 @@ impl KvBackend for CachedKvBackend {
             .lock()
             .unwrap()
             .as_ref()
-            .map_or(false, |v| !self.validate_version(*v))
+            .is_some_and(|v| !self.validate_version(*v))
         {
             self.cache.invalidate(key).await;
         }
@@ -38,6 +38,7 @@ use partition::manager::{PartitionRuleManager, PartitionRuleManagerRef};
 use session::context::{Channel, QueryContext};
 use snafu::prelude::*;
 use table::dist_table::DistTable;
+use table::metadata::TableId;
 use table::table::numbers::{NumbersTable, NUMBERS_TABLE_NAME};
 use table::table_name::TableName;
 use table::TableRef;
@@ -286,6 +287,28 @@ impl CatalogManager for KvBackendCatalogManager {
             return Ok(None);
         }

+    async fn tables_by_ids(
+        &self,
+        catalog: &str,
+        schema: &str,
+        table_ids: &[TableId],
+    ) -> Result<Vec<TableRef>> {
+        let table_info_values = self
+            .table_metadata_manager
+            .table_info_manager()
+            .batch_get(table_ids)
+            .await
+            .context(TableMetadataManagerSnafu)?;
+
+        let tables = table_info_values
+            .into_values()
+            .filter(|t| t.table_info.catalog_name == catalog && t.table_info.schema_name == schema)
+            .map(build_table)
+            .collect::<Result<Vec<_>>>()?;
+
+        Ok(tables)
+    }
+
     fn tables<'a>(
         &'a self,
         catalog: &'a str,
@@ -41,6 +41,7 @@ pub mod information_schema {
 }

 pub mod table_source;

 #[async_trait::async_trait]
 pub trait CatalogManager: Send + Sync {
     fn as_any(&self) -> &dyn Any;
@@ -86,6 +87,14 @@ pub trait CatalogManager: Send + Sync {
         query_ctx: Option<&QueryContext>,
     ) -> Result<Option<TableRef>>;

+    /// Returns the tables by table ids.
+    async fn tables_by_ids(
+        &self,
+        catalog: &str,
+        schema: &str,
+        table_ids: &[TableId],
+    ) -> Result<Vec<TableRef>>;
+
     /// Returns all tables with a stream by catalog and schema.
     fn tables<'a>(
         &'a self,
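An illustrative caller of the new trait method: resolve a set of table ids to fully qualified names, mirroring how `information_schema.flows` uses it later in this change set. The helper function, the catalog name, and the schema name are assumptions; only the trait signature and `full_table_name()` come from the diff.

```rust
use catalog::CatalogManager;
use table::metadata::TableId;

// Hypothetical helper: look up tables by id in one catalog/schema and return
// their fully qualified names.
async fn full_table_names(
    catalog_manager: &dyn CatalogManager,
    table_ids: &[TableId],
) -> catalog::error::Result<Vec<String>> {
    let tables = catalog_manager
        .tables_by_ids("greptime", "public", table_ids)
        .await?;
    Ok(tables
        .into_iter()
        .map(|t| t.table_info().full_table_name())
        .collect())
}
```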
@@ -14,7 +14,7 @@

 use std::any::Any;
 use std::collections::hash_map::Entry;
-use std::collections::HashMap;
+use std::collections::{HashMap, HashSet};
 use std::sync::{Arc, RwLock, Weak};

 use async_stream::{stream, try_stream};
@@ -28,6 +28,7 @@ use common_meta::kv_backend::memory::MemoryKvBackend;
 use futures_util::stream::BoxStream;
 use session::context::QueryContext;
 use snafu::OptionExt;
+use table::metadata::TableId;
 use table::TableRef;

 use crate::error::{CatalogNotFoundSnafu, Result, SchemaNotFoundSnafu, TableExistsSnafu};
@@ -143,6 +144,33 @@ impl CatalogManager for MemoryCatalogManager {
         Ok(result)
     }

+    async fn tables_by_ids(
+        &self,
+        catalog: &str,
+        schema: &str,
+        table_ids: &[TableId],
+    ) -> Result<Vec<TableRef>> {
+        let catalogs = self.catalogs.read().unwrap();
+
+        let schemas = catalogs.get(catalog).context(CatalogNotFoundSnafu {
+            catalog_name: catalog,
+        })?;
+
+        let tables = schemas
+            .get(schema)
+            .context(SchemaNotFoundSnafu { catalog, schema })?;
+
+        let filter_ids: HashSet<_> = table_ids.iter().collect();
+        // It is very inefficient, but we do not need to optimize it since it will not be called in `MemoryCatalogManager`.
+        let tables = tables
+            .values()
+            .filter(|t| filter_ids.contains(&t.table_info().table_id()))
+            .cloned()
+            .collect::<Vec<_>>();
+
+        Ok(tables)
+    }
+
     fn tables<'a>(
         &'a self,
         catalog: &'a str,
@@ -77,7 +77,7 @@ trait SystemSchemaProviderInner {
     fn system_table(&self, name: &str) -> Option<SystemTableRef>;

     fn table_info(catalog_name: String, table: &SystemTableRef) -> TableInfoRef {
-        let table_meta = TableMetaBuilder::default()
+        let table_meta = TableMetaBuilder::empty()
             .schema(table.schema())
             .primary_key_indices(vec![])
             .next_column_id(0)
@@ -64,6 +64,7 @@ const INIT_CAPACITY: usize = 42;
 /// - `uptime`: the uptime of the peer.
 /// - `active_time`: the time since the last activity of the peer.
 ///
+#[derive(Debug)]
 pub(super) struct InformationSchemaClusterInfo {
     schema: SchemaRef,
     catalog_manager: Weak<dyn CatalogManager>,
@@ -45,6 +45,7 @@ use crate::error::{
 use crate::information_schema::Predicates;
 use crate::CatalogManager;

+#[derive(Debug)]
 pub(super) struct InformationSchemaColumns {
     schema: SchemaRef,
     catalog_name: String,
@@ -364,10 +365,6 @@ impl InformationSchemaColumnsBuilder {
         self.numeric_scales.push(None);

         match &column_schema.data_type {
-            ConcreteDataType::DateTime(datetime_type) => {
-                self.datetime_precisions
-                    .push(Some(datetime_type.precision() as i64));
-            }
             ConcreteDataType::Timestamp(ts_type) => {
                 self.datetime_precisions
                     .push(Some(ts_type.precision() as i64));
@@ -28,16 +28,19 @@ use datafusion::physical_plan::streaming::PartitionStream as DfPartitionStream;
 use datatypes::prelude::ConcreteDataType as CDT;
 use datatypes::scalars::ScalarVectorBuilder;
 use datatypes::schema::{ColumnSchema, Schema, SchemaRef};
+use datatypes::timestamp::TimestampMillisecond;
 use datatypes::value::Value;
 use datatypes::vectors::{
-    Int64VectorBuilder, StringVectorBuilder, UInt32VectorBuilder, UInt64VectorBuilder, VectorRef,
+    Int64VectorBuilder, StringVectorBuilder, TimestampMillisecondVectorBuilder,
+    UInt32VectorBuilder, UInt64VectorBuilder, VectorRef,
 };
 use futures::TryStreamExt;
 use snafu::{OptionExt, ResultExt};
 use store_api::storage::{ScanRequest, TableId};

 use crate::error::{
-    CreateRecordBatchSnafu, FlowInfoNotFoundSnafu, InternalSnafu, JsonSnafu, ListFlowsSnafu, Result,
+    CreateRecordBatchSnafu, FlowInfoNotFoundSnafu, InternalSnafu, JsonSnafu, ListFlowsSnafu,
+    Result, UpgradeWeakCatalogManagerRefSnafu,
 };
 use crate::information_schema::{Predicates, FLOWS};
 use crate::system_schema::information_schema::InformationTable;
@@ -59,9 +62,13 @@ pub const SOURCE_TABLE_IDS: &str = "source_table_ids";
 pub const SINK_TABLE_NAME: &str = "sink_table_name";
 pub const FLOWNODE_IDS: &str = "flownode_ids";
 pub const OPTIONS: &str = "options";
+pub const CREATED_TIME: &str = "created_time";
+pub const UPDATED_TIME: &str = "updated_time";
+pub const LAST_EXECUTION_TIME: &str = "last_execution_time";
+pub const SOURCE_TABLE_NAMES: &str = "source_table_names";

 /// The `information_schema.flows` to provides information about flows in databases.
-///
+#[derive(Debug)]
 pub(super) struct InformationSchemaFlows {
     schema: SchemaRef,
     catalog_name: String,
@@ -99,6 +106,14 @@ impl InformationSchemaFlows {
             (SINK_TABLE_NAME, CDT::string_datatype(), false),
             (FLOWNODE_IDS, CDT::string_datatype(), true),
             (OPTIONS, CDT::string_datatype(), true),
+            (CREATED_TIME, CDT::timestamp_millisecond_datatype(), false),
+            (UPDATED_TIME, CDT::timestamp_millisecond_datatype(), false),
+            (
+                LAST_EXECUTION_TIME,
+                CDT::timestamp_millisecond_datatype(),
+                true,
+            ),
+            (SOURCE_TABLE_NAMES, CDT::string_datatype(), true),
         ]
         .into_iter()
         .map(|(name, ty, nullable)| ColumnSchema::new(name, ty, nullable))
@@ -170,6 +185,10 @@ struct InformationSchemaFlowsBuilder {
     sink_table_names: StringVectorBuilder,
     flownode_id_groups: StringVectorBuilder,
     option_groups: StringVectorBuilder,
+    created_time: TimestampMillisecondVectorBuilder,
+    updated_time: TimestampMillisecondVectorBuilder,
+    last_execution_time: TimestampMillisecondVectorBuilder,
+    source_table_names: StringVectorBuilder,
 }

 impl InformationSchemaFlowsBuilder {
@@ -196,6 +215,10 @@ impl InformationSchemaFlowsBuilder {
             sink_table_names: StringVectorBuilder::with_capacity(INIT_CAPACITY),
             flownode_id_groups: StringVectorBuilder::with_capacity(INIT_CAPACITY),
             option_groups: StringVectorBuilder::with_capacity(INIT_CAPACITY),
+            created_time: TimestampMillisecondVectorBuilder::with_capacity(INIT_CAPACITY),
+            updated_time: TimestampMillisecondVectorBuilder::with_capacity(INIT_CAPACITY),
+            last_execution_time: TimestampMillisecondVectorBuilder::with_capacity(INIT_CAPACITY),
+            source_table_names: StringVectorBuilder::with_capacity(INIT_CAPACITY),
         }
     }

@@ -235,13 +258,14 @@ impl InformationSchemaFlowsBuilder {
                 catalog_name: catalog_name.to_string(),
                 flow_name: flow_name.to_string(),
             })?;
-            self.add_flow(&predicates, flow_id.flow_id(), flow_info, &flow_stat)?;
+            self.add_flow(&predicates, flow_id.flow_id(), flow_info, &flow_stat)
+                .await?;
         }

         self.finish()
     }

-    fn add_flow(
+    async fn add_flow(
         &mut self,
         predicates: &Predicates,
         flow_id: FlowId,
@@ -290,6 +314,36 @@ impl InformationSchemaFlowsBuilder {
                 input: format!("{:?}", flow_info.options()),
             },
         )?));
+        self.created_time
+            .push(Some(flow_info.created_time().timestamp_millis().into()));
+        self.updated_time
+            .push(Some(flow_info.updated_time().timestamp_millis().into()));
+        self.last_execution_time
+            .push(flow_stat.as_ref().and_then(|state| {
+                state
+                    .last_exec_time_map
+                    .get(&flow_id)
+                    .map(|v| TimestampMillisecond::new(*v))
+            }));
+
+        let mut source_table_names = vec![];
+        let catalog_name = self.catalog_name.clone();
+        let catalog_manager = self
+            .catalog_manager
+            .upgrade()
+            .context(UpgradeWeakCatalogManagerRefSnafu)?;
+        for schema_name in catalog_manager.schema_names(&catalog_name, None).await? {
+            source_table_names.extend(
+                catalog_manager
+                    .tables_by_ids(&catalog_name, &schema_name, flow_info.source_table_ids())
+                    .await?
+                    .into_iter()
+                    .map(|table| table.table_info().full_table_name()),
+            );
+        }
+
+        let source_table_names = source_table_names.join(",");
+        self.source_table_names.push(Some(&source_table_names));
+
         Ok(())
     }
@@ -307,6 +361,10 @@ impl InformationSchemaFlowsBuilder {
             Arc::new(self.sink_table_names.finish()),
             Arc::new(self.flownode_id_groups.finish()),
             Arc::new(self.option_groups.finish()),
+            Arc::new(self.created_time.finish()),
+            Arc::new(self.updated_time.finish()),
+            Arc::new(self.last_execution_time.finish()),
+            Arc::new(self.source_table_names.finish()),
         ];
         RecordBatch::new(self.schema.clone(), columns).context(CreateRecordBatchSnafu)
     }
@@ -20,7 +20,7 @@ use datatypes::vectors::{Int64Vector, StringVector, VectorRef};

 use super::table_names::*;
 use crate::system_schema::utils::tables::{
-    bigint_column, datetime_column, string_column, string_columns,
+    bigint_column, string_column, string_columns, timestamp_micro_column,
 };

 const NO_VALUE: &str = "NO";
@@ -163,17 +163,17 @@ pub(super) fn get_schema_columns(table_name: &str) -> (SchemaRef, Vec<VectorRef>
                 string_column("EVENT_BODY"),
                 string_column("EVENT_DEFINITION"),
                 string_column("EVENT_TYPE"),
-                datetime_column("EXECUTE_AT"),
+                timestamp_micro_column("EXECUTE_AT"),
                 bigint_column("INTERVAL_VALUE"),
                 string_column("INTERVAL_FIELD"),
                 string_column("SQL_MODE"),
-                datetime_column("STARTS"),
-                datetime_column("ENDS"),
+                timestamp_micro_column("STARTS"),
+                timestamp_micro_column("ENDS"),
                 string_column("STATUS"),
                 string_column("ON_COMPLETION"),
-                datetime_column("CREATED"),
-                datetime_column("LAST_ALTERED"),
-                datetime_column("LAST_EXECUTED"),
+                timestamp_micro_column("CREATED"),
+                timestamp_micro_column("LAST_ALTERED"),
+                timestamp_micro_column("LAST_EXECUTED"),
                 string_column("EVENT_COMMENT"),
                 bigint_column("ORIGINATOR"),
                 string_column("CHARACTER_SET_CLIENT"),
@@ -204,10 +204,10 @@ pub(super) fn get_schema_columns(table_name: &str) -> (SchemaRef, Vec<VectorRef>
                 bigint_column("INITIAL_SIZE"),
                 bigint_column("MAXIMUM_SIZE"),
                 bigint_column("AUTOEXTEND_SIZE"),
-                datetime_column("CREATION_TIME"),
-                datetime_column("LAST_UPDATE_TIME"),
-                datetime_column("LAST_ACCESS_TIME"),
-                datetime_column("RECOVER_TIME"),
+                timestamp_micro_column("CREATION_TIME"),
+                timestamp_micro_column("LAST_UPDATE_TIME"),
+                timestamp_micro_column("LAST_ACCESS_TIME"),
+                timestamp_micro_column("RECOVER_TIME"),
                 bigint_column("TRANSACTION_COUNTER"),
                 string_column("VERSION"),
                 string_column("ROW_FORMAT"),
@@ -217,9 +217,9 @@ pub(super) fn get_schema_columns(table_name: &str) -> (SchemaRef, Vec<VectorRef>
                 bigint_column("MAX_DATA_LENGTH"),
                 bigint_column("INDEX_LENGTH"),
                 bigint_column("DATA_FREE"),
-                datetime_column("CREATE_TIME"),
-                datetime_column("UPDATE_TIME"),
-                datetime_column("CHECK_TIME"),
+                timestamp_micro_column("CREATE_TIME"),
+                timestamp_micro_column("UPDATE_TIME"),
+                timestamp_micro_column("CHECK_TIME"),
                 string_column("CHECKSUM"),
                 string_column("STATUS"),
                 string_column("EXTRA"),
@@ -330,8 +330,8 @@ pub(super) fn get_schema_columns(table_name: &str) -> (SchemaRef, Vec<VectorRef>
                 string_column("SQL_DATA_ACCESS"),
                 string_column("SQL_PATH"),
                 string_column("SECURITY_TYPE"),
-                datetime_column("CREATED"),
-                datetime_column("LAST_ALTERED"),
+                timestamp_micro_column("CREATED"),
+                timestamp_micro_column("LAST_ALTERED"),
                 string_column("SQL_MODE"),
                 string_column("ROUTINE_COMMENT"),
                 string_column("DEFINER"),
@@ -383,7 +383,7 @@ pub(super) fn get_schema_columns(table_name: &str) -> (SchemaRef, Vec<VectorRef>
                 string_column("ACTION_REFERENCE_NEW_TABLE"),
                 string_column("ACTION_REFERENCE_OLD_ROW"),
                 string_column("ACTION_REFERENCE_NEW_ROW"),
-                datetime_column("CREATED"),
+                timestamp_micro_column("CREATED"),
                 string_column("SQL_MODE"),
                 string_column("DEFINER"),
                 string_column("CHARACTER_SET_CLIENT"),
@@ -58,8 +58,11 @@ pub(crate) const TIME_INDEX_CONSTRAINT_NAME: &str = "TIME INDEX";
 pub(crate) const INVERTED_INDEX_CONSTRAINT_NAME: &str = "INVERTED INDEX";
 /// Fulltext index constraint name
 pub(crate) const FULLTEXT_INDEX_CONSTRAINT_NAME: &str = "FULLTEXT INDEX";
+/// Skipping index constraint name
+pub(crate) const SKIPPING_INDEX_CONSTRAINT_NAME: &str = "SKIPPING INDEX";

 /// The virtual table implementation for `information_schema.KEY_COLUMN_USAGE`.
+#[derive(Debug)]
 pub(super) struct InformationSchemaKeyColumnUsage {
     schema: SchemaRef,
     catalog_name: String,
@@ -246,10 +249,12 @@ impl InformationSchemaKeyColumnUsageBuilder {
|
|||||||
if column.is_inverted_indexed() {
|
if column.is_inverted_indexed() {
|
||||||
constraints.push(INVERTED_INDEX_CONSTRAINT_NAME);
|
constraints.push(INVERTED_INDEX_CONSTRAINT_NAME);
|
||||||
}
|
}
|
||||||
|
if column.is_fulltext_indexed() {
|
||||||
if column.has_fulltext_index_key() {
|
|
||||||
constraints.push(FULLTEXT_INDEX_CONSTRAINT_NAME);
|
constraints.push(FULLTEXT_INDEX_CONSTRAINT_NAME);
|
||||||
}
|
}
|
||||||
|
if column.is_skipping_indexed() {
|
||||||
|
constraints.push(SKIPPING_INDEX_CONSTRAINT_NAME);
|
||||||
|
}
|
||||||
|
|
||||||
if !constraints.is_empty() {
|
if !constraints.is_empty() {
|
||||||
let aggregated_constraints = constraints.join(", ");
|
let aggregated_constraints = constraints.join(", ");
|
||||||
|
|||||||
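Taken together, the KEY_COLUMN_USAGE hunk above folds every index kind that applies to a column into a single comma-separated constraint string. A minimal standalone sketch of that aggregation, using illustrative boolean flags rather than the patch's column metadata API:

// Sketch only: mirrors the push/join pattern in the hunk above.
fn aggregate_constraints(inverted: bool, fulltext: bool, skipping: bool) -> Option<String> {
    let mut constraints: Vec<&str> = Vec::new();
    if inverted {
        constraints.push("INVERTED INDEX");
    }
    if fulltext {
        constraints.push("FULLTEXT INDEX");
    }
    if skipping {
        constraints.push("SKIPPING INDEX");
    }
    if constraints.is_empty() {
        None
    } else {
        // e.g. Some("INVERTED INDEX, SKIPPING INDEX")
        Some(constraints.join(", "))
    }
}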
@@ -20,17 +20,18 @@ use common_catalog::consts::INFORMATION_SCHEMA_PARTITIONS_TABLE_ID;
 use common_error::ext::BoxedError;
 use common_recordbatch::adapter::RecordBatchStreamAdapter;
 use common_recordbatch::{RecordBatch, SendableRecordBatchStream};
-use common_time::datetime::DateTime;
 use datafusion::execution::TaskContext;
 use datafusion::physical_plan::stream::RecordBatchStreamAdapter as DfRecordBatchStreamAdapter;
 use datafusion::physical_plan::streaming::PartitionStream as DfPartitionStream;
 use datafusion::physical_plan::SendableRecordBatchStream as DfSendableRecordBatchStream;
 use datatypes::prelude::{ConcreteDataType, ScalarVectorBuilder, VectorRef};
 use datatypes::schema::{ColumnSchema, Schema, SchemaRef};
+use datatypes::timestamp::TimestampMicrosecond;
 use datatypes::value::Value;
 use datatypes::vectors::{
-ConstantVector, DateTimeVector, DateTimeVectorBuilder, Int64Vector, Int64VectorBuilder,
-MutableVector, StringVector, StringVectorBuilder, UInt64VectorBuilder,
+ConstantVector, Int64Vector, Int64VectorBuilder, MutableVector, StringVector,
+StringVectorBuilder, TimestampMicrosecondVector, TimestampMicrosecondVectorBuilder,
+UInt64VectorBuilder,
 };
 use futures::{StreamExt, TryStreamExt};
 use partition::manager::PartitionInfo;
@@ -59,6 +60,7 @@ const INIT_CAPACITY: usize = 42;
 /// The `PARTITIONS` table provides information about partitioned tables.
 /// See https://dev.mysql.com/doc/refman/8.0/en/information-schema-partitions-table.html
 /// We provide an extral column `greptime_partition_id` for GreptimeDB region id.
+#[derive(Debug)]
 pub(super) struct InformationSchemaPartitions {
 schema: SchemaRef,
 catalog_name: String,
@@ -126,9 +128,21 @@ impl InformationSchemaPartitions {
 ColumnSchema::new("max_data_length", ConcreteDataType::int64_datatype(), true),
 ColumnSchema::new("index_length", ConcreteDataType::int64_datatype(), true),
 ColumnSchema::new("data_free", ConcreteDataType::int64_datatype(), true),
-ColumnSchema::new("create_time", ConcreteDataType::datetime_datatype(), true),
-ColumnSchema::new("update_time", ConcreteDataType::datetime_datatype(), true),
-ColumnSchema::new("check_time", ConcreteDataType::datetime_datatype(), true),
+ColumnSchema::new(
+"create_time",
+ConcreteDataType::timestamp_microsecond_datatype(),
+true,
+),
+ColumnSchema::new(
+"update_time",
+ConcreteDataType::timestamp_microsecond_datatype(),
+true,
+),
+ColumnSchema::new(
+"check_time",
+ConcreteDataType::timestamp_microsecond_datatype(),
+true,
+),
 ColumnSchema::new("checksum", ConcreteDataType::int64_datatype(), true),
 ColumnSchema::new(
 "partition_comment",
@@ -199,7 +213,7 @@ struct InformationSchemaPartitionsBuilder {
 partition_names: StringVectorBuilder,
 partition_ordinal_positions: Int64VectorBuilder,
 partition_expressions: StringVectorBuilder,
-create_times: DateTimeVectorBuilder,
+create_times: TimestampMicrosecondVectorBuilder,
 partition_ids: UInt64VectorBuilder,
 }

@@ -219,7 +233,7 @@ impl InformationSchemaPartitionsBuilder {
 partition_names: StringVectorBuilder::with_capacity(INIT_CAPACITY),
 partition_ordinal_positions: Int64VectorBuilder::with_capacity(INIT_CAPACITY),
 partition_expressions: StringVectorBuilder::with_capacity(INIT_CAPACITY),
-create_times: DateTimeVectorBuilder::with_capacity(INIT_CAPACITY),
+create_times: TimestampMicrosecondVectorBuilder::with_capacity(INIT_CAPACITY),
 partition_ids: UInt64VectorBuilder::with_capacity(INIT_CAPACITY),
 }
 }
@@ -323,7 +337,7 @@ impl InformationSchemaPartitionsBuilder {
 };

 self.partition_expressions.push(expressions.as_deref());
-self.create_times.push(Some(DateTime::from(
+self.create_times.push(Some(TimestampMicrosecond::from(
 table_info.meta.created_on.timestamp_millis(),
 )));
 self.partition_ids.push(Some(partition.id.as_u64()));
@@ -341,8 +355,8 @@ impl InformationSchemaPartitionsBuilder {
 Arc::new(Int64Vector::from(vec![None])),
 rows_num,
 ));
-let null_datetime_vector = Arc::new(ConstantVector::new(
-Arc::new(DateTimeVector::from(vec![None])),
+let null_timestampmicrosecond_vector = Arc::new(ConstantVector::new(
+Arc::new(TimestampMicrosecondVector::from(vec![None])),
 rows_num,
 ));
 let partition_methods = Arc::new(ConstantVector::new(
@@ -372,8 +386,8 @@ impl InformationSchemaPartitionsBuilder {
 null_i64_vector.clone(),
 Arc::new(self.create_times.finish()),
 // TODO(dennis): supports update_time
-null_datetime_vector.clone(),
-null_datetime_vector,
+null_timestampmicrosecond_vector.clone(),
+null_timestampmicrosecond_vector,
 null_i64_vector,
 null_string_vector.clone(),
 null_string_vector.clone(),

@@ -56,7 +56,7 @@ const INIT_CAPACITY: usize = 42;
 /// - `end_time`: the ending execution time of the procedure.
 /// - `status`: the status of the procedure.
 /// - `lock_keys`: the lock keys of the procedure.
-///
+#[derive(Debug)]
 pub(super) struct InformationSchemaProcedureInfo {
 schema: SchemaRef,
 catalog_manager: Weak<dyn CatalogManager>,

@@ -59,7 +59,7 @@ const INIT_CAPACITY: usize = 42;
 /// - `is_leader`: whether the peer is the leader
 /// - `status`: the region status, `ALIVE` or `DOWNGRADED`.
 /// - `down_seconds`: the duration of being offline, in seconds.
-///
+#[derive(Debug)]
 pub(super) struct InformationSchemaRegionPeers {
 schema: SchemaRef,
 catalog_name: String,

@@ -63,7 +63,7 @@ const INIT_CAPACITY: usize = 42;
 /// - `index_size`: The sst index files size in bytes.
 /// - `engine`: The engine type.
 /// - `region_role`: The region role.
-///
+#[derive(Debug)]
 pub(super) struct InformationSchemaRegionStatistics {
 schema: SchemaRef,
 catalog_manager: Weak<dyn CatalogManager>,

@@ -38,6 +38,7 @@ use store_api::storage::{ScanRequest, TableId};
 use super::{InformationTable, RUNTIME_METRICS};
 use crate::error::{CreateRecordBatchSnafu, InternalSnafu, Result};

+#[derive(Debug)]
 pub(super) struct InformationSchemaMetrics {
 schema: SchemaRef,
 }

@@ -49,6 +49,7 @@ pub const SCHEMA_OPTS: &str = "options";
 const INIT_CAPACITY: usize = 42;

 /// The `information_schema.schemata` table implementation.
+#[derive(Debug)]
 pub(super) struct InformationSchemaSchemata {
 schema: SchemaRef,
 catalog_name: String,

@@ -43,6 +43,7 @@ use crate::information_schema::Predicates;
 use crate::CatalogManager;

 /// The `TABLE_CONSTRAINTS` table describes which tables have constraints.
+#[derive(Debug)]
 pub(super) struct InformationSchemaTableConstraints {
 schema: SchemaRef,
 catalog_name: String,

@@ -30,7 +30,8 @@ use datatypes::prelude::{ConcreteDataType, ScalarVectorBuilder, VectorRef};
 use datatypes::schema::{ColumnSchema, Schema, SchemaRef};
 use datatypes::value::Value;
 use datatypes::vectors::{
-DateTimeVectorBuilder, StringVectorBuilder, UInt32VectorBuilder, UInt64VectorBuilder,
+StringVectorBuilder, TimestampMicrosecondVectorBuilder, UInt32VectorBuilder,
+UInt64VectorBuilder,
 };
 use futures::TryStreamExt;
 use snafu::{OptionExt, ResultExt};
@@ -71,6 +72,7 @@ const TABLE_ID: &str = "table_id";
 pub const ENGINE: &str = "engine";
 const INIT_CAPACITY: usize = 42;

+#[derive(Debug)]
 pub(super) struct InformationSchemaTables {
 schema: SchemaRef,
 catalog_name: String,
@@ -104,9 +106,21 @@ impl InformationSchemaTables {
 ColumnSchema::new(TABLE_ROWS, ConcreteDataType::uint64_datatype(), true),
 ColumnSchema::new(DATA_FREE, ConcreteDataType::uint64_datatype(), true),
 ColumnSchema::new(AUTO_INCREMENT, ConcreteDataType::uint64_datatype(), true),
-ColumnSchema::new(CREATE_TIME, ConcreteDataType::datetime_datatype(), true),
-ColumnSchema::new(UPDATE_TIME, ConcreteDataType::datetime_datatype(), true),
-ColumnSchema::new(CHECK_TIME, ConcreteDataType::datetime_datatype(), true),
+ColumnSchema::new(
+CREATE_TIME,
+ConcreteDataType::timestamp_microsecond_datatype(),
+true,
+),
+ColumnSchema::new(
+UPDATE_TIME,
+ConcreteDataType::timestamp_microsecond_datatype(),
+true,
+),
+ColumnSchema::new(
+CHECK_TIME,
+ConcreteDataType::timestamp_microsecond_datatype(),
+true,
+),
 ColumnSchema::new(TABLE_COLLATION, ConcreteDataType::string_datatype(), true),
 ColumnSchema::new(CHECKSUM, ConcreteDataType::uint64_datatype(), true),
 ColumnSchema::new(CREATE_OPTIONS, ConcreteDataType::string_datatype(), true),
@@ -181,9 +195,9 @@ struct InformationSchemaTablesBuilder {
 max_index_length: UInt64VectorBuilder,
 data_free: UInt64VectorBuilder,
 auto_increment: UInt64VectorBuilder,
-create_time: DateTimeVectorBuilder,
-update_time: DateTimeVectorBuilder,
-check_time: DateTimeVectorBuilder,
+create_time: TimestampMicrosecondVectorBuilder,
+update_time: TimestampMicrosecondVectorBuilder,
+check_time: TimestampMicrosecondVectorBuilder,
 table_collation: StringVectorBuilder,
 checksum: UInt64VectorBuilder,
 create_options: StringVectorBuilder,
@@ -218,9 +232,9 @@ impl InformationSchemaTablesBuilder {
 max_index_length: UInt64VectorBuilder::with_capacity(INIT_CAPACITY),
 data_free: UInt64VectorBuilder::with_capacity(INIT_CAPACITY),
 auto_increment: UInt64VectorBuilder::with_capacity(INIT_CAPACITY),
-create_time: DateTimeVectorBuilder::with_capacity(INIT_CAPACITY),
-update_time: DateTimeVectorBuilder::with_capacity(INIT_CAPACITY),
-check_time: DateTimeVectorBuilder::with_capacity(INIT_CAPACITY),
+create_time: TimestampMicrosecondVectorBuilder::with_capacity(INIT_CAPACITY),
+update_time: TimestampMicrosecondVectorBuilder::with_capacity(INIT_CAPACITY),
+check_time: TimestampMicrosecondVectorBuilder::with_capacity(INIT_CAPACITY),
 table_collation: StringVectorBuilder::with_capacity(INIT_CAPACITY),
 checksum: UInt64VectorBuilder::with_capacity(INIT_CAPACITY),
 create_options: StringVectorBuilder::with_capacity(INIT_CAPACITY),

@@ -54,6 +54,7 @@ pub const CHARACTER_SET_CLIENT: &str = "character_set_client";
 pub const COLLATION_CONNECTION: &str = "collation_connection";

 /// The `information_schema.views` to provides information about views in databases.
+#[derive(Debug)]
 pub(super) struct InformationSchemaViews {
 schema: SchemaRef,
 catalog_name: String,

@@ -33,6 +33,7 @@ use super::SystemTable;
 use crate::error::{CreateRecordBatchSnafu, InternalSnafu, Result};

 /// A memory table with specified schema and columns.
+#[derive(Debug)]
 pub(crate) struct MemoryTable {
 pub(crate) table_id: TableId,
 pub(crate) table_name: &'static str,

@@ -14,6 +14,7 @@

 mod pg_catalog_memory_table;
 mod pg_class;
+mod pg_database;
 mod pg_namespace;
 mod table_names;

@@ -26,6 +27,7 @@ use lazy_static::lazy_static;
 use paste::paste;
 use pg_catalog_memory_table::get_schema_columns;
 use pg_class::PGClass;
+use pg_database::PGDatabase;
 use pg_namespace::PGNamespace;
 use session::context::{Channel, QueryContext};
 use table::TableRef;
@@ -113,6 +115,10 @@ impl PGCatalogProvider {
 PG_CLASS.to_string(),
 self.build_table(PG_CLASS).expect(PG_NAMESPACE),
 );
+tables.insert(
+PG_DATABASE.to_string(),
+self.build_table(PG_DATABASE).expect(PG_DATABASE),
+);
 self.tables = tables;
 }
 }
@@ -135,6 +141,11 @@ impl SystemSchemaProviderInner for PGCatalogProvider {
 self.catalog_manager.clone(),
 self.namespace_oid_map.clone(),
 ))),
+table_names::PG_DATABASE => Some(Arc::new(PGDatabase::new(
+self.catalog_name.clone(),
+self.catalog_manager.clone(),
+self.namespace_oid_map.clone(),
+))),
 _ => None,
 }
 }

@@ -12,6 +12,7 @@
 // See the License for the specific language governing permissions and
 // limitations under the License.

+use std::fmt;
 use std::sync::{Arc, Weak};

 use arrow_schema::SchemaRef as ArrowSchemaRef;
@@ -100,6 +101,15 @@ impl PGClass {
 }
 }

+impl fmt::Debug for PGClass {
+fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+f.debug_struct("PGClass")
+.field("schema", &self.schema)
+.field("catalog_name", &self.catalog_name)
+.finish()
+}
+}
+
 impl SystemTable for PGClass {
 fn table_id(&self) -> table::metadata::TableId {
 PG_CATALOG_PG_CLASS_TABLE_ID

src/catalog/src/system_schema/pg_catalog/pg_database.rs (new file, 223 lines)
@@ -0,0 +1,223 @@
+// Copyright 2023 Greptime Team
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+use std::sync::{Arc, Weak};
+
+use arrow_schema::SchemaRef as ArrowSchemaRef;
+use common_catalog::consts::PG_CATALOG_PG_DATABASE_TABLE_ID;
+use common_error::ext::BoxedError;
+use common_recordbatch::adapter::RecordBatchStreamAdapter;
+use common_recordbatch::{DfSendableRecordBatchStream, RecordBatch};
+use datafusion::execution::TaskContext;
+use datafusion::physical_plan::stream::RecordBatchStreamAdapter as DfRecordBatchStreamAdapter;
+use datafusion::physical_plan::streaming::PartitionStream as DfPartitionStream;
+use datatypes::scalars::ScalarVectorBuilder;
+use datatypes::schema::{Schema, SchemaRef};
+use datatypes::value::Value;
+use datatypes::vectors::{StringVectorBuilder, UInt32VectorBuilder, VectorRef};
+use snafu::{OptionExt, ResultExt};
+use store_api::storage::ScanRequest;
+
+use super::pg_namespace::oid_map::PGNamespaceOidMapRef;
+use super::{query_ctx, OID_COLUMN_NAME, PG_DATABASE};
+use crate::error::{
+CreateRecordBatchSnafu, InternalSnafu, Result, UpgradeWeakCatalogManagerRefSnafu,
+};
+use crate::information_schema::Predicates;
+use crate::system_schema::utils::tables::{string_column, u32_column};
+use crate::system_schema::SystemTable;
+use crate::CatalogManager;
+
+// === column name ===
+pub const DATNAME: &str = "datname";
+
+/// The initial capacity of the vector builders.
+const INIT_CAPACITY: usize = 42;
+
+/// The `pg_catalog.database` table implementation.
+pub(super) struct PGDatabase {
+schema: SchemaRef,
+catalog_name: String,
+catalog_manager: Weak<dyn CatalogManager>,
+
+// Workaround to convert schema_name to a numeric id
+namespace_oid_map: PGNamespaceOidMapRef,
+}
+
+impl std::fmt::Debug for PGDatabase {
+fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
+f.debug_struct("PGDatabase")
+.field("schema", &self.schema)
+.field("catalog_name", &self.catalog_name)
+.finish()
+}
+}
+
+impl PGDatabase {
+pub(super) fn new(
+catalog_name: String,
+catalog_manager: Weak<dyn CatalogManager>,
+namespace_oid_map: PGNamespaceOidMapRef,
+) -> Self {
+Self {
+schema: Self::schema(),
+catalog_name,
+catalog_manager,
+namespace_oid_map,
+}
+}
+
+fn schema() -> SchemaRef {
+Arc::new(Schema::new(vec![
+u32_column(OID_COLUMN_NAME),
+string_column(DATNAME),
+]))
+}
+
+fn builder(&self) -> PGCDatabaseBuilder {
+PGCDatabaseBuilder::new(
+self.schema.clone(),
+self.catalog_name.clone(),
+self.catalog_manager.clone(),
+self.namespace_oid_map.clone(),
+)
+}
+}
+
+impl DfPartitionStream for PGDatabase {
+fn schema(&self) -> &ArrowSchemaRef {
+self.schema.arrow_schema()
+}
+
+fn execute(&self, _: Arc<TaskContext>) -> DfSendableRecordBatchStream {
+let schema = self.schema.arrow_schema().clone();
+let mut builder = self.builder();
+Box::pin(DfRecordBatchStreamAdapter::new(
+schema,
+futures::stream::once(async move {
+builder
+.make_database(None)
+.await
+.map(|x| x.into_df_record_batch())
+.map_err(Into::into)
+}),
+))
+}
+}
+
+impl SystemTable for PGDatabase {
+fn table_id(&self) -> table::metadata::TableId {
+PG_CATALOG_PG_DATABASE_TABLE_ID
+}
+
+fn table_name(&self) -> &'static str {
+PG_DATABASE
+}
+
+fn schema(&self) -> SchemaRef {
+self.schema.clone()
+}
+
+fn to_stream(
+&self,
+request: ScanRequest,
+) -> Result<common_recordbatch::SendableRecordBatchStream> {
+let schema = self.schema.arrow_schema().clone();
+let mut builder = self.builder();
+let stream = Box::pin(DfRecordBatchStreamAdapter::new(
+schema,
+futures::stream::once(async move {
+builder
+.make_database(Some(request))
+.await
+.map(|x| x.into_df_record_batch())
+.map_err(Into::into)
+}),
+));
+Ok(Box::pin(
+RecordBatchStreamAdapter::try_new(stream)
+.map_err(BoxedError::new)
+.context(InternalSnafu)?,
+))
+}
+}
+
+/// Builds the `pg_catalog.pg_database` table row by row
+/// `oid` use schema name as a workaround since we don't have numeric schema id.
+/// `nspname` is the schema name.
+struct PGCDatabaseBuilder {
+schema: SchemaRef,
+catalog_name: String,
+catalog_manager: Weak<dyn CatalogManager>,
+namespace_oid_map: PGNamespaceOidMapRef,
+
+oid: UInt32VectorBuilder,
+datname: StringVectorBuilder,
+}
+
+impl PGCDatabaseBuilder {
+fn new(
+schema: SchemaRef,
+catalog_name: String,
+catalog_manager: Weak<dyn CatalogManager>,
+namespace_oid_map: PGNamespaceOidMapRef,
+) -> Self {
+Self {
+schema,
+catalog_name,
+catalog_manager,
+namespace_oid_map,
+
+oid: UInt32VectorBuilder::with_capacity(INIT_CAPACITY),
+datname: StringVectorBuilder::with_capacity(INIT_CAPACITY),
+}
+}
+
+async fn make_database(&mut self, request: Option<ScanRequest>) -> Result<RecordBatch> {
+let catalog_name = self.catalog_name.clone();
+let catalog_manager = self
+.catalog_manager
+.upgrade()
+.context(UpgradeWeakCatalogManagerRefSnafu)?;
+let predicates = Predicates::from_scan_request(&request);
+for schema_name in catalog_manager
+.schema_names(&catalog_name, query_ctx())
+.await?
+{
+self.add_database(&predicates, &schema_name);
+}
+self.finish()
+}
+
+fn add_database(&mut self, predicates: &Predicates, schema_name: &str) {
+let oid = self.namespace_oid_map.get_oid(schema_name);
+let row: [(&str, &Value); 2] = [
+(OID_COLUMN_NAME, &Value::from(oid)),
+(DATNAME, &Value::from(schema_name)),
+];
+
+if !predicates.eval(&row) {
+return;
+}
+
+self.oid.push(Some(oid));
+self.datname.push(Some(schema_name));
+}
+
+fn finish(&mut self) -> Result<RecordBatch> {
+let columns: Vec<VectorRef> =
+vec![Arc::new(self.oid.finish()), Arc::new(self.datname.finish())];
+RecordBatch::new(self.schema.clone(), columns).context(CreateRecordBatchSnafu)
+}
+}

@@ -17,6 +17,7 @@

 pub(super) mod oid_map;

+use std::fmt;
 use std::sync::{Arc, Weak};

 use arrow_schema::SchemaRef as ArrowSchemaRef;
@@ -87,6 +88,15 @@ impl PGNamespace {
 }
 }

+impl fmt::Debug for PGNamespace {
+fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+f.debug_struct("PGNamespace")
+.field("schema", &self.schema)
+.field("catalog_name", &self.catalog_name)
+.finish()
+}
+}
+
 impl SystemTable for PGNamespace {
 fn schema(&self) -> SchemaRef {
 self.schema.clone()

@@ -12,7 +12,11 @@
 // See the License for the specific language governing permissions and
 // limitations under the License.

-pub const PG_DATABASE: &str = "pg_databases";
+// https://www.postgresql.org/docs/current/catalog-pg-database.html
+pub const PG_DATABASE: &str = "pg_database";
+// https://www.postgresql.org/docs/current/catalog-pg-namespace.html
 pub const PG_NAMESPACE: &str = "pg_namespace";
+// https://www.postgresql.org/docs/current/catalog-pg-class.html
 pub const PG_CLASS: &str = "pg_class";
+// https://www.postgresql.org/docs/current/catalog-pg-type.html
 pub const PG_TYPE: &str = "pg_type";

@@ -51,10 +51,10 @@ pub fn bigint_column(name: &str) -> ColumnSchema {
 )
 }

-pub fn datetime_column(name: &str) -> ColumnSchema {
+pub fn timestamp_micro_column(name: &str) -> ColumnSchema {
 ColumnSchema::new(
 str::to_lowercase(name),
-ConcreteDataType::datetime_datatype(),
+ConcreteDataType::timestamp_microsecond_datatype(),
 false,
 )
 }
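The renamed helper above is what the earlier information_schema hunks call for every time column. A standalone sketch of the equivalent construction, assuming the `datatypes` API already shown in these diffs (the column name is only an example):

use datatypes::data_type::ConcreteDataType;
use datatypes::schema::ColumnSchema;

// Equivalent of timestamp_micro_column("CREATE_TIME"): a non-nullable,
// microsecond-precision timestamp column with a lower-cased name.
fn example_column() -> ColumnSchema {
    ColumnSchema::new(
        str::to_lowercase("CREATE_TIME"),
        ConcreteDataType::timestamp_microsecond_datatype(),
        false,
    )
}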
@@ -365,7 +365,7 @@ mod tests {
 Projection: person.id AS a, person.name AS b
 Filter: person.id > Int32(500)
 TableScan: person"#,
-format!("\n{:?}", source.get_logical_plan().unwrap())
+format!("\n{}", source.get_logical_plan().unwrap())
 );
 }
 }

@@ -15,12 +15,12 @@
 //! Dummy catalog for region server.

 use std::any::Any;
+use std::fmt;
 use std::sync::Arc;

 use async_trait::async_trait;
 use common_catalog::format_full_table_name;
-use datafusion::catalog::schema::SchemaProvider;
-use datafusion::catalog::{CatalogProvider, CatalogProviderList};
+use datafusion::catalog::{CatalogProvider, CatalogProviderList, SchemaProvider};
 use datafusion::datasource::TableProvider;
 use snafu::OptionExt;
 use table::table::adapter::DfTableProviderAdapter;
@@ -41,6 +41,12 @@ impl DummyCatalogList {
 }
 }

+impl fmt::Debug for DummyCatalogList {
+fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+f.debug_struct("DummyCatalogList").finish()
+}
+}
+
 impl CatalogProviderList for DummyCatalogList {
 fn as_any(&self) -> &dyn Any {
 self
@@ -91,6 +97,14 @@ impl CatalogProvider for DummyCatalogProvider {
 }
 }

+impl fmt::Debug for DummyCatalogProvider {
+fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+f.debug_struct("DummyCatalogProvider")
+.field("catalog_name", &self.catalog_name)
+.finish()
+}
+}
+
 /// A dummy schema provider for [DummyCatalogList].
 #[derive(Clone)]
 struct DummySchemaProvider {
@@ -127,3 +141,12 @@ impl SchemaProvider for DummySchemaProvider {
 true
 }
 }
+
+impl fmt::Debug for DummySchemaProvider {
+fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+f.debug_struct("DummySchemaProvider")
+.field("catalog_name", &self.catalog_name)
+.field("schema_name", &self.schema_name)
+.finish()
+}
+}

@@ -4,6 +4,10 @@ version.workspace = true
 edition.workspace = true
 license.workspace = true

+[features]
+pg_kvbackend = ["common-meta/pg_kvbackend"]
+mysql_kvbackend = ["common-meta/mysql_kvbackend"]
+
 [lints]
 workspace = true

@@ -40,6 +44,10 @@ futures.workspace = true
 humantime.workspace = true
 meta-client.workspace = true
 nu-ansi-term = "0.46"
+opendal = { version = "0.51.1", features = [
+"services-fs",
+"services-s3",
+] }
 query.workspace = true
 rand.workspace = true
 reqwest.workspace = true
@@ -56,7 +64,6 @@ tokio.workspace = true
 tracing-appender.workspace = true

 [dev-dependencies]
-common-test-util.workspace = true
 common-version.workspace = true
 serde.workspace = true
 tempfile.workspace = true

@@ -22,9 +22,15 @@ use clap::Parser;
 use common_error::ext::BoxedError;
 use common_meta::key::{TableMetadataManager, TableMetadataManagerRef};
 use common_meta::kv_backend::etcd::EtcdStore;
+use common_meta::kv_backend::memory::MemoryKvBackend;
+#[cfg(feature = "mysql_kvbackend")]
+use common_meta::kv_backend::rds::MySqlStore;
+#[cfg(feature = "pg_kvbackend")]
+use common_meta::kv_backend::rds::PgStore;
 use common_meta::peer::Peer;
 use common_meta::rpc::router::{Region, RegionRoute};
 use common_telemetry::info;
+use common_wal::options::WalOptions;
 use datatypes::data_type::ConcreteDataType;
 use datatypes::schema::{ColumnSchema, RawSchema};
 use rand::Rng;
@@ -55,18 +61,47 @@ where
 #[derive(Debug, Default, Parser)]
 pub struct BenchTableMetadataCommand {
 #[clap(long)]
-etcd_addr: String,
+etcd_addr: Option<String>,
+#[cfg(feature = "pg_kvbackend")]
+#[clap(long)]
+postgres_addr: Option<String>,
+#[cfg(feature = "mysql_kvbackend")]
+#[clap(long)]
+mysql_addr: Option<String>,
 #[clap(long)]
 count: u32,
 }

 impl BenchTableMetadataCommand {
 pub async fn build(&self) -> std::result::Result<Box<dyn Tool>, BoxedError> {
-let etcd_store = EtcdStore::with_endpoints([&self.etcd_addr], 128)
-.await
-.unwrap();
+let kv_backend = if let Some(etcd_addr) = &self.etcd_addr {
+info!("Using etcd as kv backend");
+EtcdStore::with_endpoints([etcd_addr], 128).await.unwrap()
+} else {
+Arc::new(MemoryKvBackend::new())
+};

-let table_metadata_manager = Arc::new(TableMetadataManager::new(etcd_store));
+#[cfg(feature = "pg_kvbackend")]
+let kv_backend = if let Some(postgres_addr) = &self.postgres_addr {
+info!("Using postgres as kv backend");
+PgStore::with_url(postgres_addr, "greptime_metakv", 128)
+.await
+.unwrap()
+} else {
+kv_backend
+};
+
+#[cfg(feature = "mysql_kvbackend")]
+let kv_backend = if let Some(mysql_addr) = &self.mysql_addr {
+info!("Using mysql as kv backend");
+MySqlStore::with_url(mysql_addr, "greptime_metakv", 128)
+.await
+.unwrap()
+} else {
+kv_backend
+};
+
+let table_metadata_manager = Arc::new(TableMetadataManager::new(kv_backend));

 let tool = BenchTableMetadata {
 table_metadata_manager,
@@ -165,7 +200,7 @@ fn create_region_routes(regions: Vec<RegionNumber>) -> Vec<RegionRoute> {
 region_routes
 }

-fn create_region_wal_options(regions: Vec<RegionNumber>) -> HashMap<RegionNumber, String> {
+fn create_region_wal_options(regions: Vec<RegionNumber>) -> HashMap<RegionNumber, WalOptions> {
 // TODO(niebayes): construct region wal options for benchmark.
 let _ = regions;
 HashMap::default()

@@ -49,7 +49,12 @@ impl TableMetadataBencher {

 let regions: Vec<_> = (0..64).collect();
 let region_routes = create_region_routes(regions.clone());
-let region_wal_options = create_region_wal_options(regions);
+let region_wal_options = create_region_wal_options(regions)
+.into_iter()
+.map(|(region_id, wal_options)| {
+(region_id, serde_json::to_string(&wal_options).unwrap())
+})
+.collect();

 let start = Instant::now();

@@ -109,9 +114,17 @@ impl TableMetadataBencher {
 let table_info = table_info.unwrap();
 let table_route = table_route.unwrap();
 let table_id = table_info.table_info.ident.table_id;
+
+let regions: Vec<_> = (0..64).collect();
+let region_wal_options = create_region_wal_options(regions);
 let _ = self
 .table_metadata_manager
-.delete_table_metadata(table_id, &table_info.table_name(), &table_route)
+.delete_table_metadata(
+table_id,
+&table_info.table_name(),
+&table_route,
+&region_wal_options,
+)
 .await;
 start.elapsed()
 },

@@ -17,6 +17,7 @@ use std::time::Duration;
 use base64::engine::general_purpose;
 use base64::Engine;
 use common_catalog::consts::{DEFAULT_CATALOG_NAME, DEFAULT_SCHEMA_NAME};
+use common_error::ext::BoxedError;
 use humantime::format_duration;
 use serde_json::Value;
 use servers::http::header::constants::GREPTIME_DB_HEADER_TIMEOUT;
@@ -24,7 +25,9 @@ use servers::http::result::greptime_result_v1::GreptimedbV1Response;
 use servers::http::GreptimeQueryOutput;
 use snafu::ResultExt;

-use crate::error::{HttpQuerySqlSnafu, Result, SerdeJsonSnafu};
+use crate::error::{
+BuildClientSnafu, HttpQuerySqlSnafu, ParseProxyOptsSnafu, Result, SerdeJsonSnafu,
+};

 #[derive(Debug, Clone)]
 pub struct DatabaseClient {
@@ -32,6 +35,23 @@ pub struct DatabaseClient {
 catalog: String,
 auth_header: Option<String>,
 timeout: Duration,
+proxy: Option<reqwest::Proxy>,
+}
+
+pub fn parse_proxy_opts(
+proxy: Option<String>,
+no_proxy: bool,
+) -> std::result::Result<Option<reqwest::Proxy>, BoxedError> {
+if no_proxy {
+return Ok(None);
+}
+proxy
+.map(|proxy| {
+reqwest::Proxy::all(proxy)
+.context(ParseProxyOptsSnafu)
+.map_err(BoxedError::new)
+})
+.transpose()
 }

 impl DatabaseClient {
@@ -40,6 +60,7 @@ impl DatabaseClient {
 catalog: String,
 auth_basic: Option<String>,
 timeout: Duration,
+proxy: Option<reqwest::Proxy>,
 ) -> Self {
 let auth_header = if let Some(basic) = auth_basic {
 let encoded = general_purpose::STANDARD.encode(basic);
@@ -48,11 +69,18 @@ impl DatabaseClient {
 None
 };

+if let Some(ref proxy) = proxy {
+common_telemetry::info!("Using proxy: {:?}", proxy);
+} else {
+common_telemetry::info!("Using system proxy(if any)");
+}
+
 Self {
 addr,
 catalog,
 auth_header,
 timeout,
+proxy,
 }
 }

@@ -67,7 +95,13 @@ impl DatabaseClient {
 ("db", format!("{}-{}", self.catalog, schema)),
 ("sql", sql.to_string()),
 ];
-let mut request = reqwest::Client::new()
+let client = self
+.proxy
+.clone()
+.map(|proxy| reqwest::Client::builder().proxy(proxy).build())
+.unwrap_or_else(|| Ok(reqwest::Client::new()))
+.context(BuildClientSnafu)?;
+let mut request = client
 .post(&url)
 .form(&params)
 .header("Content-Type", "application/x-www-form-urlencoded");
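The proxy plumbing above reduces to a simple precedence: `--no-proxy` disables proxying entirely, an explicit `--proxy` URL overrides the system proxy, and otherwise reqwest's default system-proxy behavior applies. A rough standalone sketch of that decision, not the patch code itself (`proxy_flag` and `no_proxy` stand in for the CLI flags):

// Sketch only: same precedence as parse_proxy_opts plus the client construction above.
fn build_http_client(
    proxy_flag: Option<String>,
    no_proxy: bool,
) -> Result<reqwest::Client, Box<dyn std::error::Error>> {
    let proxy = if no_proxy {
        None // --no-proxy wins: never use a proxy
    } else {
        proxy_flag.map(reqwest::Proxy::all).transpose()? // --proxy overrides the system proxy
    };
    Ok(match proxy {
        Some(p) => reqwest::Client::builder().proxy(p).build()?,
        None => reqwest::Client::new(), // falls back to system proxy settings, if any
    })
}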
@@ -86,6 +86,22 @@ pub enum Error {
|
|||||||
location: Location,
|
location: Location,
|
||||||
},
|
},
|
||||||
|
|
||||||
|
#[snafu(display("Failed to parse proxy options: {}", error))]
|
||||||
|
ParseProxyOpts {
|
||||||
|
#[snafu(source)]
|
||||||
|
error: reqwest::Error,
|
||||||
|
#[snafu(implicit)]
|
||||||
|
location: Location,
|
||||||
|
},
|
||||||
|
|
||||||
|
#[snafu(display("Failed to build reqwest client: {}", error))]
|
||||||
|
BuildClient {
|
||||||
|
#[snafu(implicit)]
|
||||||
|
location: Location,
|
||||||
|
#[snafu(source)]
|
||||||
|
error: reqwest::Error,
|
||||||
|
},
|
||||||
|
|
||||||
#[snafu(display("Invalid REPL command: {reason}"))]
|
#[snafu(display("Invalid REPL command: {reason}"))]
|
||||||
InvalidReplCommand { reason: String },
|
InvalidReplCommand { reason: String },
|
||||||
|
|
||||||
@@ -260,6 +276,24 @@ pub enum Error {
|
|||||||
#[snafu(implicit)]
|
#[snafu(implicit)]
|
||||||
location: Location,
|
location: Location,
|
||||||
},
|
},
|
||||||
|
|
||||||
|
#[snafu(display("OpenDAL operator failed"))]
|
||||||
|
OpenDal {
|
||||||
|
#[snafu(implicit)]
|
||||||
|
location: Location,
|
||||||
|
#[snafu(source)]
|
||||||
|
error: opendal::Error,
|
||||||
|
},
|
||||||
|
#[snafu(display("S3 config need be set"))]
|
||||||
|
S3ConfigNotSet {
|
||||||
|
#[snafu(implicit)]
|
||||||
|
location: Location,
|
||||||
|
},
|
||||||
|
#[snafu(display("Output directory not set"))]
|
||||||
|
OutputDirNotSet {
|
||||||
|
#[snafu(implicit)]
|
||||||
|
location: Location,
|
||||||
|
},
|
||||||
}
|
}
|
||||||
|
|
||||||
pub type Result<T> = std::result::Result<T, Error>;
|
pub type Result<T> = std::result::Result<T, Error>;
|
||||||
@@ -278,7 +312,8 @@ impl ErrorExt for Error {
|
|||||||
| Error::InitTimezone { .. }
|
| Error::InitTimezone { .. }
|
||||||
| Error::ConnectEtcd { .. }
|
| Error::ConnectEtcd { .. }
|
||||||
| Error::CreateDir { .. }
|
| Error::CreateDir { .. }
|
||||||
| Error::EmptyResult { .. } => StatusCode::InvalidArguments,
|
| Error::EmptyResult { .. }
|
||||||
|
| Error::ParseProxyOpts { .. } => StatusCode::InvalidArguments,
|
||||||
|
|
||||||
Error::StartProcedureManager { source, .. }
|
Error::StartProcedureManager { source, .. }
|
||||||
| Error::StopProcedureManager { source, .. } => source.status_code(),
|
| Error::StopProcedureManager { source, .. } => source.status_code(),
|
||||||
@@ -298,9 +333,13 @@ impl ErrorExt for Error {
|
|||||||
Error::SerdeJson { .. }
|
Error::SerdeJson { .. }
|
||||||
| Error::FileIo { .. }
|
| Error::FileIo { .. }
|
||||||
| Error::SpawnThread { .. }
|
| Error::SpawnThread { .. }
|
||||||
| Error::InitTlsProvider { .. } => StatusCode::Unexpected,
|
| Error::InitTlsProvider { .. }
|
||||||
|
| Error::BuildClient { .. } => StatusCode::Unexpected,
|
||||||
|
|
||||||
Error::Other { source, .. } => source.status_code(),
|
Error::Other { source, .. } => source.status_code(),
|
||||||
|
Error::OpenDal { .. } => StatusCode::Internal,
|
||||||
|
Error::S3ConfigNotSet { .. } => StatusCode::InvalidArguments,
|
||||||
|
Error::OutputDirNotSet { .. } => StatusCode::InvalidArguments,
|
||||||
|
|
||||||
Error::BuildRuntime { source, .. } => source.status_code(),
|
Error::BuildRuntime { source, .. } => source.status_code(),
|
||||||
|
|
||||||
|
|||||||
@@ -21,15 +21,18 @@ use async_trait::async_trait;
|
|||||||
use clap::{Parser, ValueEnum};
|
use clap::{Parser, ValueEnum};
|
||||||
use common_error::ext::BoxedError;
|
use common_error::ext::BoxedError;
|
||||||
use common_telemetry::{debug, error, info};
|
use common_telemetry::{debug, error, info};
|
||||||
|
use opendal::layers::LoggingLayer;
|
||||||
|
use opendal::{services, Operator};
|
||||||
use serde_json::Value;
|
use serde_json::Value;
|
||||||
use snafu::{OptionExt, ResultExt};
|
use snafu::{OptionExt, ResultExt};
|
||||||
use tokio::fs::File;
|
|
||||||
use tokio::io::{AsyncWriteExt, BufWriter};
|
|
||||||
use tokio::sync::Semaphore;
|
use tokio::sync::Semaphore;
|
||||||
use tokio::time::Instant;
|
use tokio::time::Instant;
|
||||||
|
|
||||||
use crate::database::DatabaseClient;
|
use crate::database::{parse_proxy_opts, DatabaseClient};
|
||||||
use crate::error::{EmptyResultSnafu, Error, FileIoSnafu, Result, SchemaNotFoundSnafu};
|
use crate::error::{
|
||||||
|
EmptyResultSnafu, Error, OpenDalSnafu, OutputDirNotSetSnafu, Result, S3ConfigNotSetSnafu,
|
||||||
|
SchemaNotFoundSnafu,
|
||||||
|
};
|
||||||
use crate::{database, Tool};
|
use crate::{database, Tool};
|
||||||
|
|
||||||
type TableReference = (String, String, String);
|
type TableReference = (String, String, String);
|
||||||
@@ -52,8 +55,9 @@ pub struct ExportCommand {
|
|||||||
addr: String,
|
addr: String,
|
||||||
|
|
||||||
/// Directory to put the exported data. E.g.: /tmp/greptimedb-export
|
/// Directory to put the exported data. E.g.: /tmp/greptimedb-export
|
||||||
|
/// for local export.
|
||||||
#[clap(long)]
|
#[clap(long)]
|
||||||
output_dir: String,
|
output_dir: Option<String>,
|
||||||
|
|
||||||
/// The name of the catalog to export.
|
/// The name of the catalog to export.
|
||||||
#[clap(long, default_value = "greptime-*")]
|
#[clap(long, default_value = "greptime-*")]
|
||||||
@@ -91,19 +95,71 @@ pub struct ExportCommand {
|
|||||||
/// The default behavior will disable server-side default timeout(i.e. `0s`).
|
/// The default behavior will disable server-side default timeout(i.e. `0s`).
|
||||||
#[clap(long, value_parser = humantime::parse_duration)]
|
#[clap(long, value_parser = humantime::parse_duration)]
|
||||||
timeout: Option<Duration>,
|
timeout: Option<Duration>,
|
||||||
|
|
||||||
|
/// The proxy server address to connect, if set, will override the system proxy.
|
||||||
|
///
|
||||||
|
/// The default behavior will use the system proxy if neither `proxy` nor `no_proxy` is set.
|
||||||
|
#[clap(long)]
|
||||||
|
proxy: Option<String>,
|
||||||
|
|
||||||
|
/// Disable proxy server, if set, will not use any proxy.
|
||||||
|
#[clap(long)]
|
||||||
|
no_proxy: bool,
|
||||||
|
|
||||||
|
/// if export data to s3
|
||||||
|
#[clap(long)]
|
||||||
|
s3: bool,
|
||||||
|
|
||||||
|
/// The s3 bucket name
|
||||||
|
/// if s3 is set, this is required
|
||||||
|
#[clap(long)]
|
||||||
|
s3_bucket: Option<String>,
|
||||||
|
|
||||||
|
/// The s3 endpoint
|
||||||
|
/// if s3 is set, this is required
|
||||||
|
#[clap(long)]
|
||||||
|
s3_endpoint: Option<String>,
|
||||||
|
|
||||||
|
/// The s3 access key
|
||||||
|
/// if s3 is set, this is required
|
||||||
|
#[clap(long)]
|
||||||
|
s3_access_key: Option<String>,
|
||||||
|
|
||||||
|
/// The s3 secret key
|
||||||
|
/// if s3 is set, this is required
|
||||||
|
#[clap(long)]
|
||||||
|
s3_secret_key: Option<String>,
|
||||||
|
|
||||||
|
/// The s3 region
|
||||||
|
/// if s3 is set, this is required
|
||||||
|
#[clap(long)]
|
||||||
|
s3_region: Option<String>,
|
||||||
}
|
}
|
||||||
|
|
||||||
impl ExportCommand {
|
impl ExportCommand {
|
||||||
pub async fn build(&self) -> std::result::Result<Box<dyn Tool>, BoxedError> {
|
pub async fn build(&self) -> std::result::Result<Box<dyn Tool>, BoxedError> {
|
||||||
|
if self.s3
|
||||||
|
&& (self.s3_bucket.is_none()
|
||||||
|
|| self.s3_endpoint.is_none()
|
||||||
|
|| self.s3_access_key.is_none()
|
||||||
|
|| self.s3_secret_key.is_none()
|
||||||
|
|| self.s3_region.is_none())
|
||||||
|
{
|
||||||
|
return Err(BoxedError::new(S3ConfigNotSetSnafu {}.build()));
|
||||||
|
}
|
||||||
|
if !self.s3 && self.output_dir.is_none() {
|
||||||
|
return Err(BoxedError::new(OutputDirNotSetSnafu {}.build()));
|
||||||
|
}
|
||||||
let (catalog, schema) =
|
let (catalog, schema) =
|
||||||
database::split_database(&self.database).map_err(BoxedError::new)?;
|
database::split_database(&self.database).map_err(BoxedError::new)?;
|
||||||
|
let proxy = parse_proxy_opts(self.proxy.clone(), self.no_proxy)?;
|
||||||
let database_client = DatabaseClient::new(
|
let database_client = DatabaseClient::new(
|
||||||
self.addr.clone(),
|
self.addr.clone(),
|
||||||
catalog.clone(),
|
catalog.clone(),
|
||||||
self.auth_basic.clone(),
|
self.auth_basic.clone(),
|
||||||
// Treats `None` as `0s` to disable server-side default timeout.
|
// Treats `None` as `0s` to disable server-side default timeout.
|
||||||
self.timeout.unwrap_or_default(),
|
self.timeout.unwrap_or_default(),
|
||||||
|
proxy,
|
||||||
);
|
);
|
||||||
|
|
||||||
Ok(Box::new(Export {
|
Ok(Box::new(Export {
|
||||||
@@ -115,24 +171,43 @@ impl ExportCommand {
|
|||||||
target: self.target.clone(),
|
target: self.target.clone(),
|
||||||
start_time: self.start_time.clone(),
|
start_time: self.start_time.clone(),
|
||||||
end_time: self.end_time.clone(),
|
end_time: self.end_time.clone(),
|
||||||
|
s3: self.s3,
|
||||||
|
s3_bucket: self.s3_bucket.clone(),
|
||||||
|
s3_endpoint: self.s3_endpoint.clone(),
|
||||||
|
s3_access_key: self.s3_access_key.clone(),
|
||||||
|
s3_secret_key: self.s3_secret_key.clone(),
|
||||||
|
s3_region: self.s3_region.clone(),
|
||||||
}))
|
}))
|
||||||
}
|
}
|
||||||
}
|
}
|

+#[derive(Clone)]
 pub struct Export {
     catalog: String,
     schema: Option<String>,
     database_client: DatabaseClient,
-    output_dir: String,
+    output_dir: Option<String>,
     parallelism: usize,
     target: ExportTarget,
     start_time: Option<String>,
     end_time: Option<String>,
+    s3: bool,
+    s3_bucket: Option<String>,
+    s3_endpoint: Option<String>,
+    s3_access_key: Option<String>,
+    s3_secret_key: Option<String>,
+    s3_region: Option<String>,
 }

 impl Export {
     fn catalog_path(&self) -> PathBuf {
-        PathBuf::from(&self.output_dir).join(&self.catalog)
+        if self.s3 {
+            PathBuf::from(&self.catalog)
+        } else if let Some(dir) = &self.output_dir {
+            PathBuf::from(dir).join(&self.catalog)
+        } else {
+            unreachable!("catalog_path: output_dir must be set when not using s3")
+        }
     }
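With the branching above, the export root differs per backend: object keys start at the catalog name, while local paths stay under output_dir. A small stand-alone re-implementation with hypothetical values:

    use std::path::PathBuf;

    // Same branching as the new catalog_path, lifted out of the struct for illustration.
    fn catalog_path(s3: bool, output_dir: Option<&str>, catalog: &str) -> PathBuf {
        if s3 {
            PathBuf::from(catalog)
        } else {
            PathBuf::from(output_dir.expect("output_dir is required without s3")).join(catalog)
        }
    }

    fn main() {
        assert_eq!(catalog_path(true, None, "greptime"), PathBuf::from("greptime"));
        assert_eq!(
            catalog_path(false, Some("/tmp/export"), "greptime"),
            PathBuf::from("/tmp/export/greptime")
        );
    }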

     async fn get_db_names(&self) -> Result<Vec<String>> {
@@ -289,19 +364,23 @@ impl Export {
         let timer = Instant::now();
         let db_names = self.get_db_names().await?;
         let db_count = db_names.len();
+        let operator = self.build_operator().await?;

         for schema in db_names {
-            let db_dir = self.catalog_path().join(format!("{schema}/"));
-            tokio::fs::create_dir_all(&db_dir)
-                .await
-                .context(FileIoSnafu)?;
-            let file = db_dir.join("create_database.sql");
-            let mut file = File::create(file).await.context(FileIoSnafu)?;
             let create_database = self
                 .show_create("DATABASE", &self.catalog, &schema, None)
                 .await?;
-            file.write_all(create_database.as_bytes())
-                .await
-                .context(FileIoSnafu)?;
+            let file_path = self.get_file_path(&schema, "create_database.sql");
+            self.write_to_storage(&operator, &file_path, create_database.into_bytes())
+                .await?;
+
+            info!(
+                "Exported {}.{} database creation SQL to {}",
+                self.catalog,
+                schema,
+                self.format_output_path(&file_path)
+            );
         }

         let elapsed = timer.elapsed();
@@ -315,149 +394,267 @@ impl Export {
         let semaphore = Arc::new(Semaphore::new(self.parallelism));
         let db_names = self.get_db_names().await?;
         let db_count = db_names.len();
+        let operator = Arc::new(self.build_operator().await?);
         let mut tasks = Vec::with_capacity(db_names.len());

         for schema in db_names {
             let semaphore_moved = semaphore.clone();
+            let export_self = self.clone();
+            let operator = operator.clone();
             tasks.push(async move {
                 let _permit = semaphore_moved.acquire().await.unwrap();
-                let (metric_physical_tables, remaining_tables, views) =
-                    self.get_table_list(&self.catalog, &schema).await?;
-                let table_count =
-                    metric_physical_tables.len() + remaining_tables.len() + views.len();
-                let db_dir = self.catalog_path().join(format!("{schema}/"));
-                tokio::fs::create_dir_all(&db_dir)
-                    .await
-                    .context(FileIoSnafu)?;
-                let file = db_dir.join("create_tables.sql");
-                let mut file = File::create(file).await.context(FileIoSnafu)?;
-                for (c, s, t) in metric_physical_tables.into_iter().chain(remaining_tables) {
-                    let create_table = self.show_create("TABLE", &c, &s, Some(&t)).await?;
-                    file.write_all(create_table.as_bytes())
-                        .await
-                        .context(FileIoSnafu)?;
-                }
-                for (c, s, v) in views {
-                    let create_view = self.show_create("VIEW", &c, &s, Some(&v)).await?;
-                    file.write_all(create_view.as_bytes())
-                        .await
-                        .context(FileIoSnafu)?;
-                }
+                let (metric_physical_tables, remaining_tables, views) = export_self
+                    .get_table_list(&export_self.catalog, &schema)
+                    .await?;
+
+                // Create directory if needed for file system storage
+                if !export_self.s3 {
+                    let db_dir = format!("{}/{}/", export_self.catalog, schema);
+                    operator.create_dir(&db_dir).await.context(OpenDalSnafu)?;
+                }
+
+                let file_path = export_self.get_file_path(&schema, "create_tables.sql");
+                let mut content = Vec::new();
+
+                // Add table creation SQL
+                for (c, s, t) in metric_physical_tables.iter().chain(&remaining_tables) {
+                    let create_table = export_self.show_create("TABLE", c, s, Some(t)).await?;
+                    content.extend_from_slice(create_table.as_bytes());
+                }
+
+                // Add view creation SQL
+                for (c, s, v) in &views {
+                    let create_view = export_self.show_create("VIEW", c, s, Some(v)).await?;
+                    content.extend_from_slice(create_view.as_bytes());
+                }
+
+                // Write to storage
+                export_self
+                    .write_to_storage(&operator, &file_path, content)
+                    .await?;

                 info!(
-                    "Finished exporting {}.{schema} with {table_count} table schemas to path: {}",
-                    self.catalog,
-                    db_dir.to_string_lossy()
+                    "Finished exporting {}.{schema} with {} table schemas to path: {}",
+                    export_self.catalog,
+                    metric_physical_tables.len() + remaining_tables.len() + views.len(),
+                    export_self.format_output_path(&file_path)
                 );

                 Ok::<(), Error>(())
             });
         }

-        let success = futures::future::join_all(tasks)
-            .await
-            .into_iter()
-            .filter(|r| match r {
-                Ok(_) => true,
-                Err(e) => {
-                    error!(e; "export schema job failed");
-                    false
-                }
-            })
-            .count();
+        let success = self.execute_tasks(tasks).await;

         let elapsed = timer.elapsed();
         info!("Success {success}/{db_count} jobs, cost: {elapsed:?}");

         Ok(())
     }
+    async fn build_operator(&self) -> Result<Operator> {
+        if self.s3 {
+            self.build_s3_operator().await
+        } else {
+            self.build_fs_operator().await
+        }
+    }
+
+    async fn build_s3_operator(&self) -> Result<Operator> {
+        let mut builder = services::S3::default().root("").bucket(
+            self.s3_bucket
+                .as_ref()
+                .expect("s3_bucket must be provided when s3 is enabled"),
+        );
+
+        if let Some(endpoint) = self.s3_endpoint.as_ref() {
+            builder = builder.endpoint(endpoint);
+        }
+
+        if let Some(region) = self.s3_region.as_ref() {
+            builder = builder.region(region);
+        }
+
+        if let Some(key_id) = self.s3_access_key.as_ref() {
+            builder = builder.access_key_id(key_id);
+        }
+
+        if let Some(secret_key) = self.s3_secret_key.as_ref() {
+            builder = builder.secret_access_key(secret_key);
+        }
+
+        let op = Operator::new(builder)
+            .context(OpenDalSnafu)?
+            .layer(LoggingLayer::default())
+            .finish();
+        Ok(op)
+    }
+
+    async fn build_fs_operator(&self) -> Result<Operator> {
+        let root = self
+            .output_dir
+            .as_ref()
+            .context(OutputDirNotSetSnafu)?
+            .clone();
+        let op = Operator::new(services::Fs::default().root(&root))
+            .context(OpenDalSnafu)?
+            .layer(LoggingLayer::default())
+            .finish();
+        Ok(op)
+    }
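Both backends are now driven through one OpenDAL Operator, so the write paths above differ only in which builder is configured. A minimal sketch of the Fs flavour, assuming the consuming-builder opendal API used in this diff plus tokio for the runtime; the path values are made up:

    use opendal::layers::LoggingLayer;
    use opendal::{services, Operator};

    #[tokio::main]
    async fn main() -> opendal::Result<()> {
        // Local-filesystem operator rooted at a hypothetical output directory.
        let builder = services::Fs::default().root("/tmp/greptime-export");
        let op = Operator::new(builder)?
            .layer(LoggingLayer::default())
            .finish();

        // The export code writes relative keys shaped like "<catalog>/<schema>/<file>".
        op.create_dir("greptime/public/").await?;
        op.write("greptime/public/create_tables.sql", b"-- DDL goes here\n".to_vec())
            .await?;
        Ok(())
    }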
     async fn export_database_data(&self) -> Result<()> {
         let timer = Instant::now();
         let semaphore = Arc::new(Semaphore::new(self.parallelism));
         let db_names = self.get_db_names().await?;
         let db_count = db_names.len();
         let mut tasks = Vec::with_capacity(db_count);
+        let operator = Arc::new(self.build_operator().await?);
+        let with_options = build_with_options(&self.start_time, &self.end_time);

         for schema in db_names {
             let semaphore_moved = semaphore.clone();
+            let export_self = self.clone();
+            let with_options_clone = with_options.clone();
+            let operator = operator.clone();
+
             tasks.push(async move {
                 let _permit = semaphore_moved.acquire().await.unwrap();
-                let db_dir = self.catalog_path().join(format!("{schema}/"));
-                tokio::fs::create_dir_all(&db_dir)
-                    .await
-                    .context(FileIoSnafu)?;
-
-                let with_options = match (&self.start_time, &self.end_time) {
-                    (Some(start_time), Some(end_time)) => {
-                        format!(
-                            "WITH (FORMAT='parquet', start_time='{}', end_time='{}')",
-                            start_time, end_time
-                        )
-                    }
-                    (Some(start_time), None) => {
-                        format!("WITH (FORMAT='parquet', start_time='{}')", start_time)
-                    }
-                    (None, Some(end_time)) => {
-                        format!("WITH (FORMAT='parquet', end_time='{}')", end_time)
-                    }
-                    (None, None) => "WITH (FORMAT='parquet')".to_string(),
-                };
-
-                let sql = format!(
-                    r#"COPY DATABASE "{}"."{}" TO '{}' {};"#,
-                    self.catalog,
-                    schema,
-                    db_dir.to_str().unwrap(),
-                    with_options
-                );
-
-                info!("Executing sql: {sql}");
-
-                self.database_client.sql_in_public(&sql).await?;
-                info!(
-                    "Finished exporting {}.{schema} data into path: {}",
-                    self.catalog,
-                    db_dir.to_string_lossy()
-                );
-
-                // The export copy from sql
-                let copy_from_file = db_dir.join("copy_from.sql");
-                let mut writer =
-                    BufWriter::new(File::create(copy_from_file).await.context(FileIoSnafu)?);
-                let copy_database_from_sql = format!(
-                    r#"COPY DATABASE "{}"."{}" FROM '{}' WITH (FORMAT='parquet');"#,
-                    self.catalog,
-                    schema,
-                    db_dir.to_str().unwrap()
-                );
-                writer
-                    .write(copy_database_from_sql.as_bytes())
-                    .await
-                    .context(FileIoSnafu)?;
-                writer.flush().await.context(FileIoSnafu)?;
-
-                info!("Finished exporting {}.{schema} copy_from.sql", self.catalog);
+                // Create directory if not using S3
+                if !export_self.s3 {
+                    let db_dir = format!("{}/{}/", export_self.catalog, schema);
+                    operator.create_dir(&db_dir).await.context(OpenDalSnafu)?;
+                }
+
+                let (path, connection_part) = export_self.get_storage_params(&schema);
+
+                // Execute COPY DATABASE TO command
+                let sql = format!(
+                    r#"COPY DATABASE "{}"."{}" TO '{}' WITH ({}){};"#,
+                    export_self.catalog, schema, path, with_options_clone, connection_part
+                );
+                info!("Executing sql: {sql}");
+                export_self.database_client.sql_in_public(&sql).await?;
+                info!(
+                    "Finished exporting {}.{} data to {}",
+                    export_self.catalog, schema, path
+                );
+
+                // Create copy_from.sql file
+                let copy_database_from_sql = format!(
+                    r#"COPY DATABASE "{}"."{}" FROM '{}' WITH ({}){};"#,
+                    export_self.catalog, schema, path, with_options_clone, connection_part
+                );
+
+                let copy_from_path = export_self.get_file_path(&schema, "copy_from.sql");
+                export_self
+                    .write_to_storage(
+                        &operator,
+                        &copy_from_path,
+                        copy_database_from_sql.into_bytes(),
+                    )
+                    .await?;
+
+                info!(
+                    "Finished exporting {}.{} copy_from.sql to {}",
+                    export_self.catalog,
+                    schema,
+                    export_self.format_output_path(&copy_from_path)
+                );

                 Ok::<(), Error>(())
-            })
+            });
         }

-        let success = futures::future::join_all(tasks)
+        let success = self.execute_tasks(tasks).await;
+        let elapsed = timer.elapsed();
+        info!("Success {success}/{db_count} jobs, costs: {elapsed:?}");
+
+        Ok(())
+    }
+    fn get_file_path(&self, schema: &str, file_name: &str) -> String {
+        format!("{}/{}/{}", self.catalog, schema, file_name)
+    }
+
+    fn format_output_path(&self, file_path: &str) -> String {
+        if self.s3 {
+            format!(
+                "s3://{}/{}",
+                self.s3_bucket.as_ref().unwrap_or(&String::new()),
+                file_path
+            )
+        } else {
+            format!(
+                "{}/{}",
+                self.output_dir.as_ref().unwrap_or(&String::new()),
+                file_path
+            )
+        }
+    }
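get_file_path produces the backend-agnostic key and format_output_path merely decorates it for logging. A quick illustration with hypothetical bucket and directory names:

    fn main() {
        let (catalog, schema, file) = ("greptime", "public", "create_database.sql");

        // Relative key used for both Fs and S3 writes.
        let key = format!("{}/{}/{}", catalog, schema, file);
        assert_eq!(key, "greptime/public/create_database.sql");

        // What the log line shows for each backend.
        assert_eq!(
            format!("s3://{}/{}", "my-bucket", key),
            "s3://my-bucket/greptime/public/create_database.sql"
        );
        assert_eq!(
            format!("{}/{}", "/tmp/export", key),
            "/tmp/export/greptime/public/create_database.sql"
        );
    }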
+    async fn write_to_storage(
+        &self,
+        op: &Operator,
+        file_path: &str,
+        content: Vec<u8>,
+    ) -> Result<()> {
+        op.write(file_path, content).await.context(OpenDalSnafu)
+    }
+
+    fn get_storage_params(&self, schema: &str) -> (String, String) {
+        if self.s3 {
+            let s3_path = format!(
+                "s3://{}/{}/{}/",
+                // Safety: s3_bucket is required when s3 is enabled
+                self.s3_bucket.as_ref().unwrap(),
+                self.catalog,
+                schema
+            );
+
+            // endpoint is optional
+            let endpoint_option = if let Some(endpoint) = self.s3_endpoint.as_ref() {
+                format!(", ENDPOINT='{}'", endpoint)
+            } else {
+                String::new()
+            };
+
+            // Safety: All s3 options are required
+            let connection_options = format!(
+                "ACCESS_KEY_ID='{}', SECRET_ACCESS_KEY='{}', REGION='{}'{}",
+                self.s3_access_key.as_ref().unwrap(),
+                self.s3_secret_key.as_ref().unwrap(),
+                self.s3_region.as_ref().unwrap(),
+                endpoint_option
+            );
+
+            (s3_path, format!(" CONNECTION ({})", connection_options))
+        } else {
+            (
+                self.catalog_path()
+                    .join(format!("{schema}/"))
+                    .to_string_lossy()
+                    .to_string(),
+                String::new(),
+            )
+        }
+    }
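get_storage_params turns the S3 settings into a destination URI plus a CONNECTION clause; combined with build_with_options it yields the COPY DATABASE statement issued by export_database_data. A sketch of the assembled SQL using the same format string as the diff, with made-up credentials:

    fn main() {
        let catalog = "greptime";
        let schema = "public";
        // What get_storage_params returns when S3 is enabled (values are hypothetical).
        let path = "s3://my-bucket/greptime/public/";
        let connection_part =
            " CONNECTION (ACCESS_KEY_ID='AK', SECRET_ACCESS_KEY='SK', REGION='us-west-2')";
        let with_options = "format = 'parquet'";

        let sql = format!(
            r#"COPY DATABASE "{}"."{}" TO '{}' WITH ({}){};"#,
            catalog, schema, path, with_options, connection_part
        );
        assert_eq!(
            sql,
            r#"COPY DATABASE "greptime"."public" TO 's3://my-bucket/greptime/public/' WITH (format = 'parquet') CONNECTION (ACCESS_KEY_ID='AK', SECRET_ACCESS_KEY='SK', REGION='us-west-2');"#
        );
    }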
+    async fn execute_tasks(
+        &self,
+        tasks: Vec<impl std::future::Future<Output = Result<()>>>,
+    ) -> usize {
+        futures::future::join_all(tasks)
             .await
             .into_iter()
             .filter(|r| match r {
                 Ok(_) => true,
                 Err(e) => {
-                    error!(e; "export database job failed");
+                    error!(e; "export job failed");
                     false
                 }
             })
-            .count();
-        let elapsed = timer.elapsed();
-
-        info!("Success {success}/{db_count} jobs, costs: {elapsed:?}");
-
-        Ok(())
+            .count()
     }
 }
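execute_tasks centralises the join-and-count logic both export paths previously repeated: await every per-schema future and count the Ok results, logging the failures. A tiny standalone equivalent (hypothetical job function):

    use futures::future;

    async fn job(fail: bool) -> Result<(), String> {
        if fail { Err("boom".to_string()) } else { Ok(()) }
    }

    #[tokio::main]
    async fn main() {
        // Same shape as execute_tasks: join all futures, keep only the successes.
        let tasks = vec![job(false), job(true), job(false)];
        let success = future::join_all(tasks)
            .await
            .into_iter()
            .filter(|r| r.is_ok())
            .count();
        assert_eq!(success, 2);
    }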
@@ -482,3 +679,15 @@ impl Tool for Export {
         }
     }
 }
+
+/// Builds the WITH options string for SQL commands, assuming consistent syntax across S3 and local exports.
+fn build_with_options(start_time: &Option<String>, end_time: &Option<String>) -> String {
+    let mut options = vec!["format = 'parquet'".to_string()];
+    if let Some(start) = start_time {
+        options.push(format!("start_time = '{}'", start));
+    }
+    if let Some(end) = end_time {
+        options.push(format!("end_time = '{}'", end));
+    }
+    options.join(", ")
+}
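build_with_options replaces the four-way match that used to assemble the WITH clause inline; only the options actually set are emitted. A runnable copy of the helper with a couple of checks:

    fn build_with_options(start_time: &Option<String>, end_time: &Option<String>) -> String {
        let mut options = vec!["format = 'parquet'".to_string()];
        if let Some(start) = start_time {
            options.push(format!("start_time = '{}'", start));
        }
        if let Some(end) = end_time {
            options.push(format!("end_time = '{}'", end));
        }
        options.join(", ")
    }

    fn main() {
        assert_eq!(
            build_with_options(&Some("2024-01-01 00:00:00".to_string()), &None),
            "format = 'parquet', start_time = '2024-01-01 00:00:00'"
        );
        assert_eq!(build_with_options(&None, &None), "format = 'parquet'");
    }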

@@ -25,7 +25,7 @@ use snafu::{OptionExt, ResultExt};
 use tokio::sync::Semaphore;
 use tokio::time::Instant;

-use crate::database::DatabaseClient;
+use crate::database::{parse_proxy_opts, DatabaseClient};
 use crate::error::{Error, FileIoSnafu, Result, SchemaNotFoundSnafu};
 use crate::{database, Tool};

@@ -76,18 +76,30 @@ pub struct ImportCommand {
     /// The default behavior will disable server-side default timeout(i.e. `0s`).
     #[clap(long, value_parser = humantime::parse_duration)]
     timeout: Option<Duration>,
+
+    /// The proxy server address to connect, if set, will override the system proxy.
+    ///
+    /// The default behavior will use the system proxy if neither `proxy` nor `no_proxy` is set.
+    #[clap(long)]
+    proxy: Option<String>,
+
+    /// Disable proxy server, if set, will not use any proxy.
+    #[clap(long, default_value = "false")]
+    no_proxy: bool,
 }

 impl ImportCommand {
     pub async fn build(&self) -> std::result::Result<Box<dyn Tool>, BoxedError> {
         let (catalog, schema) =
             database::split_database(&self.database).map_err(BoxedError::new)?;
+        let proxy = parse_proxy_opts(self.proxy.clone(), self.no_proxy)?;
         let database_client = DatabaseClient::new(
             self.addr.clone(),
             catalog.clone(),
             self.auth_basic.clone(),
             // Treats `None` as `0s` to disable server-side default timeout.
             self.timeout.unwrap_or_default(),
+            proxy,
         );

         Ok(Box::new(Import {

@@ -10,9 +10,8 @@ name = "greptime"
 path = "src/bin/greptime.rs"

 [features]
-default = ["python", "servers/pprof", "servers/mem-prof"]
+default = ["servers/pprof", "servers/mem-prof"]
 tokio-console = ["common-telemetry/tokio-console"]
-python = ["frontend/python"]

 [lints]
 workspace = true
@@ -58,6 +57,7 @@ humantime.workspace = true
 lazy_static.workspace = true
 meta-client.workspace = true
 meta-srv.workspace = true
+metric-engine.workspace = true
 mito2.workspace = true
 moka.workspace = true
 nu-ansi-term = "0.46"
Some files were not shown because too many files have changed in this diff.