Mirror of https://github.com/GreptimeTeam/greptimedb.git, synced 2025-12-23 06:30:05 +00:00.

Compare commits: 323 commits
The compare view lists 323 commits, from 710a68d2d6 (first listed) to c60b59adc8 (last listed). Only the abbreviated commit SHAs were captured by the mirror; the author, date, and commit-message columns are empty, so the per-commit table is reduced to this summary.
@@ -12,9 +12,4 @@ rustflags = [
     "-Wclippy::print_stdout",
     "-Wclippy::print_stderr",
     "-Wclippy::implicit_clone",
-
-    # It seems clippy has made a false positive decision here when upgrading rust toolchain to
-    # nightly-2023-08-07, we do need it to be borrowed mutably.
-    # Allow it for now; try disallow it when the toolchain is upgraded in the future.
-    "-Aclippy::needless_pass_by_ref_mut",
 ]
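The same warn/allow set can be exercised outside CI; a minimal sketch, assuming an ordinary workspace checkout (the flags simply mirror the rustflags shown above):

    # Run clippy with the lint levels from the config above.
    cargo clippy --workspace --all-targets -- \
        -W clippy::print_stdout \
        -W clippy::print_stderr \
        -W clippy::implicit_clone \
        -A clippy::needless_pass_by_ref_mut   # the -A line is what the diff above removes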
.github/ISSUE_TEMPLATE/bug_report.yml (vendored): 35 changed lines

@@ -41,13 +41,27 @@ body:
       required: true

   - type: textarea
-    id: what-happened
+    id: reproduce
     attributes:
-      label: What happened?
+      label: Minimal reproduce step
       description: |
-        Tell us what happened and also what you would have expected to
-        happen instead.
-      placeholder: "Describe the bug"
+        Please walk us through and provide steps and details on how
+        to reproduce the issue. If possible, provide scripts that we
+        can run to trigger the bug.
+    validations:
+      required: true
+
+  - type: textarea
+    id: expected-manner
+    attributes:
+      label: What did you expect to see?
+    validations:
+      required: true
+
+  - type: textarea
+    id: actual-manner
+    attributes:
+      label: What did you see instead?
     validations:
       required: true

@@ -72,14 +86,3 @@ body:
         trace. This will be automatically formatted into code, so no
         need for backticks.
       render: bash
-
-  - type: textarea
-    id: reproduce
-    attributes:
-      label: How can we reproduce the bug?
-      description: |
-        Please walk us through and provide steps and details on how
-        to reproduce the issue. If possible, provide scripts that we
-        can run to trigger the bug.
-    validations:
-      required: true
.github/actions/build-dev-builder-image/action.yml (vendored, removed): 103 lines

@@ -1,103 +0,0 @@
name: Build and push dev-builder image
description: Build and push dev-builder image to DockerHub and ACR
inputs:
  dockerhub-image-registry:
    description: The dockerhub image registry to store the images
    required: false
    default: docker.io
  dockerhub-image-registry-username:
    description: The dockerhub username to login to the image registry
    required: true
  dockerhub-image-registry-token:
    description: The dockerhub token to login to the image registry
    required: true
  dockerhub-image-namespace:
    description: The dockerhub namespace of the image registry to store the images
    required: false
    default: greptime
  acr-image-registry:
    description: The ACR image registry to store the images
    required: true
  acr-image-registry-username:
    description: The ACR username to login to the image registry
    required: true
  acr-image-registry-password:
    description: The ACR password to login to the image registry
    required: true
  acr-image-namespace:
    description: The ACR namespace of the image registry to store the images
    required: false
    default: greptime
  version:
    description: Version of the dev-builder
    required: false
    default: latest
runs:
  using: composite
  steps:
    - name: Login to Dockerhub
      uses: docker/login-action@v2
      with:
        registry: ${{ inputs.dockerhub-image-registry }}
        username: ${{ inputs.dockerhub-image-registry-username }}
        password: ${{ inputs.dockerhub-image-registry-token }}

    - name: Build and push ubuntu dev builder image to dockerhub
      shell: bash
      run:
        make dev-builder \
          BASE_IMAGE=ubuntu \
          BUILDX_MULTI_PLATFORM_BUILD=true \
          IMAGE_REGISTRY=${{ inputs.dockerhub-image-registry }} \
          IMAGE_NAMESPACE=${{ inputs.dockerhub-image-namespace }} \
          IMAGE_TAG=${{ inputs.version }}

    - name: Build and push centos dev builder image to dockerhub
      shell: bash
      run:
        make dev-builder \
          BASE_IMAGE=centos \
          BUILDX_MULTI_PLATFORM_BUILD=true \
          IMAGE_REGISTRY=${{ inputs.dockerhub-image-registry }} \
          IMAGE_NAMESPACE=${{ inputs.dockerhub-image-namespace }} \
          IMAGE_TAG=${{ inputs.version }}

    - name: Build and push android dev builder image to dockerhub
      shell: bash
      run:
        make dev-builder \
          BASE_IMAGE=android \
          BUILDX_MULTI_PLATFORM_BUILD=true \
          IMAGE_REGISTRY=${{ inputs.dockerhub-image-registry }} \
          IMAGE_NAMESPACE=${{ inputs.dockerhub-image-namespace }} \
          IMAGE_TAG=${{ inputs.version }}

    - name: Login to ACR
      uses: docker/login-action@v2
      continue-on-error: true
      with:
        registry: ${{ inputs.acr-image-registry }}
        username: ${{ inputs.acr-image-registry-username }}
        password: ${{ inputs.acr-image-registry-password }}

    - name: Build and push ubuntu dev builder image to ACR
      shell: bash
      continue-on-error: true
      run: # buildx will cache the images that already built, so it will not take long time to build the images again.
        make dev-builder \
          BASE_IMAGE=ubuntu \
          BUILDX_MULTI_PLATFORM_BUILD=true \
          IMAGE_REGISTRY=${{ inputs.acr-image-registry }} \
          IMAGE_NAMESPACE=${{ inputs.acr-image-namespace }} \
          IMAGE_TAG=${{ inputs.version }}

    - name: Build and push centos dev builder image to ACR
      shell: bash
      continue-on-error: true
      run: # buildx will cache the images that already built, so it will not take long time to build the images again.
        make dev-builder \
          BASE_IMAGE=centos \
          BUILDX_MULTI_PLATFORM_BUILD=true \
          IMAGE_REGISTRY=${{ inputs.acr-image-registry }} \
          IMAGE_NAMESPACE=${{ inputs.acr-image-namespace }} \
          IMAGE_TAG=${{ inputs.version }}
.github/actions/build-dev-builder-images/action.yml (vendored, new file): 76 lines

@@ -0,0 +1,76 @@
name: Build and push dev-builder images
description: Build and push dev-builder images to DockerHub and ACR
inputs:
  dockerhub-image-registry:
    description: The dockerhub image registry to store the images
    required: false
    default: docker.io
  dockerhub-image-registry-username:
    description: The dockerhub username to login to the image registry
    required: true
  dockerhub-image-registry-token:
    description: The dockerhub token to login to the image registry
    required: true
  dockerhub-image-namespace:
    description: The dockerhub namespace of the image registry to store the images
    required: false
    default: greptime
  version:
    description: Version of the dev-builder
    required: false
    default: latest
  build-dev-builder-ubuntu:
    description: Build dev-builder-ubuntu image
    required: false
    default: 'true'
  build-dev-builder-centos:
    description: Build dev-builder-centos image
    required: false
    default: 'true'
  build-dev-builder-android:
    description: Build dev-builder-android image
    required: false
    default: 'true'
runs:
  using: composite
  steps:
    - name: Login to Dockerhub
      uses: docker/login-action@v2
      with:
        registry: ${{ inputs.dockerhub-image-registry }}
        username: ${{ inputs.dockerhub-image-registry-username }}
        password: ${{ inputs.dockerhub-image-registry-token }}

    - name: Build and push dev-builder-ubuntu image
      shell: bash
      if: ${{ inputs.build-dev-builder-ubuntu == 'true' }}
      run: |
        make dev-builder \
          BASE_IMAGE=ubuntu \
          BUILDX_MULTI_PLATFORM_BUILD=true \
          IMAGE_REGISTRY=${{ inputs.dockerhub-image-registry }} \
          IMAGE_NAMESPACE=${{ inputs.dockerhub-image-namespace }} \
          IMAGE_TAG=${{ inputs.version }}

    - name: Build and push dev-builder-centos image
      shell: bash
      if: ${{ inputs.build-dev-builder-centos == 'true' }}
      run: |
        make dev-builder \
          BASE_IMAGE=centos \
          BUILDX_MULTI_PLATFORM_BUILD=true \
          IMAGE_REGISTRY=${{ inputs.dockerhub-image-registry }} \
          IMAGE_NAMESPACE=${{ inputs.dockerhub-image-namespace }} \
          IMAGE_TAG=${{ inputs.version }}

    - name: Build and push dev-builder-android image # Only build image for amd64 platform.
      shell: bash
      if: ${{ inputs.build-dev-builder-android == 'true' }}
      run: |
        make dev-builder \
          BASE_IMAGE=android \
          IMAGE_REGISTRY=${{ inputs.dockerhub-image-registry }} \
          IMAGE_NAMESPACE=${{ inputs.dockerhub-image-namespace }} \
          IMAGE_TAG=${{ inputs.version }} && \

        docker push ${{ inputs.dockerhub-image-registry }}/${{ inputs.dockerhub-image-namespace }}/dev-builder-android:${{ inputs.version }}
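Each step is a thin wrapper over the repository's dev-builder make target; run locally, the ubuntu variant amounts to roughly the following (registry, namespace, and tag values are placeholders):

    # Local sketch of the "Build and push dev-builder-ubuntu image" step.
    make dev-builder \
      BASE_IMAGE=ubuntu \
      BUILDX_MULTI_PLATFORM_BUILD=true \
      IMAGE_REGISTRY=docker.io \
      IMAGE_NAMESPACE=greptime \
      IMAGE_TAG=latest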
.github/actions/build-greptime-binary/action.yml (vendored): 52 changed lines

@@ -16,35 +16,20 @@ inputs:
   version:
     description: Version of the artifact
     required: true
-  release-to-s3-bucket:
-    description: S3 bucket to store released artifacts
-    required: true
-  aws-access-key-id:
-    description: AWS access key id
-    required: true
-  aws-secret-access-key:
-    description: AWS secret access key
-    required: true
-  aws-region:
-    description: AWS region
-    required: true
-  upload-to-s3:
-    description: Upload to S3
-    required: false
-    default: 'true'
-  upload-latest-artifacts:
-    description: Upload the latest artifacts to S3
-    required: false
-    default: 'true'
   working-dir:
     description: Working directory to build the artifacts
     required: false
     default: .
+  build-android-artifacts:
+    description: Build android artifacts
+    required: false
+    default: 'false'
 runs:
   using: composite
   steps:
     - name: Build greptime binary
       shell: bash
+      if: ${{ inputs.build-android-artifacts == 'false' }}
       run: |
         cd ${{ inputs.working-dir }} && \
         make build-by-dev-builder \
@@ -54,14 +39,27 @@ runs:

     - name: Upload artifacts
       uses: ./.github/actions/upload-artifacts
+      if: ${{ inputs.build-android-artifacts == 'false' }}
+      env:
+        PROFILE_TARGET: ${{ inputs.cargo-profile == 'dev' && 'debug' || inputs.cargo-profile }}
       with:
         artifacts-dir: ${{ inputs.artifacts-dir }}
-        target-file: ./target/${{ inputs.cargo-profile }}/greptime
+        target-file: ./target/$PROFILE_TARGET/greptime
+        version: ${{ inputs.version }}
+        working-dir: ${{ inputs.working-dir }}
+
+    # TODO(zyy17): We can remove build-android-artifacts flag in the future.
+    - name: Build greptime binary
+      shell: bash
+      if: ${{ inputs.build-android-artifacts == 'true' }}
+      run: |
+        cd ${{ inputs.working-dir }} && make strip-android-bin
+
+    - name: Upload android artifacts
+      uses: ./.github/actions/upload-artifacts
+      if: ${{ inputs.build-android-artifacts == 'true' }}
+      with:
+        artifacts-dir: ${{ inputs.artifacts-dir }}
+        target-file: ./target/aarch64-linux-android/release/greptime
         version: ${{ inputs.version }}
-        release-to-s3-bucket: ${{ inputs.release-to-s3-bucket }}
-        aws-access-key-id: ${{ inputs.aws-access-key-id }}
-        aws-secret-access-key: ${{ inputs.aws-secret-access-key }}
-        aws-region: ${{ inputs.aws-region }}
-        upload-to-s3: ${{ inputs.upload-to-s3 }}
-        upload-latest-artifacts: ${{ inputs.upload-latest-artifacts }}
         working-dir: ${{ inputs.working-dir }}
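The new PROFILE_TARGET variable covers cargo's one naming quirk: the dev profile writes its output to target/debug, while every other profile writes to target/<profile>. A bash sketch of the same mapping (CARGO_PROFILE stands in for the action's cargo-profile input):

    CARGO_PROFILE=dev   # assumed input value for illustration
    if [ "$CARGO_PROFILE" = "dev" ]; then
      PROFILE_TARGET=debug          # cargo's dev profile outputs to target/debug
    else
      PROFILE_TARGET="$CARGO_PROFILE"
    fi
    echo "binary expected at ./target/$PROFILE_TARGET/greptime"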
.github/actions/build-linux-artifacts/action.yml (vendored): 48 changed lines

@@ -13,30 +13,10 @@ inputs:
   disable-run-tests:
     description: Disable running integration tests
     required: true
-  release-to-s3-bucket:
-    description: S3 bucket to store released artifacts
-    required: true
-  aws-access-key-id:
-    description: AWS access key id
-    required: true
-  aws-secret-access-key:
-    description: AWS secret access key
-    required: true
-  aws-region:
-    description: AWS region
-    required: true
   dev-mode:
     description: Enable dev mode, only build standard greptime
     required: false
     default: 'false'
-  upload-to-s3:
-    description: Upload to S3
-    required: false
-    default: 'true'
-  upload-latest-artifacts:
-    description: Upload the latest artifacts to S3
-    required: false
-    default: 'true'
   working-dir:
     description: Working directory to build the artifacts
     required: false
@@ -68,12 +48,6 @@ runs:
         cargo-profile: ${{ inputs.cargo-profile }}
         artifacts-dir: greptime-linux-${{ inputs.arch }}-pyo3-${{ inputs.version }}
         version: ${{ inputs.version }}
-        release-to-s3-bucket: ${{ inputs.release-to-s3-bucket }}
-        aws-access-key-id: ${{ inputs.aws-access-key-id }}
-        aws-secret-access-key: ${{ inputs.aws-secret-access-key }}
-        aws-region: ${{ inputs.aws-region }}
-        upload-to-s3: ${{ inputs.upload-to-s3 }}
-        upload-latest-artifacts: ${{ inputs.upload-latest-artifacts }}
         working-dir: ${{ inputs.working-dir }}

     - name: Build greptime without pyo3
@@ -85,12 +59,6 @@ runs:
         cargo-profile: ${{ inputs.cargo-profile }}
         artifacts-dir: greptime-linux-${{ inputs.arch }}-${{ inputs.version }}
         version: ${{ inputs.version }}
-        release-to-s3-bucket: ${{ inputs.release-to-s3-bucket }}
-        aws-access-key-id: ${{ inputs.aws-access-key-id }}
-        aws-secret-access-key: ${{ inputs.aws-secret-access-key }}
-        aws-region: ${{ inputs.aws-region }}
-        upload-to-s3: ${{ inputs.upload-to-s3 }}
-        upload-latest-artifacts: ${{ inputs.upload-latest-artifacts }}
         working-dir: ${{ inputs.working-dir }}

     - name: Clean up the target directory # Clean up the target directory for the centos7 base image, or it will still use the objects of last build.
@@ -107,10 +75,14 @@ runs:
         cargo-profile: ${{ inputs.cargo-profile }}
         artifacts-dir: greptime-linux-${{ inputs.arch }}-centos-${{ inputs.version }}
         version: ${{ inputs.version }}
-        release-to-s3-bucket: ${{ inputs.release-to-s3-bucket }}
-        aws-access-key-id: ${{ inputs.aws-access-key-id }}
-        aws-secret-access-key: ${{ inputs.aws-secret-access-key }}
-        aws-region: ${{ inputs.aws-region }}
-        upload-to-s3: ${{ inputs.upload-to-s3 }}
-        upload-latest-artifacts: ${{ inputs.upload-latest-artifacts }}
         working-dir: ${{ inputs.working-dir }}
+
+    - name: Build greptime on android base image
+      uses: ./.github/actions/build-greptime-binary
+      if: ${{ inputs.arch == 'amd64' && inputs.dev-mode == 'false' }} # Only build android base image on amd64.
+      with:
+        base-image: android
+        artifacts-dir: greptime-android-arm64-${{ inputs.version }}
+        version: ${{ inputs.version }}
+        working-dir: ${{ inputs.working-dir }}
+        build-android-artifacts: true
.github/actions/build-macos-artifacts/action.yml (vendored): 21 changed lines

@@ -19,25 +19,9 @@ inputs:
   disable-run-tests:
     description: Disable running integration tests
     required: true
-  release-to-s3-bucket:
-    description: S3 bucket to store released artifacts
-    required: true
   artifacts-dir:
     description: Directory to store artifacts
     required: true
-  aws-access-key-id:
-    description: AWS access key id
-    required: true
-  aws-secret-access-key:
-    description: AWS secret access key
-    required: true
-  aws-region:
-    description: AWS region
-    required: true
-  upload-to-s3:
-    description: Upload to S3
-    required: false
-    default: 'true'
 runs:
   using: composite
   steps:
@@ -103,8 +87,3 @@ runs:
         artifacts-dir: ${{ inputs.artifacts-dir }}
         target-file: target/${{ inputs.arch }}/${{ inputs.cargo-profile }}/greptime
         version: ${{ inputs.version }}
-        release-to-s3-bucket: ${{ inputs.release-to-s3-bucket }}
-        aws-access-key-id: ${{ inputs.aws-access-key-id }}
-        aws-secret-access-key: ${{ inputs.aws-secret-access-key }}
-        aws-region: ${{ inputs.aws-region }}
-        upload-to-s3: ${{ inputs.upload-to-s3 }}
.github/actions/build-windows-artifacts/action.yml (vendored, new file): 80 lines

@@ -0,0 +1,80 @@
name: Build Windows artifacts
description: Build Windows artifacts
inputs:
  arch:
    description: Architecture to build
    required: true
  rust-toolchain:
    description: Rust toolchain to use
    required: true
  cargo-profile:
    description: Cargo profile to build
    required: true
  features:
    description: Cargo features to build
    required: true
  version:
    description: Version of the artifact
    required: true
  disable-run-tests:
    description: Disable running integration tests
    required: true
  artifacts-dir:
    description: Directory to store artifacts
    required: true
runs:
  using: composite
  steps:
    - uses: arduino/setup-protoc@v1

    - name: Install rust toolchain
      uses: dtolnay/rust-toolchain@master
      with:
        toolchain: ${{ inputs.rust-toolchain }}
        targets: ${{ inputs.arch }}
        components: llvm-tools-preview

    - name: Rust Cache
      uses: Swatinem/rust-cache@v2

    - name: Install Python
      uses: actions/setup-python@v4
      with:
        python-version: '3.10'

    - name: Install PyArrow Package
      shell: pwsh
      run: pip install pyarrow

    - name: Install WSL distribution
      uses: Vampire/setup-wsl@v2
      with:
        distribution: Ubuntu-22.04

    - name: Install latest nextest release # For integration tests.
      if: ${{ inputs.disable-run-tests == 'false' }}
      uses: taiki-e/install-action@nextest

    - name: Run integration tests
      if: ${{ inputs.disable-run-tests == 'false' }}
      shell: pwsh
      run: make test sqlness-test

    - name: Upload sqlness logs
      if: ${{ failure() }} # Only upload logs when the integration tests failed.
      uses: actions/upload-artifact@v3
      with:
        name: sqlness-logs
        path: ${{ runner.temp }}/greptime-*.log
        retention-days: 3

    - name: Build greptime binary
      shell: pwsh
      run: cargo build --profile ${{ inputs.cargo-profile }} --features ${{ inputs.features }} --target ${{ inputs.arch }}

    - name: Upload artifacts
      uses: ./.github/actions/upload-artifacts
      with:
        artifacts-dir: ${{ inputs.artifacts-dir }}
        target-file: target/${{ inputs.arch }}/${{ inputs.cargo-profile }}/greptime
        version: ${{ inputs.version }}
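The final build step is an ordinary cargo invocation; with assumed input values (release profile and the common x86_64-pc-windows-msvc target triple, both placeholders), it reduces to roughly:

    # The action additionally forwards --features ${{ inputs.features }} from its input.
    cargo build --profile release --target x86_64-pc-windows-msvc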
.github/actions/deploy-greptimedb/action.yml (vendored, new file): 31 lines

@@ -0,0 +1,31 @@
name: Deploy GreptimeDB cluster
description: Deploy GreptimeDB cluster on Kubernetes
inputs:
  aws-ci-test-bucket:
    description: 'AWS S3 bucket name for testing'
    required: true
  aws-region:
    description: 'AWS region for testing'
    required: true
  data-root:
    description: 'Data root for testing'
    required: true
  aws-access-key-id:
    description: 'AWS access key id for testing'
    required: true
  aws-secret-access-key:
    description: 'AWS secret access key for testing'
    required: true
runs:
  using: composite
  steps:
    - name: Deploy GreptimeDB by Helm
      shell: bash
      env:
        DATA_ROOT: ${{ inputs.data-root }}
        AWS_CI_TEST_BUCKET: ${{ inputs.aws-ci-test-bucket }}
        AWS_REGION: ${{ inputs.aws-region }}
        AWS_ACCESS_KEY_ID: ${{ inputs.aws-access-key-id }}
        AWS_SECRET_ACCESS_KEY: ${{ inputs.aws-secret-access-key }}
      run: |
        ./.github/scripts/deploy-greptimedb.sh
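Because the composite action only forwards its inputs as environment variables before calling the script, the same deployment can be reproduced outside CI; a sketch with placeholder values:

    # All values below are placeholders; supply your own bucket, region, and credentials.
    DATA_ROOT=ci-test-data \
    AWS_CI_TEST_BUCKET=example-ci-bucket \
    AWS_REGION=us-west-2 \
    AWS_ACCESS_KEY_ID=AKIAEXAMPLE \
    AWS_SECRET_ACCESS_KEY=example-secret \
    ./.github/scripts/deploy-greptimedb.sh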
(file header not captured in the mirror; the hunks below rename the release action and add an omitBody flag)

@@ -1,5 +1,5 @@
-name: Release artifacts
-description: Release artifacts
+name: Publish GitHub release
+description: Publish GitHub release
 inputs:
   version:
     description: Version to release
@@ -31,10 +31,12 @@ runs:
           echo "prerelease=false" >> $GITHUB_ENV
           echo "makeLatest=true" >> $GITHUB_ENV
           echo "generateReleaseNotes=false" >> $GITHUB_ENV
+          echo "omitBody=true" >> $GITHUB_ENV
         else
           echo "prerelease=true" >> $GITHUB_ENV
           echo "makeLatest=false" >> $GITHUB_ENV
           echo "generateReleaseNotes=true" >> $GITHUB_ENV
+          echo "omitBody=false" >> $GITHUB_ENV
         fi

     - name: Publish release
@@ -45,6 +47,7 @@ runs:
         makeLatest: ${{ env.makeLatest }}
         tag: ${{ inputs.version }}
         generateReleaseNotes: ${{ env.generateReleaseNotes }}
+        omitBody: ${{ env.omitBody }} # omitBody is true when the release is a official release.
         allowUpdates: true
         artifacts: |
           **/greptime-*/*
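The flags above travel between steps via the GITHUB_ENV file: a bash step appends KEY=VALUE lines, and later steps read them back as ${{ env.KEY }}. A minimal sketch of that mechanism, where the version-pattern test is only an assumed stand-in for whatever check the action actually performs:

    # Hypothetical condition; the real action decides official vs. pre-release from its version input.
    if [[ "$VERSION" =~ ^v[0-9]+\.[0-9]+\.[0-9]+$ ]]; then
      echo "prerelease=false" >> "$GITHUB_ENV"
      echo "omitBody=true"    >> "$GITHUB_ENV"
    else
      echo "prerelease=true"  >> "$GITHUB_ENV"
      echo "omitBody=false"   >> "$GITHUB_ENV"
    fi
    # A later step can then reference ${{ env.prerelease }} and ${{ env.omitBody }}.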
.github/actions/release-cn-artifacts/action.yaml (vendored, new file): 138 lines

@@ -0,0 +1,138 @@
name: Release CN artifacts
description: Release artifacts to CN region
inputs:
  src-image-registry:
    description: The source image registry to store the images
    required: true
    default: docker.io
  src-image-namespace:
    description: The namespace of the source image registry to store the images
    required: true
    default: greptime
  src-image-name:
    description: The name of the source image
    required: false
    default: greptimedb
  dst-image-registry:
    description: The destination image registry to store the images
    required: true
  dst-image-namespace:
    description: The namespace of the destination image registry to store the images
    required: true
    default: greptime
  dst-image-registry-username:
    description: The username to login to the image registry
    required: true
  dst-image-registry-password:
    description: The password to login to the image registry
    required: true
  version:
    description: Version of the artifact
    required: true
  dev-mode:
    description: Enable dev mode, only push standard greptime
    required: false
    default: 'false'
  push-latest-tag:
    description: Whether to push the latest tag of the image
    required: false
    default: 'true'
  aws-cn-s3-bucket:
    description: S3 bucket to store released artifacts in CN region
    required: true
  aws-cn-access-key-id:
    description: AWS access key id in CN region
    required: true
  aws-cn-secret-access-key:
    description: AWS secret access key in CN region
    required: true
  aws-cn-region:
    description: AWS region in CN
    required: true
  upload-to-s3:
    description: Upload to S3
    required: false
    default: 'true'
  artifacts-dir:
    description: Directory to store artifacts
    required: false
    default: 'artifacts'
  update-version-info:
    description: Update the version info in S3
    required: false
    default: 'true'
  upload-max-retry-times:
    description: Max retry times for uploading artifacts to S3
    required: false
    default: "20"
  upload-retry-timeout:
    description: Timeout for uploading artifacts to S3
    required: false
    default: "30" # minutes
runs:
  using: composite
  steps:
    - name: Download artifacts
      uses: actions/download-artifact@v3
      with:
        path: ${{ inputs.artifacts-dir }}

    - name: Release artifacts to cn region
      uses: nick-invision/retry@v2
      if: ${{ inputs.upload-to-s3 == 'true' }}
      env:
        AWS_ACCESS_KEY_ID: ${{ inputs.aws-cn-access-key-id }}
        AWS_SECRET_ACCESS_KEY: ${{ inputs.aws-cn-secret-access-key }}
        AWS_DEFAULT_REGION: ${{ inputs.aws-cn-region }}
        UPDATE_VERSION_INFO: ${{ inputs.update-version-info }}
      with:
        max_attempts: ${{ inputs.upload-max-retry-times }}
        timeout_minutes: ${{ inputs.upload-retry-timeout }}
        command: |
          ./.github/scripts/upload-artifacts-to-s3.sh \
            ${{ inputs.artifacts-dir }} \
            ${{ inputs.version }} \
            ${{ inputs.aws-cn-s3-bucket }}

    - name: Push greptimedb image from Dockerhub to ACR
      shell: bash
      env:
        DST_REGISTRY_USERNAME: ${{ inputs.dst-image-registry-username }}
        DST_REGISTRY_PASSWORD: ${{ inputs.dst-image-registry-password }}
      run: |
        ./.github/scripts/copy-image.sh \
          ${{ inputs.src-image-registry }}/${{ inputs.src-image-namespace }}/${{ inputs.src-image-name }}:${{ inputs.version }} \
          ${{ inputs.dst-image-registry }}/${{ inputs.dst-image-namespace }}

    - name: Push latest greptimedb image from Dockerhub to ACR
      shell: bash
      if: ${{ inputs.push-latest-tag == 'true' }}
      env:
        DST_REGISTRY_USERNAME: ${{ inputs.dst-image-registry-username }}
        DST_REGISTRY_PASSWORD: ${{ inputs.dst-image-registry-password }}
      run: |
        ./.github/scripts/copy-image.sh \
          ${{ inputs.src-image-registry }}/${{ inputs.src-image-namespace }}/${{ inputs.src-image-name }}:latest \
          ${{ inputs.dst-image-registry }}/${{ inputs.dst-image-namespace }}

    - name: Push greptimedb-centos image from DockerHub to ACR
      shell: bash
      if: ${{ inputs.dev-mode == 'false' }}
      env:
        DST_REGISTRY_USERNAME: ${{ inputs.dst-image-registry-username }}
        DST_REGISTRY_PASSWORD: ${{ inputs.dst-image-registry-password }}
      run: |
        ./.github/scripts/copy-image.sh \
          ${{ inputs.src-image-registry }}/${{ inputs.src-image-namespace }}/${{ inputs.src-image-name }}-centos:latest \
          ${{ inputs.dst-image-registry }}/${{ inputs.dst-image-namespace }}

    - name: Push greptimedb-centos image from DockerHub to ACR
      shell: bash
      if: ${{ inputs.dev-mode == 'false' && inputs.push-latest-tag == 'true' }}
      env:
        DST_REGISTRY_USERNAME: ${{ inputs.dst-image-registry-username }}
        DST_REGISTRY_PASSWORD: ${{ inputs.dst-image-registry-password }}
      run: |
        ./.github/scripts/copy-image.sh \
          ${{ inputs.src-image-registry }}/${{ inputs.src-image-namespace }}/${{ inputs.src-image-name }}-centos:latest \
          ${{ inputs.dst-image-registry }}/${{ inputs.dst-image-namespace }}
.github/actions/sqlness-test/action.yml (vendored, new file): 59 lines

@@ -0,0 +1,59 @@
name: Run sqlness test
description: Run sqlness test on GreptimeDB

inputs:
  aws-ci-test-bucket:
    description: 'AWS S3 bucket name for testing'
    required: true
  aws-region:
    description: 'AWS region for testing'
    required: true
  data-root:
    description: 'Data root for testing'
    required: true
  aws-access-key-id:
    description: 'AWS access key id for testing'
    required: true
  aws-secret-access-key:
    description: 'AWS secret access key for testing'
    required: true

runs:
  using: composite
  steps:
    - name: Deploy GreptimeDB cluster by Helm
      uses: ./.github/actions/deploy-greptimedb
      with:
        data-root: ${{ inputs.data-root }}
        aws-ci-test-bucket: ${{ inputs.aws-ci-test-bucket }}
        aws-region: ${{ inputs.aws-region }}
        aws-access-key-id: ${{ inputs.aws-access-key-id }}
        aws-secret-access-key: ${{ inputs.aws-secret-access-key }}

    # TODO(zyy17): The following tests will be replaced by the real sqlness test.
    - name: Run tests on greptimedb cluster
      shell: bash
      run: |
        mysql -h 127.0.0.1 -P 14002 -e "CREATE TABLE IF NOT EXISTS system_metrics (host VARCHAR(255), idc VARCHAR(255), cpu_util DOUBLE, memory_util DOUBLE, disk_util DOUBLE, ts TIMESTAMP DEFAULT CURRENT_TIMESTAMP, PRIMARY KEY(host, idc), TIME INDEX(ts));" && \
        mysql -h 127.0.0.1 -P 14002 -e "SHOW TABLES;"

    - name: Run tests on greptimedb cluster that uses S3
      shell: bash
      run: |
        mysql -h 127.0.0.1 -P 24002 -e "CREATE TABLE IF NOT EXISTS system_metrics (host VARCHAR(255), idc VARCHAR(255), cpu_util DOUBLE, memory_util DOUBLE, disk_util DOUBLE, ts TIMESTAMP DEFAULT CURRENT_TIMESTAMP, PRIMARY KEY(host, idc), TIME INDEX(ts));" && \
        mysql -h 127.0.0.1 -P 24002 -e "SHOW TABLES;"

    - name: Run tests on standalone greptimedb
      shell: bash
      run: |
        mysql -h 127.0.0.1 -P 34002 -e "CREATE TABLE IF NOT EXISTS system_metrics (host VARCHAR(255), idc VARCHAR(255), cpu_util DOUBLE, memory_util DOUBLE, disk_util DOUBLE, ts TIMESTAMP DEFAULT CURRENT_TIMESTAMP, PRIMARY KEY(host, idc), TIME INDEX(ts));" && \
        mysql -h 127.0.0.1 -P 34002 -e "SHOW TABLES;"

    - name: Clean S3 data
      shell: bash
      env:
        AWS_DEFAULT_REGION: ${{ inputs.aws-region }}
        AWS_ACCESS_KEY_ID: ${{ inputs.aws-access-key-id }}
        AWS_SECRET_ACCESS_KEY: ${{ inputs.aws-secret-access-key }}
      run: |
        aws s3 rm s3://${{ inputs.aws-ci-test-bucket }}/${{ inputs.data-root }} --recursive
.github/actions/upload-artifacts/action.yml (vendored): 90 changed lines

@@ -10,34 +10,6 @@ inputs:
   version:
     description: Version of the artifact
     required: true
-  release-to-s3-bucket:
-    description: S3 bucket to store released artifacts
-    required: true
-  aws-access-key-id:
-    description: AWS access key id
-    required: true
-  aws-secret-access-key:
-    description: AWS secret access key
-    required: true
-  aws-region:
-    description: AWS region
-    required: true
-  upload-to-s3:
-    description: Upload to S3
-    required: false
-    default: 'true'
-  upload-latest-artifacts:
-    description: Upload the latest artifacts to S3
-    required: false
-    default: 'true'
-  upload-max-retry-times:
-    description: Max retry times for uploading artifacts to S3
-    required: false
-    default: "20"
-  upload-retry-timeout:
-    description: Timeout for uploading artifacts to S3
-    required: false
-    default: "30" # minutes
   working-dir:
     description: Working directory to upload the artifacts
     required: false
@@ -50,7 +22,7 @@ runs:
       shell: bash
       run: |
         mkdir -p ${{ inputs.artifacts-dir }} && \
-        mv ${{ inputs.target-file }} ${{ inputs.artifacts-dir }}
+        cp ${{ inputs.target-file }} ${{ inputs.artifacts-dir }}

     # The compressed artifacts will use the following layout:
     # greptime-linux-amd64-pyo3-v0.3.0sha256sum
@@ -61,9 +33,21 @@ runs:
       working-directory: ${{ inputs.working-dir }}
       shell: bash
       run: |
-        tar -zcvf ${{ inputs.artifacts-dir }}.tar.gz ${{ inputs.artifacts-dir }} && \
+        tar -zcvf ${{ inputs.artifacts-dir }}.tar.gz ${{ inputs.artifacts-dir }}
+
+    - name: Calculate checksum
+      if: runner.os != 'Windows'
+      working-directory: ${{ inputs.working-dir }}
+      shell: bash
+      run: |
         echo $(shasum -a 256 ${{ inputs.artifacts-dir }}.tar.gz | cut -f1 -d' ') > ${{ inputs.artifacts-dir }}.sha256sum
+
+    - name: Calculate checksum on Windows
+      if: runner.os == 'Windows'
+      working-directory: ${{ inputs.working-dir }}
+      shell: pwsh
+      run: Get-FileHash ${{ inputs.artifacts-dir }}.tar.gz -Algorithm SHA256 | select -ExpandProperty Hash > ${{ inputs.artifacts-dir }}.sha256sum

     # Note: The artifacts will be double zip compressed(related issue: https://github.com/actions/upload-artifact/issues/39).
     # However, when we use 'actions/download-artifact@v3' to download the artifacts, it will be automatically unzipped.
     - name: Upload artifacts
@@ -77,49 +61,3 @@ runs:
       with:
         name: ${{ inputs.artifacts-dir }}.sha256sum
         path: ${{ inputs.working-dir }}/${{ inputs.artifacts-dir }}.sha256sum
-
-    - name: Upload artifacts to S3
-      if: ${{ inputs.upload-to-s3 == 'true' }}
-      uses: nick-invision/retry@v2
-      env:
-        AWS_ACCESS_KEY_ID: ${{ inputs.aws-access-key-id }}
-        AWS_SECRET_ACCESS_KEY: ${{ inputs.aws-secret-access-key }}
-        AWS_DEFAULT_REGION: ${{ inputs.aws-region }}
-      with:
-        max_attempts: ${{ inputs.upload-max-retry-times }}
-        timeout_minutes: ${{ inputs.upload-retry-timeout }}
-        # The bucket layout will be:
-        # releases/greptimedb
-        # ├── v0.1.0
-        # │   ├── greptime-darwin-amd64-pyo3-v0.1.0.sha256sum
-        # │   └── greptime-darwin-amd64-pyo3-v0.1.0.tar.gz
-        # └── v0.2.0
-        #     ├── greptime-darwin-amd64-pyo3-v0.2.0.sha256sum
-        #     └── greptime-darwin-amd64-pyo3-v0.2.0.tar.gz
-        command: |
-          cd ${{ inputs.working-dir }} && \
-          aws s3 cp \
-            ${{ inputs.artifacts-dir }}.tar.gz \
-            s3://${{ inputs.release-to-s3-bucket }}/releases/greptimedb/${{ inputs.version }}/${{ inputs.artifacts-dir }}.tar.gz && \
-          aws s3 cp \
-            ${{ inputs.artifacts-dir }}.sha256sum \
-            s3://${{ inputs.release-to-s3-bucket }}/releases/greptimedb/${{ inputs.version }}/${{ inputs.artifacts-dir }}.sha256sum
-
-    - name: Upload latest artifacts to S3
-      if: ${{ inputs.upload-to-s3 == 'true' && inputs.upload-latest-artifacts == 'true' }} # We'll also upload the latest artifacts to S3 in the scheduled and formal release.
-      uses: nick-invision/retry@v2
-      env:
-        AWS_ACCESS_KEY_ID: ${{ inputs.aws-access-key-id }}
-        AWS_SECRET_ACCESS_KEY: ${{ inputs.aws-secret-access-key }}
-        AWS_DEFAULT_REGION: ${{ inputs.aws-region }}
-      with:
-        max_attempts: ${{ inputs.upload-max-retry-times }}
-        timeout_minutes: ${{ inputs.upload-retry-timeout }}
-        command: |
-          cd ${{ inputs.working-dir }} && \
-          aws s3 cp \
-            ${{ inputs.artifacts-dir }}.tar.gz \
-            s3://${{ inputs.release-to-s3-bucket }}/releases/greptimedb/latest/${{ inputs.artifacts-dir }}.tar.gz && \
-          aws s3 cp \
-            ${{ inputs.artifacts-dir }}.sha256sum \
-            s3://${{ inputs.release-to-s3-bucket }}/releases/greptimedb/latest/${{ inputs.artifacts-dir }}.sha256sum
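The .sha256sum file written by these steps contains only the bare hex digest, so verifying a download means pairing the digest with the archive name yourself; a sketch with placeholder file names:

    # Pair the stored digest with the archive name and let shasum verify it.
    ARTIFACT=greptime-linux-amd64-v0.4.0.tar.gz
    CHECKSUM_FILE=greptime-linux-amd64-v0.4.0.sha256sum
    echo "$(cat "$CHECKSUM_FILE")  $ARTIFACT" | shasum -a 256 -c -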
.github/doc-label-config.yml (vendored, new file): 4 lines

@@ -0,0 +1,4 @@
Doc not needed:
  - '- \[x\] This PR does not require documentation updates.'
Doc update required:
  - '- \[ \] This PR does not require documentation updates.'
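Each key maps a label name to a regex that a labeler workflow matches against the PR body, so the checkbox state from the template below decides which documentation label is applied. A rough local check of the "Doc not needed" pattern (the grep invocation and sample line are illustrative only):

    echo '- [x] This PR does not require documentation updates.' \
      | grep -E -- '- \[x\] This PR does not require documentation updates\.'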
.github/pull_request_template.md (vendored): 1 changed line

@@ -15,5 +15,6 @@ Please explain IN DETAIL what the changes are in this PR and why they are needed

 - [ ] I have written the necessary rustdoc comments.
 - [ ] I have added the necessary unit tests and integration tests.
+- [ ] This PR does not require documentation updates.

 ## Refer to a related PR or issue link (optional)
.github/scripts/copy-image.sh (vendored, new executable file): 47 lines

@@ -0,0 +1,47 @@
#!/usr/bin/env bash

set -e
set -o pipefail

SRC_IMAGE=$1
DST_REGISTRY=$2
SKOPEO_STABLE_IMAGE="quay.io/skopeo/stable:latest"

# Check if necessary variables are set.
function check_vars() {
  for var in DST_REGISTRY_USERNAME DST_REGISTRY_PASSWORD DST_REGISTRY SRC_IMAGE; do
    if [ -z "${!var}" ]; then
      echo "$var is not set or empty."
      echo "Usage: DST_REGISTRY_USERNAME=<your-dst-registry-username> DST_REGISTRY_PASSWORD=<your-dst-registry-password> $0 <dst-registry> <src-image>"
      exit 1
    fi
  done
}

# Copies images from DockerHub to the destination registry.
function copy_images_from_dockerhub() {
  # Check if docker is installed.
  if ! command -v docker &> /dev/null; then
    echo "docker is not installed. Please install docker to continue."
    exit 1
  fi

  # Extract the name and tag of the source image.
  IMAGE_NAME=$(echo "$SRC_IMAGE" | sed "s/.*\///")

  echo "Copying $SRC_IMAGE to $DST_REGISTRY/$IMAGE_NAME"

  docker run "$SKOPEO_STABLE_IMAGE" copy -a docker://"$SRC_IMAGE" \
    --dest-creds "$DST_REGISTRY_USERNAME":"$DST_REGISTRY_PASSWORD" \
    docker://"$DST_REGISTRY/$IMAGE_NAME"
}

function main() {
  check_vars
  copy_images_from_dockerhub
}

# Usage example:
# DST_REGISTRY_USERNAME=123 DST_REGISTRY_PASSWORD=456 \
#   ./copy-image.sh greptime/greptimedb:v0.4.0 greptime-registry.cn-hangzhou.cr.aliyuncs.com
main
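The script drives skopeo through a container so the runner needs no local install; with skopeo available directly, the same copy (image names taken from the script's own usage example, credentials as placeholders) would look roughly like:

    skopeo copy -a \
      --dest-creds "$DST_REGISTRY_USERNAME:$DST_REGISTRY_PASSWORD" \
      docker://greptime/greptimedb:v0.4.0 \
      docker://greptime-registry.cn-hangzhou.cr.aliyuncs.com/greptimedb:v0.4.0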
169
.github/scripts/deploy-greptimedb.sh
vendored
Executable file
169
.github/scripts/deploy-greptimedb.sh
vendored
Executable file
@@ -0,0 +1,169 @@
|
|||||||
|
#!/usr/bin/env bash
|
||||||
|
|
||||||
|
set -e
|
||||||
|
set -o pipefail
|
||||||
|
|
||||||
|
KUBERNETES_VERSION="${KUBERNETES_VERSION:-v1.24.0}"
|
||||||
|
ENABLE_STANDALONE_MODE="${ENABLE_STANDALONE_MODE:-true}"
|
||||||
|
DEFAULT_INSTALL_NAMESPACE=${DEFAULT_INSTALL_NAMESPACE:-default}
|
||||||
|
GREPTIMEDB_IMAGE_TAG=${GREPTIMEDB_IMAGE_TAG:-latest}
|
||||||
|
ETCD_CHART="oci://registry-1.docker.io/bitnamicharts/etcd"
|
||||||
|
GREPTIME_CHART="https://greptimeteam.github.io/helm-charts/"
|
||||||
|
|
||||||
|
# Ceate a cluster with 1 control-plane node and 5 workers.
|
||||||
|
function create_kind_cluster() {
|
||||||
|
cat <<EOF | kind create cluster --name "${CLUSTER}" --image kindest/node:"$KUBERNETES_VERSION" --config=-
|
||||||
|
kind: Cluster
|
||||||
|
apiVersion: kind.x-k8s.io/v1alpha4
|
||||||
|
nodes:
|
||||||
|
- role: control-plane
|
||||||
|
- role: worker
|
||||||
|
- role: worker
|
||||||
|
- role: worker
|
||||||
|
- role: worker
|
||||||
|
- role: worker
|
||||||
|
EOF
|
||||||
|
}
|
||||||
|
|
||||||
|
# Add greptime Helm chart repo.
|
||||||
|
function add_greptime_chart() {
|
||||||
|
  helm repo add greptime "$GREPTIME_CHART"
  helm repo update
}

# Deploy an etcd cluster with 3 members.
function deploy_etcd_cluster() {
  local namespace="$1"

  helm install etcd "$ETCD_CHART" \
    --set replicaCount=3 \
    --set auth.rbac.create=false \
    --set auth.rbac.token.enabled=false \
    -n "$namespace"

  # Wait for the etcd cluster to be ready.
  kubectl rollout status statefulset/etcd -n "$namespace"
}

# Deploy greptimedb-operator.
function deploy_greptimedb_operator() {
  # Use the latest chart and image.
  helm install greptimedb-operator greptime/greptimedb-operator \
    --set image.tag=latest \
    -n "$DEFAULT_INSTALL_NAMESPACE"

  # Wait for greptimedb-operator to be ready.
  kubectl rollout status deployment/greptimedb-operator -n "$DEFAULT_INSTALL_NAMESPACE"
}

# Deploy a greptimedb cluster using local storage.
# It exposes the cluster service ports as '14000', '14001', '14002', '14003' for local access.
function deploy_greptimedb_cluster() {
  local cluster_name=$1
  local install_namespace=$2

  kubectl create ns "$install_namespace"

  deploy_etcd_cluster "$install_namespace"

  helm install "$cluster_name" greptime/greptimedb-cluster \
    --set image.tag="$GREPTIMEDB_IMAGE_TAG" \
    --set meta.etcdEndpoints="etcd.$install_namespace:2379" \
    -n "$install_namespace"

  # Wait for the greptimedb cluster to be ready.
  while true; do
    PHASE=$(kubectl -n "$install_namespace" get gtc "$cluster_name" -o jsonpath='{.status.clusterPhase}')
    if [ "$PHASE" == "Running" ]; then
      echo "Cluster is ready"
      break
    else
      echo "Cluster is not ready yet: Current phase: $PHASE"
      sleep 5 # Wait for 5 seconds before checking again.
    fi
  done

  # Expose the greptimedb cluster for local access.
  kubectl -n "$install_namespace" port-forward svc/"$cluster_name"-frontend \
    14000:4000 \
    14001:4001 \
    14002:4002 \
    14003:4003 > /tmp/connections.out &
}

# Deploy a greptimedb cluster using S3 storage.
# It exposes the cluster service ports as '24000', '24001', '24002', '24003' for local access.
function deploy_greptimedb_cluster_with_s3_storage() {
  local cluster_name=$1
  local install_namespace=$2

  kubectl create ns "$install_namespace"

  deploy_etcd_cluster "$install_namespace"

  helm install "$cluster_name" greptime/greptimedb-cluster -n "$install_namespace" \
    --set image.tag="$GREPTIMEDB_IMAGE_TAG" \
    --set meta.etcdEndpoints="etcd.$install_namespace:2379" \
    --set storage.s3.bucket="$AWS_CI_TEST_BUCKET" \
    --set storage.s3.region="$AWS_REGION" \
    --set storage.s3.root="$DATA_ROOT" \
    --set storage.credentials.secretName=s3-credentials \
    --set storage.credentials.accessKeyId="$AWS_ACCESS_KEY_ID" \
    --set storage.credentials.secretAccessKey="$AWS_SECRET_ACCESS_KEY"

  # Wait for the greptimedb cluster to be ready.
  while true; do
    PHASE=$(kubectl -n "$install_namespace" get gtc "$cluster_name" -o jsonpath='{.status.clusterPhase}')
    if [ "$PHASE" == "Running" ]; then
      echo "Cluster is ready"
      break
    else
      echo "Cluster is not ready yet: Current phase: $PHASE"
      sleep 5 # Wait for 5 seconds before checking again.
    fi
  done

  # Expose the greptimedb cluster for local access.
  kubectl -n "$install_namespace" port-forward svc/"$cluster_name"-frontend \
    24000:4000 \
    24001:4001 \
    24002:4002 \
    24003:4003 > /tmp/connections.out &
}

# Deploy standalone greptimedb.
# It exposes the service ports as '34000', '34001', '34002', '34003' for local access.
function deploy_standalone_greptimedb() {
  helm install greptimedb-standalone greptime/greptimedb-standalone \
    --set image.tag="$GREPTIMEDB_IMAGE_TAG" \
    -n "$DEFAULT_INSTALL_NAMESPACE"

  # Wait for greptimedb-standalone to be ready.
  kubectl rollout status statefulset/greptimedb-standalone -n "$DEFAULT_INSTALL_NAMESPACE"

  # Expose greptimedb for local access.
  kubectl -n "$DEFAULT_INSTALL_NAMESPACE" port-forward svc/greptimedb-standalone \
    34000:4000 \
    34001:4001 \
    34002:4002 \
    34003:4003 > /tmp/connections.out &
}

# Entrypoint of the script.
function main() {
  create_kind_cluster
  add_greptime_chart

  # Deploy standalone greptimedb in the same K8s.
  if [ "$ENABLE_STANDALONE_MODE" == "true" ]; then
    deploy_standalone_greptimedb
  fi

  deploy_greptimedb_operator
  deploy_greptimedb_cluster testcluster testcluster
  deploy_greptimedb_cluster_with_s3_storage testcluster-s3 testcluster-s3
}

# Usage:
# - Deploy the greptimedb clusters: ./deploy-greptimedb.sh
main
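For reference, a minimal sketch of how this script might be invoked locally. It assumes kind, kubectl, helm, and the AWS CLI are installed, and that the chart locations and default namespace (GREPTIME_CHART, ETCD_CHART, DEFAULT_INSTALL_NAMESPACE) are given defaults earlier in the script; the environment variable set and the concrete values below are illustrative assumptions drawn from the variables the functions above reference, not a documented interface.

# Hypothetical local invocation; variable names come from the script above,
# but the values shown here are examples only.
export GREPTIMEDB_IMAGE_TAG=latest           # image tag used by the cluster and standalone charts
export ENABLE_STANDALONE_MODE=true           # also deploy a standalone greptimedb
export AWS_CI_TEST_BUCKET=my-ci-test-bucket  # S3 bucket for the S3-backed cluster
export AWS_REGION=us-west-2
export DATA_ROOT=sqlness-test
export AWS_ACCESS_KEY_ID=<your_access_key_id>
export AWS_SECRET_ACCESS_KEY=<your_secret_access_key>

./deploy-greptimedb.sh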
102  .github/scripts/upload-artifacts-to-s3.sh  vendored  Executable file
@@ -0,0 +1,102 @@
#!/usr/bin/env bash

set -e
set -o pipefail

ARTIFACTS_DIR=$1
VERSION=$2
AWS_S3_BUCKET=$3
RELEASE_DIRS="releases/greptimedb"
GREPTIMEDB_REPO="GreptimeTeam/greptimedb"

# Check that the necessary variables are set.
function check_vars() {
  for var in AWS_S3_BUCKET VERSION ARTIFACTS_DIR; do
    if [ -z "${!var}" ]; then
      echo "$var is not set or empty."
      echo "Usage: $0 <artifacts-dir> <version> <aws-s3-bucket>"
      exit 1
    fi
  done
}

# Uploads artifacts to the AWS S3 bucket.
function upload_artifacts() {
  # The bucket layout will be:
  # releases/greptimedb
  # ├── latest-version.txt
  # ├── latest-nightly-version.txt
  # ├── v0.1.0
  # │   ├── greptime-darwin-amd64-pyo3-v0.1.0.sha256sum
  # │   └── greptime-darwin-amd64-pyo3-v0.1.0.tar.gz
  # └── v0.2.0
  #     ├── greptime-darwin-amd64-pyo3-v0.2.0.sha256sum
  #     └── greptime-darwin-amd64-pyo3-v0.2.0.tar.gz
  find "$ARTIFACTS_DIR" -type f \( -name "*.tar.gz" -o -name "*.sha256sum" \) | while IFS= read -r file; do
    aws s3 cp \
      "$file" "s3://$AWS_S3_BUCKET/$RELEASE_DIRS/$VERSION/$(basename "$file")"
  done
}

# Updates the latest version information in AWS S3 if UPDATE_VERSION_INFO is true.
function update_version_info() {
  if [ "$UPDATE_VERSION_INFO" == "true" ]; then
    # If it's an official release (like v1.0.0, v1.0.1, v1.0.2, etc.), update latest-version.txt.
    if [[ "$VERSION" =~ ^v[0-9]+\.[0-9]+\.[0-9]+$ ]]; then
      echo "Updating latest-version.txt"
      echo "$VERSION" > latest-version.txt
      aws s3 cp \
        latest-version.txt "s3://$AWS_S3_BUCKET/$RELEASE_DIRS/latest-version.txt"
    fi

    # If it's a nightly release, update latest-nightly-version.txt.
    if [[ "$VERSION" == *"nightly"* ]]; then
      echo "Updating latest-nightly-version.txt"
      echo "$VERSION" > latest-nightly-version.txt
      aws s3 cp \
        latest-nightly-version.txt "s3://$AWS_S3_BUCKET/$RELEASE_DIRS/latest-nightly-version.txt"
    fi
  fi
}

# Downloads artifacts from GitHub if DOWNLOAD_ARTIFACTS_FROM_GITHUB is true.
function download_artifacts_from_github() {
  if [ "$DOWNLOAD_ARTIFACTS_FROM_GITHUB" == "true" ]; then
    # Check if jq is installed.
    if ! command -v jq &> /dev/null; then
      echo "jq is not installed. Please install jq to continue."
      exit 1
    fi

    # Get the latest release API response.
    RELEASES_API_RESPONSE=$(curl -s -H "Accept: application/vnd.github.v3+json" "https://api.github.com/repos/$GREPTIMEDB_REPO/releases/latest")

    # Extract download URLs for the artifacts.
    # Exclude source code archives, which are typically named 'greptimedb-<version>.zip' or 'greptimedb-<version>.tar.gz'.
    ASSET_URLS=$(echo "$RELEASES_API_RESPONSE" | jq -r '.assets[] | select(.name | test("greptimedb-.*\\.(zip|tar\\.gz)$") | not) | .browser_download_url')

    # Download each asset.
    while IFS= read -r url; do
      if [ -n "$url" ]; then
        curl -LJO "$url"
        echo "Downloaded: $url"
      fi
    done <<< "$ASSET_URLS"
  fi
}

function main() {
  check_vars
  download_artifacts_from_github
  upload_artifacts
  update_version_info
}

# Usage example:
# AWS_ACCESS_KEY_ID=<your_access_key_id> \
# AWS_SECRET_ACCESS_KEY=<your_secret_access_key> \
# AWS_DEFAULT_REGION=<your_region> \
# UPDATE_VERSION_INFO=true \
# DOWNLOAD_ARTIFACTS_FROM_GITHUB=false \
# ./upload-artifacts-to-s3.sh <artifacts-dir> <version> <aws-s3-bucket>
main
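As a quick illustration of the two version branches in update_version_info, the following standalone snippet (a sketch, not part of the uploaded script; the example tags are hypothetical) shows which version file each kind of tag would update:

# Official releases match the anchored vX.Y.Z regex; nightly builds contain "nightly";
# anything else (e.g. a release candidate) updates neither file.
for VERSION in v1.0.0 v0.5.0-nightly-20231219 v1.0.0-rc1; do
  if [[ "$VERSION" =~ ^v[0-9]+\.[0-9]+\.[0-9]+$ ]]; then
    echo "$VERSION -> latest-version.txt"
  elif [[ "$VERSION" == *"nightly"* ]]; then
    echo "$VERSION -> latest-nightly-version.txt"
  else
    echo "$VERSION -> no version file update"
  fi
done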
4  .github/workflows/apidoc.yml  vendored
@@ -13,11 +13,11 @@ on:
name: Build API docs

env:
-  RUST_TOOLCHAIN: nightly-2023-08-07
+  RUST_TOOLCHAIN: nightly-2023-12-19

jobs:
  apidoc:
-    runs-on: ubuntu-latest
+    runs-on: ubuntu-20.04
    steps:
      - uses: actions/checkout@v3
      - uses: arduino/setup-protoc@v1
77  .github/workflows/dev-build.yml  vendored
@@ -16,11 +16,11 @@ on:
        description: The runner uses to build linux-amd64 artifacts
        default: ec2-c6i.4xlarge-amd64
        options:
-          - ubuntu-latest
+          - ubuntu-20.04
-          - ubuntu-latest-8-cores
+          - ubuntu-20.04-8-cores
-          - ubuntu-latest-16-cores
+          - ubuntu-20.04-16-cores
-          - ubuntu-latest-32-cores
+          - ubuntu-20.04-32-cores
-          - ubuntu-latest-64-cores
+          - ubuntu-20.04-64-cores
          - ec2-c6i.xlarge-amd64 # 4C8G
          - ec2-c6i.2xlarge-amd64 # 8C16G
          - ec2-c6i.4xlarge-amd64 # 16C32G
@@ -55,10 +55,18 @@ on:
        description: Build and push images to DockerHub and ACR
        required: false
        default: true
+      cargo_profile:
+        type: choice
+        description: The cargo profile to use in building GreptimeDB.
+        default: nightly
+        options:
+          - dev
+          - release
+          - nightly

# Use env variables to control all the release process.
env:
-  CARGO_PROFILE: nightly
+  CARGO_PROFILE: ${{ inputs.cargo_profile }}

  # Controls whether to run tests, include unit-test, integration-test and sqlness.
  DISABLE_RUN_TESTS: ${{ inputs.skip_test || vars.DEFAULT_SKIP_TEST }}
@@ -78,7 +86,7 @@ jobs:
  allocate-runners:
    name: Allocate runners
    if: ${{ github.repository == 'GreptimeTeam/greptimedb' }}
-    runs-on: ubuntu-latest
+    runs-on: ubuntu-20.04
    outputs:
      linux-amd64-runner: ${{ steps.start-linux-amd64-runner.outputs.label }}
      linux-arm64-runner: ${{ steps.start-linux-arm64-runner.outputs.label }}
@@ -164,12 +172,7 @@ jobs:
          cargo-profile: ${{ env.CARGO_PROFILE }}
          version: ${{ needs.allocate-runners.outputs.version }}
          disable-run-tests: ${{ env.DISABLE_RUN_TESTS }}
-          release-to-s3-bucket: ${{ vars.AWS_RELEASE_BUCKET }}
-          aws-access-key-id: ${{ secrets.AWS_CN_ACCESS_KEY_ID }}
-          aws-secret-access-key: ${{ secrets.AWS_CN_SECRET_ACCESS_KEY }}
-          aws-region: ${{ vars.AWS_RELEASE_BUCKET_REGION }}
          dev-mode: true # Only build the standard greptime binary.
-          upload-to-s3: false # No need to upload to S3.
          working-dir: ${{ env.CHECKOUT_GREPTIMEDB_PATH }}

  build-linux-arm64-artifacts:
@@ -198,12 +201,7 @@ jobs:
          cargo-profile: ${{ env.CARGO_PROFILE }}
          version: ${{ needs.allocate-runners.outputs.version }}
          disable-run-tests: ${{ env.DISABLE_RUN_TESTS }}
-          release-to-s3-bucket: ${{ vars.AWS_RELEASE_BUCKET }}
-          aws-access-key-id: ${{ secrets.AWS_CN_ACCESS_KEY_ID }}
-          aws-secret-access-key: ${{ secrets.AWS_CN_SECRET_ACCESS_KEY }}
-          aws-region: ${{ vars.AWS_RELEASE_BUCKET_REGION }}
          dev-mode: true # Only build the standard greptime binary.
-          upload-to-s3: false # No need to upload to S3.
          working-dir: ${{ env.CHECKOUT_GREPTIMEDB_PATH }}

  release-images-to-dockerhub:
@@ -214,7 +212,7 @@ jobs:
      build-linux-amd64-artifacts,
      build-linux-arm64-artifacts,
    ]
-    runs-on: ubuntu-latest
+    runs-on: ubuntu-20.04
    outputs:
      build-result: ${{ steps.set-build-result.outputs.build-result }}
    steps:
@@ -239,41 +237,44 @@ jobs:
      run: |
        echo "build-result=success" >> $GITHUB_OUTPUT

-  release-images-to-acr:
-    name: Build and push images to ACR
+  release-cn-artifacts:
+    name: Release artifacts to CN region
    if: ${{ inputs.release_images || github.event_name == 'schedule' }}
    needs: [
      allocate-runners,
-      build-linux-amd64-artifacts,
-      build-linux-arm64-artifacts,
+      release-images-to-dockerhub,
    ]
-    runs-on: ubuntu-latest
-    # When we push to ACR, it's easy to fail due to some unknown network issues.
-    # However, we don't want to fail the whole workflow because of this.
-    # The ACR have daily sync with DockerHub, so don't worry about the image not being updated.
+    runs-on: ubuntu-20.04
    continue-on-error: true
    steps:
      - uses: actions/checkout@v3
        with:
          fetch-depth: 0

-      - name: Build and push images to ACR
-        uses: ./.github/actions/build-images
+      - name: Release artifacts to CN region
+        uses: ./.github/actions/release-cn-artifacts
        with:
-          image-registry: ${{ vars.ACR_IMAGE_REGISTRY }}
-          image-namespace: ${{ vars.IMAGE_NAMESPACE }}
-          image-name: ${{ env.IMAGE_NAME }}
-          image-registry-username: ${{ secrets.ALICLOUD_USERNAME }}
-          image-registry-password: ${{ secrets.ALICLOUD_PASSWORD }}
+          src-image-registry: docker.io
+          src-image-namespace: ${{ vars.IMAGE_NAMESPACE }}
+          src-image-name: ${{ env.IMAGE_NAME }}
+          dst-image-registry-username: ${{ secrets.ALICLOUD_USERNAME }}
+          dst-image-registry-password: ${{ secrets.ALICLOUD_PASSWORD }}
+          dst-image-registry: ${{ vars.ACR_IMAGE_REGISTRY }}
+          dst-image-namespace: ${{ vars.IMAGE_NAMESPACE }}
          version: ${{ needs.allocate-runners.outputs.version }}
-          push-latest-tag: false # Don't push the latest tag to registry.
-          dev-mode: true # Only build the standard images.
+          aws-cn-s3-bucket: ${{ vars.AWS_RELEASE_BUCKET }}
+          aws-cn-access-key-id: ${{ secrets.AWS_CN_ACCESS_KEY_ID }}
+          aws-cn-secret-access-key: ${{ secrets.AWS_CN_SECRET_ACCESS_KEY }}
+          aws-cn-region: ${{ vars.AWS_RELEASE_BUCKET_REGION }}
+          dev-mode: true # Only build the standard images(exclude centos images).
+          push-latest-tag: false # Don't push the latest tag to registry.
+          update-version-info: false # Don't update the version info in S3.

  stop-linux-amd64-runner: # It's always run as the last job in the workflow to make sure that the runner is released.
    name: Stop linux-amd64 runner
    # Only run this job when the runner is allocated.
    if: ${{ always() }}
-    runs-on: ubuntu-latest
+    runs-on: ubuntu-20.04
    needs: [
      allocate-runners,
      build-linux-amd64-artifacts,
@@ -298,7 +299,7 @@ jobs:
    name: Stop linux-arm64 runner
    # Only run this job when the runner is allocated.
    if: ${{ always() }}
-    runs-on: ubuntu-latest
+    runs-on: ubuntu-20.04
    needs: [
      allocate-runners,
      build-linux-arm64-artifacts,
@@ -325,7 +326,7 @@ jobs:
    needs: [
      release-images-to-dockerhub
    ]
-    runs-on: ubuntu-latest
+    runs-on: ubuntu-20.04
    env:
      SLACK_WEBHOOK_URL: ${{ secrets.SLACK_WEBHOOK_URL_DEVELOP_CHANNEL }}
    steps:
73  .github/workflows/develop.yml  vendored
@@ -29,12 +29,12 @@ concurrency:
  cancel-in-progress: true

env:
-  RUST_TOOLCHAIN: nightly-2023-08-07
+  RUST_TOOLCHAIN: nightly-2023-12-19

jobs:
  typos:
    name: Spell Check with Typos
-    runs-on: ubuntu-latest
+    runs-on: ubuntu-20.04
    steps:
      - uses: actions/checkout@v3
      - uses: crate-ci/typos@v1.13.10
@@ -42,7 +42,10 @@ jobs:
  check:
    name: Check
    if: github.event.pull_request.draft == false
-    runs-on: ubuntu-latest
+    runs-on: ${{ matrix.os }}
+    strategy:
+      matrix:
+        os: [ windows-latest-8-cores, ubuntu-20.04 ]
    timeout-minutes: 60
    steps:
      - uses: actions/checkout@v3
@@ -60,7 +63,7 @@ jobs:
  toml:
    name: Toml Check
    if: github.event.pull_request.draft == false
-    runs-on: ubuntu-latest
+    runs-on: ubuntu-20.04
    timeout-minutes: 60
    steps:
      - uses: actions/checkout@v3
@@ -80,7 +83,7 @@ jobs:
    runs-on: ${{ matrix.os }}
    strategy:
      matrix:
-        os: [ ubuntu-latest-8-cores, windows-latest-8-cores ]
+        os: [ ubuntu-20.04-8-cores ]
    timeout-minutes: 60
    steps:
      - uses: actions/checkout@v3
@@ -105,7 +108,7 @@ jobs:
  fmt:
    name: Rustfmt
    if: github.event.pull_request.draft == false
-    runs-on: ubuntu-latest
+    runs-on: ubuntu-20.04
    timeout-minutes: 60
    steps:
      - uses: actions/checkout@v3
@@ -124,7 +127,7 @@ jobs:
  clippy:
    name: Clippy
    if: github.event.pull_request.draft == false
-    runs-on: ubuntu-latest
+    runs-on: ubuntu-20.04
    timeout-minutes: 60
    steps:
      - uses: actions/checkout@v3
@@ -142,7 +145,7 @@ jobs:

  coverage:
    if: github.event.pull_request.draft == false
-    runs-on: ubuntu-latest-8-cores
+    runs-on: ubuntu-20.04-8-cores
    timeout-minutes: 60
    steps:
      - uses: actions/checkout@v3
@@ -161,15 +164,21 @@ jobs:
        uses: Swatinem/rust-cache@v2
      - name: Install latest nextest release
        uses: taiki-e/install-action@nextest
+      - name: Install cargo-llvm-cov
+        uses: taiki-e/install-action@cargo-llvm-cov
      - name: Install Python
        uses: actions/setup-python@v4
        with:
          python-version: '3.10'
      - name: Install PyArrow Package
        run: pip install pyarrow
-      - name: Install cargo-llvm-cov
-        uses: taiki-e/install-action@cargo-llvm-cov
-      - name: Collect coverage data
+      - name: Setup etcd server
+        working-directory: tests-integration/fixtures/etcd
+        run: docker compose -f docker-compose-standalone.yml up -d --wait
+      - name: Setup kafka server
+        working-directory: tests-integration/fixtures/kafka
+        run: docker compose -f docker-compose-standalone.yml up -d --wait
+      - name: Run nextest cases
        run: cargo llvm-cov nextest --workspace --lcov --output-path lcov.info -F pyo3_backend -F dashboard
        env:
          CARGO_BUILD_RUSTFLAGS: "-C link-arg=-fuse-ld=lld"
@@ -179,6 +188,8 @@ jobs:
          GT_S3_ACCESS_KEY_ID: ${{ secrets.S3_ACCESS_KEY_ID }}
          GT_S3_ACCESS_KEY: ${{ secrets.S3_ACCESS_KEY }}
          GT_S3_REGION: ${{ secrets.S3_REGION }}
+          GT_ETCD_ENDPOINTS: http://127.0.0.1:2379
+          GT_KAFKA_ENDPOINTS: 127.0.0.1:9092
          UNITTEST_LOG_DIR: "__unittest_logs"
      - name: Codecov upload
        uses: codecov/codecov-action@v2
@@ -188,43 +199,3 @@ jobs:
          flags: rust
          fail_ci_if_error: false
          verbose: true
-
-  test-on-windows:
-    if: github.event.pull_request.draft == false
-    runs-on: windows-latest-8-cores
-    timeout-minutes: 60
-    steps:
-      - run: git config --global core.autocrlf false
-      - uses: actions/checkout@v3
-      - uses: arduino/setup-protoc@v1
-        with:
-          repo-token: ${{ secrets.GITHUB_TOKEN }}
-      - name: Install Rust toolchain
-        uses: dtolnay/rust-toolchain@master
-        with:
-          toolchain: ${{ env.RUST_TOOLCHAIN }}
-          components: llvm-tools-preview
-      - name: Rust Cache
-        uses: Swatinem/rust-cache@v2
-      - name: Install Cargo Nextest
-        uses: taiki-e/install-action@nextest
-      - name: Install Python
-        uses: actions/setup-python@v4
-        with:
-          python-version: '3.10'
-      - name: Install PyArrow Package
-        run: pip install pyarrow
-      - name: Install WSL distribution
-        uses: Vampire/setup-wsl@v2
-        with:
-          distribution: Ubuntu-22.04
-      - name: Running tests
-        run: cargo nextest run -F pyo3_backend,dashboard
-        env:
-          RUST_BACKTRACE: 1
-          CARGO_INCREMENTAL: 0
-          GT_S3_BUCKET: ${{ secrets.S3_BUCKET }}
-          GT_S3_ACCESS_KEY_ID: ${{ secrets.S3_ACCESS_KEY_ID }}
-          GT_S3_ACCESS_KEY: ${{ secrets.S3_ACCESS_KEY }}
-          GT_S3_REGION: ${{ secrets.S3_REGION }}
-          UNITTEST_LOG_DIR: "__unittest_logs"
4  .github/workflows/doc-issue.yml  vendored
@@ -11,7 +11,7 @@ on:
jobs:
  doc_issue:
    if: github.event.label.name == 'doc update required'
-    runs-on: ubuntu-latest
+    runs-on: ubuntu-20.04
    steps:
      - name: create an issue in doc repo
        uses: dacbd/create-issue-action@main
@@ -25,7 +25,7 @@ jobs:
          ${{ github.event.issue.html_url || github.event.pull_request.html_url }}
  cloud_issue:
    if: github.event.label.name == 'cloud followup required'
-    runs-on: ubuntu-latest
+    runs-on: ubuntu-20.04
    steps:
      - name: create an issue in cloud repo
        uses: dacbd/create-issue-action@main
20  .github/workflows/doc-label.yml  vendored  Normal file
@@ -0,0 +1,20 @@
name: "PR Doc Labeler"
on:
  pull_request_target:
    types: [opened, edited, synchronize, ready_for_review, auto_merge_enabled, labeled, unlabeled]

permissions:
  pull-requests: write
  contents: read

jobs:
  triage:
    if: ${{ github.repository == 'GreptimeTeam/greptimedb' }}
    runs-on: ubuntu-latest
    steps:
      - uses: github/issue-labeler@v3.3
        with:
          configuration-path: .github/doc-label-config.yml
          enable-versioned-regex: false
          repo-token: ${{ secrets.GITHUB_TOKEN }}
          sync-labels: 1
12  .github/workflows/docs.yml  vendored
@@ -30,7 +30,7 @@ name: CI
jobs:
  typos:
    name: Spell Check with Typos
-    runs-on: ubuntu-latest
+    runs-on: ubuntu-20.04
    steps:
      - uses: actions/checkout@v3
      - uses: crate-ci/typos@v1.13.10
@@ -38,33 +38,33 @@ jobs:
  check:
    name: Check
    if: github.event.pull_request.draft == false
-    runs-on: ubuntu-latest
+    runs-on: ubuntu-20.04
    steps:
      - run: 'echo "No action required"'

  fmt:
    name: Rustfmt
    if: github.event.pull_request.draft == false
-    runs-on: ubuntu-latest
+    runs-on: ubuntu-20.04
    steps:
      - run: 'echo "No action required"'

  clippy:
    name: Clippy
    if: github.event.pull_request.draft == false
-    runs-on: ubuntu-latest
+    runs-on: ubuntu-20.04
    steps:
      - run: 'echo "No action required"'

  coverage:
    if: github.event.pull_request.draft == false
-    runs-on: ubuntu-latest
+    runs-on: ubuntu-20.04
    steps:
      - run: 'echo "No action required"'

  sqlness:
    name: Sqlness Test
    if: github.event.pull_request.draft == false
-    runs-on: ubuntu-latest
+    runs-on: ubuntu-20.04
    steps:
      - run: 'echo "No action required"'
4  .github/workflows/license.yaml  vendored
@@ -8,9 +8,9 @@ on:
    types: [opened, synchronize, reopened, ready_for_review]
jobs:
  license-header-check:
-    runs-on: ubuntu-latest
+    runs-on: ubuntu-20.04
    name: license-header-check
    steps:
      - uses: actions/checkout@v2
      - name: Check License Header
-        uses: apache/skywalking-eyes/header@df70871af1a8109c9a5b1dc824faaf65246c5236
+        uses: korandoru/hawkeye@v3
62  .github/workflows/nightly-build.yml  vendored
@@ -14,11 +14,11 @@ on:
        description: The runner uses to build linux-amd64 artifacts
        default: ec2-c6i.2xlarge-amd64
        options:
-          - ubuntu-latest
+          - ubuntu-20.04
-          - ubuntu-latest-8-cores
+          - ubuntu-20.04-8-cores
-          - ubuntu-latest-16-cores
+          - ubuntu-20.04-16-cores
-          - ubuntu-latest-32-cores
+          - ubuntu-20.04-32-cores
-          - ubuntu-latest-64-cores
+          - ubuntu-20.04-64-cores
          - ec2-c6i.xlarge-amd64 # 4C8G
          - ec2-c6i.2xlarge-amd64 # 8C16G
          - ec2-c6i.4xlarge-amd64 # 16C32G
@@ -70,7 +70,7 @@ jobs:
  allocate-runners:
    name: Allocate runners
    if: ${{ github.repository == 'GreptimeTeam/greptimedb' }}
-    runs-on: ubuntu-latest
+    runs-on: ubuntu-20.04
    outputs:
      linux-amd64-runner: ${{ steps.start-linux-amd64-runner.outputs.label }}
      linux-arm64-runner: ${{ steps.start-linux-arm64-runner.outputs.label }}
@@ -147,11 +147,6 @@ jobs:
          cargo-profile: ${{ env.CARGO_PROFILE }}
          version: ${{ needs.allocate-runners.outputs.version }}
          disable-run-tests: ${{ env.DISABLE_RUN_TESTS }}
-          release-to-s3-bucket: ${{ vars.AWS_RELEASE_BUCKET }}
-          aws-access-key-id: ${{ secrets.AWS_CN_ACCESS_KEY_ID }}
-          aws-secret-access-key: ${{ secrets.AWS_CN_SECRET_ACCESS_KEY }}
-          aws-region: ${{ vars.AWS_RELEASE_BUCKET_REGION }}
-          upload-latest-artifacts: false

  build-linux-arm64-artifacts:
    name: Build linux-arm64 artifacts
@@ -171,11 +166,6 @@ jobs:
          cargo-profile: ${{ env.CARGO_PROFILE }}
          version: ${{ needs.allocate-runners.outputs.version }}
          disable-run-tests: ${{ env.DISABLE_RUN_TESTS }}
-          release-to-s3-bucket: ${{ vars.AWS_RELEASE_BUCKET }}
-          aws-access-key-id: ${{ secrets.AWS_CN_ACCESS_KEY_ID }}
-          aws-secret-access-key: ${{ secrets.AWS_CN_SECRET_ACCESS_KEY }}
-          aws-region: ${{ vars.AWS_RELEASE_BUCKET_REGION }}
-          upload-latest-artifacts: false

  release-images-to-dockerhub:
    name: Build and push images to DockerHub
@@ -185,7 +175,7 @@ jobs:
      build-linux-amd64-artifacts,
      build-linux-arm64-artifacts,
    ]
-    runs-on: ubuntu-latest
+    runs-on: ubuntu-20.04
    outputs:
      nightly-build-result: ${{ steps.set-nightly-build-result.outputs.nightly-build-result }}
    steps:
@@ -208,15 +198,14 @@ jobs:
      run: |
        echo "nightly-build-result=success" >> $GITHUB_OUTPUT

-  release-images-to-acr:
-    name: Build and push images to ACR
+  release-cn-artifacts:
+    name: Release artifacts to CN region
    if: ${{ inputs.release_images || github.event_name == 'schedule' }}
    needs: [
      allocate-runners,
-      build-linux-amd64-artifacts,
-      build-linux-arm64-artifacts,
+      release-images-to-dockerhub,
    ]
-    runs-on: ubuntu-latest
+    runs-on: ubuntu-20.04
    # When we push to ACR, it's easy to fail due to some unknown network issues.
    # However, we don't want to fail the whole workflow because of this.
    # The ACR have daily sync with DockerHub, so don't worry about the image not being updated.
@@ -226,21 +215,30 @@ jobs:
        with:
          fetch-depth: 0

-      - name: Build and push images to ACR
-        uses: ./.github/actions/build-images
+      - name: Release artifacts to CN region
+        uses: ./.github/actions/release-cn-artifacts
        with:
-          image-registry: ${{ vars.ACR_IMAGE_REGISTRY }}
-          image-namespace: ${{ vars.IMAGE_NAMESPACE }}
-          image-registry-username: ${{ secrets.ALICLOUD_USERNAME }}
-          image-registry-password: ${{ secrets.ALICLOUD_PASSWORD }}
+          src-image-registry: docker.io
+          src-image-namespace: ${{ vars.IMAGE_NAMESPACE }}
+          src-image-name: greptimedb
+          dst-image-registry-username: ${{ secrets.ALICLOUD_USERNAME }}
+          dst-image-registry-password: ${{ secrets.ALICLOUD_PASSWORD }}
+          dst-image-registry: ${{ vars.ACR_IMAGE_REGISTRY }}
+          dst-image-namespace: ${{ vars.IMAGE_NAMESPACE }}
          version: ${{ needs.allocate-runners.outputs.version }}
-          push-latest-tag: false # Don't push the latest tag to registry.
+          aws-cn-s3-bucket: ${{ vars.AWS_RELEASE_BUCKET }}
+          aws-cn-access-key-id: ${{ secrets.AWS_CN_ACCESS_KEY_ID }}
+          aws-cn-secret-access-key: ${{ secrets.AWS_CN_SECRET_ACCESS_KEY }}
+          aws-cn-region: ${{ vars.AWS_RELEASE_BUCKET_REGION }}
+          dev-mode: false
+          update-version-info: false # Don't update version info in S3.
+          push-latest-tag: false # Don't push the latest tag to registry.

  stop-linux-amd64-runner: # It's always run as the last job in the workflow to make sure that the runner is released.
    name: Stop linux-amd64 runner
    # Only run this job when the runner is allocated.
    if: ${{ always() }}
-    runs-on: ubuntu-latest
+    runs-on: ubuntu-20.04
    needs: [
      allocate-runners,
      build-linux-amd64-artifacts,
@@ -265,7 +263,7 @@ jobs:
    name: Stop linux-arm64 runner
    # Only run this job when the runner is allocated.
    if: ${{ always() }}
-    runs-on: ubuntu-latest
+    runs-on: ubuntu-20.04
    needs: [
      allocate-runners,
      build-linux-arm64-artifacts,
@@ -292,7 +290,7 @@ jobs:
    needs: [
      release-images-to-dockerhub
    ]
-    runs-on: ubuntu-latest
+    runs-on: ubuntu-20.04
    env:
      SLACK_WEBHOOK_URL: ${{ secrets.SLACK_WEBHOOK_URL_DEVELOP_CHANNEL }}
    steps:
100  .github/workflows/nightly-ci.yml  vendored  Normal file
@@ -0,0 +1,100 @@
# Nightly CI: runs tests every night for our second-tier platforms (Windows)

on:
  schedule:
    - cron: '0 23 * * 1-5'
  workflow_dispatch:

name: Nightly CI

concurrency:
  group: ${{ github.workflow }}-${{ github.head_ref || github.run_id }}
  cancel-in-progress: true

env:
  RUST_TOOLCHAIN: nightly-2023-12-19

jobs:
  sqlness:
    name: Sqlness Test
    if: ${{ github.repository == 'GreptimeTeam/greptimedb' }}
    runs-on: ${{ matrix.os }}
    strategy:
      matrix:
        os: [ windows-latest-8-cores ]
    timeout-minutes: 60
    steps:
      - uses: actions/checkout@v4.1.0
      - uses: arduino/setup-protoc@v1
        with:
          repo-token: ${{ secrets.GITHUB_TOKEN }}
      - uses: dtolnay/rust-toolchain@master
        with:
          toolchain: ${{ env.RUST_TOOLCHAIN }}
      - name: Rust Cache
        uses: Swatinem/rust-cache@v2
      - name: Run sqlness
        run: cargo sqlness
      - name: Notify slack if failed
        if: failure()
        uses: slackapi/slack-github-action@v1.23.0
        env:
          SLACK_WEBHOOK_URL: ${{ secrets.SLACK_WEBHOOK_URL_DEVELOP_CHANNEL }}
        with:
          payload: |
            {"text": "Nightly CI failed for sqlness tests"}
      - name: Upload sqlness logs
        if: always()
        uses: actions/upload-artifact@v3
        with:
          name: sqlness-logs
          path: ${{ runner.temp }}/greptime-*.log
          retention-days: 3

  test-on-windows:
    if: ${{ github.repository == 'GreptimeTeam/greptimedb' }}
    runs-on: windows-latest-8-cores
    timeout-minutes: 60
    steps:
      - run: git config --global core.autocrlf false
      - uses: actions/checkout@v4.1.0
      - uses: arduino/setup-protoc@v1
        with:
          repo-token: ${{ secrets.GITHUB_TOKEN }}
      - name: Install Rust toolchain
        uses: dtolnay/rust-toolchain@master
        with:
          toolchain: ${{ env.RUST_TOOLCHAIN }}
          components: llvm-tools-preview
      - name: Rust Cache
        uses: Swatinem/rust-cache@v2
      - name: Install Cargo Nextest
        uses: taiki-e/install-action@nextest
      - name: Install Python
        uses: actions/setup-python@v4
        with:
          python-version: '3.10'
      - name: Install PyArrow Package
        run: pip install pyarrow
      - name: Install WSL distribution
        uses: Vampire/setup-wsl@v2
        with:
          distribution: Ubuntu-22.04
      - name: Running tests
        run: cargo nextest run -F pyo3_backend,dashboard
        env:
          RUST_BACKTRACE: 1
          CARGO_INCREMENTAL: 0
          GT_S3_BUCKET: ${{ secrets.S3_BUCKET }}
          GT_S3_ACCESS_KEY_ID: ${{ secrets.S3_ACCESS_KEY_ID }}
          GT_S3_ACCESS_KEY: ${{ secrets.S3_ACCESS_KEY }}
          GT_S3_REGION: ${{ secrets.S3_REGION }}
          UNITTEST_LOG_DIR: "__unittest_logs"
      - name: Notify slack if failed
        if: failure()
        uses: slackapi/slack-github-action@v1.23.0
        env:
          SLACK_WEBHOOK_URL: ${{ secrets.SLACK_WEBHOOK_URL_DEVELOP_CHANNEL }}
        with:
          payload: |
            {"text": "Nightly CI failed for cargo test"}
27  .github/workflows/nightly-funtional-tests.yml  vendored  Normal file
@@ -0,0 +1,27 @@
name: Nightly functional tests

on:
  schedule:
    # At 00:00 on Tuesday.
    - cron: '0 0 * * 2'
  workflow_dispatch:

jobs:
  sqlness-test:
    name: Run sqlness test
    if: ${{ github.repository == 'GreptimeTeam/greptimedb' }}
    runs-on: ubuntu-22.04
    steps:
      - name: Checkout
        uses: actions/checkout@v3
        with:
          fetch-depth: 0

      - name: Run sqlness test
        uses: ./.github/actions/sqlness-test
        with:
          data-root: sqlness-test
          aws-ci-test-bucket: ${{ vars.AWS_CI_TEST_BUCKET }}
          aws-region: ${{ vars.AWS_CI_TEST_BUCKET_REGION }}
          aws-access-key-id: ${{ secrets.AWS_CI_TEST_ACCESS_KEY_ID }}
          aws-secret-access-key: ${{ secrets.AWS_CI_TEST_SECRET_ACCESS_KEY }}
4  .github/workflows/pr-title-checker.yml  vendored
@@ -10,7 +10,7 @@ on:

jobs:
  check:
-    runs-on: ubuntu-latest
+    runs-on: ubuntu-20.04
    timeout-minutes: 10
    steps:
      - uses: thehanimo/pr-title-checker@v1.3.4
@@ -19,7 +19,7 @@ jobs:
          pass_on_octokit_error: false
          configuration_path: ".github/pr-title-checker-config.json"
  breaking:
-    runs-on: ubuntu-latest
+    runs-on: ubuntu-20.04
    timeout-minutes: 10
    steps:
      - uses: thehanimo/pr-title-checker@v1.3.4
85  .github/workflows/release-dev-builder-images.yaml  vendored  Normal file
@@ -0,0 +1,85 @@
name: Release dev-builder images

on:
  workflow_dispatch: # Allows you to run this workflow manually.
    inputs:
      version:
        description: Version of the dev-builder
        required: false
        default: latest
      release_dev_builder_ubuntu_image:
        type: boolean
        description: Release dev-builder-ubuntu image
        required: false
        default: false
      release_dev_builder_centos_image:
        type: boolean
        description: Release dev-builder-centos image
        required: false
        default: false
      release_dev_builder_android_image:
        type: boolean
        description: Release dev-builder-android image
        required: false
        default: false

jobs:
  release-dev-builder-images:
    name: Release dev builder images
    if: ${{ inputs.release_dev_builder_ubuntu_image || inputs.release_dev_builder_centos_image || inputs.release_dev_builder_android_image }} # Only manually trigger this job.
    runs-on: ubuntu-20.04-16-cores
    steps:
      - name: Checkout
        uses: actions/checkout@v3
        with:
          fetch-depth: 0

      - name: Build and push dev builder images
        uses: ./.github/actions/build-dev-builder-images
        with:
          version: ${{ inputs.version }}
          dockerhub-image-registry-username: ${{ secrets.DOCKERHUB_USERNAME }}
          dockerhub-image-registry-token: ${{ secrets.DOCKERHUB_TOKEN }}
          build-dev-builder-ubuntu: ${{ inputs.release_dev_builder_ubuntu_image }}
          build-dev-builder-centos: ${{ inputs.release_dev_builder_centos_image }}
          build-dev-builder-android: ${{ inputs.release_dev_builder_android_image }}

  release-dev-builder-images-cn: # Note: Be careful issue: https://github.com/containers/skopeo/issues/1874 and we decide to use the latest stable skopeo container.
    name: Release dev builder images to CN region
    runs-on: ubuntu-20.04
    needs: [
      release-dev-builder-images
    ]
    steps:
      - name: Push dev-builder-ubuntu image
        shell: bash
        if: ${{ inputs.release_dev_builder_ubuntu_image }}
        env:
          DST_REGISTRY_USERNAME: ${{ secrets.ALICLOUD_USERNAME }}
          DST_REGISTRY_PASSWORD: ${{ secrets.ALICLOUD_PASSWORD }}
        run: |
          docker run quay.io/skopeo/stable:latest copy -a docker://docker.io/${{ vars.IMAGE_NAMESPACE }}/dev-builder-ubuntu:${{ inputs.version }} \
            --dest-creds "$DST_REGISTRY_USERNAME":"$DST_REGISTRY_PASSWORD" \
            docker://${{ vars.ACR_IMAGE_REGISTRY }}/${{ vars.IMAGE_NAMESPACE }}/dev-builder-ubuntu:${{ inputs.version }}

      - name: Push dev-builder-centos image
        shell: bash
        if: ${{ inputs.release_dev_builder_centos_image }}
        env:
          DST_REGISTRY_USERNAME: ${{ secrets.ALICLOUD_USERNAME }}
          DST_REGISTRY_PASSWORD: ${{ secrets.ALICLOUD_PASSWORD }}
        run: |
          docker run quay.io/skopeo/stable:latest copy -a docker://docker.io/${{ vars.IMAGE_NAMESPACE }}/dev-builder-centos:${{ inputs.version }} \
            --dest-creds "$DST_REGISTRY_USERNAME":"$DST_REGISTRY_PASSWORD" \
            docker://${{ vars.ACR_IMAGE_REGISTRY }}/${{ vars.IMAGE_NAMESPACE }}/dev-builder-centos:${{ inputs.version }}

      - name: Push dev-builder-android image
        shell: bash
        if: ${{ inputs.release_dev_builder_android_image }}
        env:
          DST_REGISTRY_USERNAME: ${{ secrets.ALICLOUD_USERNAME }}
          DST_REGISTRY_PASSWORD: ${{ secrets.ALICLOUD_PASSWORD }}
        run: |
          docker run quay.io/skopeo/stable:latest copy -a docker://docker.io/${{ vars.IMAGE_NAMESPACE }}/dev-builder-android:${{ inputs.version }} \
            --dest-creds "$DST_REGISTRY_USERNAME":"$DST_REGISTRY_PASSWORD" \
            docker://${{ vars.ACR_IMAGE_REGISTRY }}/${{ vars.IMAGE_NAMESPACE }}/dev-builder-android:${{ inputs.version }}
149
.github/workflows/release.yml
vendored
149
.github/workflows/release.yml
vendored
@@ -18,11 +18,11 @@ on:
|
|||||||
description: The runner uses to build linux-amd64 artifacts
|
description: The runner uses to build linux-amd64 artifacts
|
||||||
default: ec2-c6i.4xlarge-amd64
|
default: ec2-c6i.4xlarge-amd64
|
||||||
options:
|
options:
|
||||||
- ubuntu-latest
|
- ubuntu-20.04
|
||||||
- ubuntu-latest-8-cores
|
- ubuntu-20.04-8-cores
|
||||||
- ubuntu-latest-16-cores
|
- ubuntu-20.04-16-cores
|
||||||
- ubuntu-latest-32-cores
|
- ubuntu-20.04-32-cores
|
||||||
- ubuntu-latest-64-cores
|
- ubuntu-20.04-64-cores
|
||||||
- ec2-c6i.xlarge-amd64 # 4C8G
|
- ec2-c6i.xlarge-amd64 # 4C8G
|
||||||
- ec2-c6i.2xlarge-amd64 # 8C16G
|
- ec2-c6i.2xlarge-amd64 # 8C16G
|
||||||
- ec2-c6i.4xlarge-amd64 # 16C32G
|
- ec2-c6i.4xlarge-amd64 # 16C32G
|
||||||
@@ -63,7 +63,12 @@ on:
|
|||||||
description: Build macos artifacts
|
description: Build macos artifacts
|
||||||
required: false
|
required: false
|
||||||
default: false
|
default: false
|
||||||
release_artifacts:
|
build_windows_artifacts:
|
||||||
|
type: boolean
|
||||||
|
description: Build Windows artifacts
|
||||||
|
required: false
|
||||||
|
default: false
|
||||||
|
publish_github_release:
|
||||||
type: boolean
|
type: boolean
|
||||||
description: Create GitHub release and upload artifacts
|
description: Create GitHub release and upload artifacts
|
||||||
required: false
|
required: false
|
||||||
@@ -73,16 +78,11 @@ on:
|
|||||||
description: Build and push images to DockerHub and ACR
|
description: Build and push images to DockerHub and ACR
|
||||||
required: false
|
required: false
|
||||||
default: false
|
default: false
|
||||||
release_dev_builder_image:
|
|
||||||
type: boolean
|
|
||||||
description: Release dev-builder image
|
|
||||||
required: false
|
|
||||||
default: false
|
|
||||||
|
|
||||||
# Use env variables to control all the release process.
|
# Use env variables to control all the release process.
|
||||||
env:
|
env:
|
||||||
# The arguments of building greptime.
|
# The arguments of building greptime.
|
||||||
RUST_TOOLCHAIN: nightly-2023-08-07
|
RUST_TOOLCHAIN: nightly-2023-12-19
|
||||||
CARGO_PROFILE: nightly
|
CARGO_PROFILE: nightly
|
||||||
|
|
||||||
# Controls whether to run tests, include unit-test, integration-test and sqlness.
|
# Controls whether to run tests, include unit-test, integration-test and sqlness.
|
||||||
@@ -91,17 +91,18 @@ env:
|
|||||||
# The scheduled version is '${{ env.NEXT_RELEASE_VERSION }}-nightly-YYYYMMDD', like v0.2.0-nigthly-20230313;
|
# The scheduled version is '${{ env.NEXT_RELEASE_VERSION }}-nightly-YYYYMMDD', like v0.2.0-nigthly-20230313;
|
||||||
NIGHTLY_RELEASE_PREFIX: nightly
|
NIGHTLY_RELEASE_PREFIX: nightly
|
||||||
# Note: The NEXT_RELEASE_VERSION should be modified manually by every formal release.
|
# Note: The NEXT_RELEASE_VERSION should be modified manually by every formal release.
|
||||||
NEXT_RELEASE_VERSION: v0.5.0
|
NEXT_RELEASE_VERSION: v0.6.0
|
||||||
|
|
||||||
jobs:
|
jobs:
|
||||||
allocate-runners:
|
allocate-runners:
|
||||||
name: Allocate runners
|
name: Allocate runners
|
||||||
if: ${{ github.repository == 'GreptimeTeam/greptimedb' }}
|
if: ${{ github.repository == 'GreptimeTeam/greptimedb' }}
|
||||||
runs-on: ubuntu-latest
|
runs-on: ubuntu-20.04
|
||||||
outputs:
|
outputs:
|
||||||
linux-amd64-runner: ${{ steps.start-linux-amd64-runner.outputs.label }}
|
linux-amd64-runner: ${{ steps.start-linux-amd64-runner.outputs.label }}
|
||||||
linux-arm64-runner: ${{ steps.start-linux-arm64-runner.outputs.label }}
|
linux-arm64-runner: ${{ steps.start-linux-arm64-runner.outputs.label }}
|
||||||
macos-runner: ${{ inputs.macos_runner || vars.DEFAULT_MACOS_RUNNER }}
|
macos-runner: ${{ inputs.macos_runner || vars.DEFAULT_MACOS_RUNNER }}
|
||||||
|
windows-runner: windows-latest-8-cores
|
||||||
|
|
||||||
# The following EC2 resource id will be used for resource releasing.
|
# The following EC2 resource id will be used for resource releasing.
|
||||||
linux-amd64-ec2-runner-label: ${{ steps.start-linux-amd64-runner.outputs.label }}
|
linux-amd64-ec2-runner-label: ${{ steps.start-linux-amd64-runner.outputs.label }}
|
||||||
@@ -177,11 +178,6 @@ jobs:
|
|||||||
cargo-profile: ${{ env.CARGO_PROFILE }}
|
cargo-profile: ${{ env.CARGO_PROFILE }}
|
||||||
version: ${{ needs.allocate-runners.outputs.version }}
|
version: ${{ needs.allocate-runners.outputs.version }}
|
||||||
disable-run-tests: ${{ env.DISABLE_RUN_TESTS }}
|
disable-run-tests: ${{ env.DISABLE_RUN_TESTS }}
|
||||||
release-to-s3-bucket: ${{ vars.AWS_RELEASE_BUCKET }}
|
|
||||||
aws-access-key-id: ${{ secrets.AWS_CN_ACCESS_KEY_ID }}
|
|
||||||
aws-secret-access-key: ${{ secrets.AWS_CN_SECRET_ACCESS_KEY }}
|
|
||||||
aws-region: ${{ vars.AWS_RELEASE_BUCKET_REGION }}
|
|
||||||
upload-to-s3: ${{ vars.UPLOAD_TO_S3 }}
|
|
||||||
|
|
||||||
build-linux-arm64-artifacts:
|
build-linux-arm64-artifacts:
|
||||||
name: Build linux-arm64 artifacts
|
name: Build linux-arm64 artifacts
|
||||||
@@ -201,11 +197,6 @@ jobs:
|
|||||||
cargo-profile: ${{ env.CARGO_PROFILE }}
|
cargo-profile: ${{ env.CARGO_PROFILE }}
|
||||||
version: ${{ needs.allocate-runners.outputs.version }}
|
version: ${{ needs.allocate-runners.outputs.version }}
|
||||||
disable-run-tests: ${{ env.DISABLE_RUN_TESTS }}
|
disable-run-tests: ${{ env.DISABLE_RUN_TESTS }}
|
||||||
release-to-s3-bucket: ${{ vars.AWS_RELEASE_BUCKET }}
|
|
||||||
aws-access-key-id: ${{ secrets.AWS_CN_ACCESS_KEY_ID }}
|
|
||||||
aws-secret-access-key: ${{ secrets.AWS_CN_SECRET_ACCESS_KEY }}
|
|
||||||
aws-region: ${{ vars.AWS_RELEASE_BUCKET_REGION }}
|
|
||||||
upload-to-s3: ${{ vars.UPLOAD_TO_S3 }}
|
|
||||||
|
|
||||||
build-macos-artifacts:
|
build-macos-artifacts:
|
||||||
name: Build macOS artifacts
|
name: Build macOS artifacts
|
||||||
@@ -247,12 +238,43 @@ jobs:
|
|||||||
features: ${{ matrix.features }}
|
features: ${{ matrix.features }}
|
||||||
version: ${{ needs.allocate-runners.outputs.version }}
|
version: ${{ needs.allocate-runners.outputs.version }}
|
||||||
disable-run-tests: ${{ env.DISABLE_RUN_TESTS }}
|
disable-run-tests: ${{ env.DISABLE_RUN_TESTS }}
|
||||||
release-to-s3-bucket: ${{ vars.AWS_RELEASE_BUCKET }}
|
|
||||||
artifacts-dir: ${{ matrix.artifacts-dir-prefix }}-${{ needs.allocate-runners.outputs.version }}
|
artifacts-dir: ${{ matrix.artifacts-dir-prefix }}-${{ needs.allocate-runners.outputs.version }}
|
||||||
aws-access-key-id: ${{ secrets.AWS_CN_ACCESS_KEY_ID }}
|
|
||||||
aws-secret-access-key: ${{ secrets.AWS_CN_SECRET_ACCESS_KEY }}
|
build-windows-artifacts:
|
||||||
aws-region: ${{ vars.AWS_RELEASE_BUCKET_REGION }}
|
name: Build Windows artifacts
|
||||||
upload-to-s3: ${{ vars.UPLOAD_TO_S3 }}
|
strategy:
|
||||||
|
fail-fast: false
|
||||||
|
matrix:
|
||||||
|
include:
|
||||||
|
- os: ${{ needs.allocate-runners.outputs.windows-runner }}
|
||||||
|
arch: x86_64-pc-windows-msvc
|
||||||
|
features: servers/dashboard
|
||||||
|
artifacts-dir-prefix: greptime-windows-amd64
|
||||||
|
- os: ${{ needs.allocate-runners.outputs.windows-runner }}
|
||||||
|
arch: x86_64-pc-windows-msvc
|
||||||
|
features: pyo3_backend,servers/dashboard
|
||||||
|
artifacts-dir-prefix: greptime-windows-amd64-pyo3
|
||||||
|
runs-on: ${{ matrix.os }}
|
||||||
|
needs: [
|
||||||
|
allocate-runners,
|
||||||
|
]
|
||||||
|
if: ${{ inputs.build_windows_artifacts || github.event_name == 'push' || github.event_name == 'schedule' }}
|
||||||
|
steps:
|
||||||
|
- run: git config --global core.autocrlf false
|
||||||
|
|
||||||
|
- uses: actions/checkout@v3
|
||||||
|
with:
|
||||||
|
fetch-depth: 0
|
||||||
|
|
||||||
|
- uses: ./.github/actions/build-windows-artifacts
|
||||||
|
with:
|
||||||
|
arch: ${{ matrix.arch }}
|
||||||
|
rust-toolchain: ${{ env.RUST_TOOLCHAIN }}
|
||||||
|
cargo-profile: ${{ env.CARGO_PROFILE }}
|
||||||
|
features: ${{ matrix.features }}
|
||||||
|
version: ${{ needs.allocate-runners.outputs.version }}
|
||||||
|
disable-run-tests: ${{ env.DISABLE_RUN_TESTS }}
|
||||||
|
artifacts-dir: ${{ matrix.artifacts-dir-prefix }}-${{ needs.allocate-runners.outputs.version }}
|
||||||
|
|
||||||
release-images-to-dockerhub:
|
release-images-to-dockerhub:
|
||||||
name: Build and push images to DockerHub
|
name: Build and push images to DockerHub
|
||||||
@@ -277,15 +299,18 @@ jobs:
|
|||||||
image-registry-password: ${{ secrets.DOCKERHUB_TOKEN }}
|
image-registry-password: ${{ secrets.DOCKERHUB_TOKEN }}
|
||||||
version: ${{ needs.allocate-runners.outputs.version }}
|
version: ${{ needs.allocate-runners.outputs.version }}
|
||||||
|
|
||||||
release-images-to-acr:
|
release-cn-artifacts:
|
||||||
name: Build and push images to ACR
|
name: Release artifacts to CN region
|
||||||
if: ${{ inputs.release_images || github.event_name == 'push' || github.event_name == 'schedule' }}
|
if: ${{ inputs.release_images || github.event_name == 'push' || github.event_name == 'schedule' }}
|
||||||
needs: [
|
needs: [ # The job have to wait for all the artifacts are built.
|
||||||
allocate-runners,
|
allocate-runners,
|
||||||
build-linux-amd64-artifacts,
|
build-linux-amd64-artifacts,
|
||||||
build-linux-arm64-artifacts,
|
build-linux-arm64-artifacts,
|
||||||
|
build-macos-artifacts,
|
||||||
|
build-windows-artifacts,
|
||||||
|
release-images-to-dockerhub,
|
||||||
]
|
]
|
||||||
runs-on: ubuntu-2004-16-cores
|
runs-on: ubuntu-20.04
|
||||||
# When we push to ACR, it's easy to fail due to some unknown network issues.
|
# When we push to ACR, it's easy to fail due to some unknown network issues.
|
||||||
# However, we don't want to fail the whole workflow because of this.
|
# However, we don't want to fail the whole workflow because of this.
|
||||||
# The ACR have daily sync with DockerHub, so don't worry about the image not being updated.
|
# The ACR have daily sync with DockerHub, so don't worry about the image not being updated.
|
||||||
@@ -295,55 +320,47 @@ jobs:
         with:
           fetch-depth: 0

-      - name: Build and push images to ACR
-        uses: ./.github/actions/build-images
+      - name: Release artifacts to CN region
+        uses: ./.github/actions/release-cn-artifacts
         with:
-          image-registry: ${{ vars.ACR_IMAGE_REGISTRY }}
-          image-namespace: ${{ vars.IMAGE_NAMESPACE }}
-          image-registry-username: ${{ secrets.ALICLOUD_USERNAME }}
-          image-registry-password: ${{ secrets.ALICLOUD_PASSWORD }}
+          src-image-registry: docker.io
+          src-image-namespace: ${{ vars.IMAGE_NAMESPACE }}
+          src-image-name: greptimedb
+          dst-image-registry-username: ${{ secrets.ALICLOUD_USERNAME }}
+          dst-image-registry-password: ${{ secrets.ALICLOUD_PASSWORD }}
+          dst-image-registry: ${{ vars.ACR_IMAGE_REGISTRY }}
+          dst-image-namespace: ${{ vars.IMAGE_NAMESPACE }}
           version: ${{ needs.allocate-runners.outputs.version }}
+          aws-cn-s3-bucket: ${{ vars.AWS_RELEASE_BUCKET }}
+          aws-cn-access-key-id: ${{ secrets.AWS_CN_ACCESS_KEY_ID }}
+          aws-cn-secret-access-key: ${{ secrets.AWS_CN_SECRET_ACCESS_KEY }}
+          aws-cn-region: ${{ vars.AWS_RELEASE_BUCKET_REGION }}
+          dev-mode: false
+          update-version-info: true
+          push-latest-tag: true

-  release-artifacts:
+  publish-github-release:
     name: Create GitHub release and upload artifacts
-    if: ${{ inputs.release_artifacts || github.event_name == 'push' || github.event_name == 'schedule' }}
+    if: ${{ inputs.publish_github_release || github.event_name == 'push' || github.event_name == 'schedule' }}
-    needs: [
+    needs: [ # The job have to wait for all the artifacts are built.
       allocate-runners,
       build-linux-amd64-artifacts,
       build-linux-arm64-artifacts,
       build-macos-artifacts,
+      build-windows-artifacts,
       release-images-to-dockerhub,
     ]
-    runs-on: ubuntu-latest
+    runs-on: ubuntu-20.04
     steps:
       - uses: actions/checkout@v3
         with:
           fetch-depth: 0

-      - name: Release artifacts
-        uses: ./.github/actions/release-artifacts
+      - name: Publish GitHub release
+        uses: ./.github/actions/publish-github-release
         with:
           version: ${{ needs.allocate-runners.outputs.version }}

-  release-dev-builder-image:
-    name: Release dev builder image
-    if: ${{ inputs.release_dev_builder_image }} # Only manually trigger this job.
-    runs-on: ubuntu-latest-16-cores
-    steps:
-      - name: Checkout
-        uses: actions/checkout@v3
-        with:
-          fetch-depth: 0
-
-      - name: Build and push dev builder image
-        uses: ./.github/actions/build-dev-builder-image
-        with:
-          dockerhub-image-registry-username: ${{ secrets.DOCKERHUB_USERNAME }}
-          dockerhub-image-registry-token: ${{ secrets.DOCKERHUB_TOKEN }}
-          acr-image-registry: ${{ vars.ACR_IMAGE_REGISTRY }}
-          acr-image-registry-username: ${{ secrets.ALICLOUD_USERNAME }}
-          acr-image-registry-password: ${{ secrets.ALICLOUD_PASSWORD }}
   ### Stop runners ###
   # It's very necessary to split the job of releasing runners into 'stop-linux-amd64-runner' and 'stop-linux-arm64-runner'.
   # Because we can terminate the specified EC2 instance immediately after the job is finished without uncessary waiting.
@@ -351,7 +368,7 @@ jobs:
     name: Stop linux-amd64 runner
     # Only run this job when the runner is allocated.
     if: ${{ always() }}
-    runs-on: ubuntu-latest
+    runs-on: ubuntu-20.04
     needs: [
       allocate-runners,
       build-linux-amd64-artifacts,
@@ -376,7 +393,7 @@ jobs:
     name: Stop linux-arm64 runner
     # Only run this job when the runner is allocated.
     if: ${{ always() }}
-    runs-on: ubuntu-latest
+    runs-on: ubuntu-20.04
     needs: [
       allocate-runners,
       build-linux-arm64-artifacts,
.github/workflows/size-label.yml (new file, 25 lines)
@@ -0,0 +1,25 @@
+name: size-labeler
+
+on: [pull_request_target]
+
+jobs:
+  labeler:
+    runs-on: ubuntu-latest
+    name: Label the PR size
+    permissions:
+      issues: write
+      pull-requests: write
+    steps:
+      - uses: codelytv/pr-size-labeler@v1
+        with:
+          GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
+          s_label: 'Size: S'
+          s_max_size: '100'
+          m_label: 'Size: M'
+          m_max_size: '500'
+          l_label: 'Size: L'
+          l_max_size: '1000'
+          xl_label: 'Size: XL'
+          fail_if_xl: 'false'
+          message_if_xl: ""
+          files_to_ignore: 'Cargo.lock'
.github/workflows/user-doc-label-checker.yml (new file, 19 lines)
@@ -0,0 +1,19 @@
+name: Check user doc labels
+on:
+  pull_request:
+    types:
+      - opened
+      - reopened
+      - labeled
+      - unlabeled
+
+jobs:
+
+  check_labels:
+    name: Check doc labels
+    runs-on: ubuntu-latest
+    steps:
+      - uses: docker://agilepathway/pull-request-label-checker:latest
+        with:
+          one_of: Doc update required,Doc not needed
+          repo_token: ${{ secrets.GITHUB_TOKEN }}
@@ -1,14 +0,0 @@
-header:
-  license:
-    spdx-id: Apache-2.0
-    copyright-owner: Greptime Team
-
-  paths:
-    - "**/*.rs"
-    - "**/*.py"
-
-  comment: on-failure
-
-  dependency:
-    files:
-      - Cargo.toml
CONTRIBUTING.md
@@ -2,7 +2,7 @@
 Thanks a lot for considering contributing to GreptimeDB. We believe people like you would make GreptimeDB a great product. We intend to build a community where individuals can have open talks, show respect for one another, and speak with true ❤️. Meanwhile, we are to keep transparency and make your effort count here.

-Read the guidelines, and they can help you get started. Communicate with respect to developers maintaining and developing the project. In return, they should reciprocate that respect by addressing your issue, reviewing changes, as well as helping finalize and merge your pull requests.
+Please read the guidelines, and they can help you get started. Communicate with respect to developers maintaining and developing the project. In return, they should reciprocate that respect by addressing your issue, reviewing changes, as well as helping finalize and merge your pull requests.

 Follow our [README](https://github.com/GreptimeTeam/greptimedb#readme) to get the whole picture of the project. To learn about the design of GreptimeDB, please refer to the [design docs](https://github.com/GrepTimeTeam/docs).
@@ -21,7 +21,7 @@ Pull requests are great, but we accept all kinds of other help if you like. Such
 - Write tutorials or blog posts. Blog, speak about, or create tutorials about one of GreptimeDB's many features. Mention [@greptime](https://twitter.com/greptime) on Twitter and email info@greptime.com so we can give pointers and tips and help you spread the word by promoting your content on Greptime communication channels.
 - Improve the documentation. [Submit documentation](http://github.com/greptimeTeam/docs/) updates, enhancements, designs, or bug fixes, and fixing any spelling or grammar errors will be very much appreciated.
 - Present at meetups and conferences about your GreptimeDB projects. Your unique challenges and successes in building things with GreptimeDB can provide great speaking material. We'd love to review your talk abstract, so get in touch with us if you'd like some help!
-- Submit bug reports. To report a bug or a security issue, you can [open a new GitHub issue](https://github.com/GrepTimeTeam/greptimedb/issues/new).
+- Submitting bug reports. To report a bug or a security issue, you can [open a new GitHub issue](https://github.com/GrepTimeTeam/greptimedb/issues/new).
 - Speak up feature requests. Send feedback is a great way for us to understand your different use cases of GreptimeDB better. If you want to share your experience with GreptimeDB, or if you want to discuss any ideas, you can start a discussion on [GitHub discussions](https://github.com/GreptimeTeam/greptimedb/discussions), chat with the Greptime team on [Slack](https://greptime.com/slack), or you can tweet [@greptime](https://twitter.com/greptime) on Twitter.

 ## Code of Conduct
@@ -49,6 +49,7 @@ GreptimeDB uses the [Apache 2.0 license](https://github.com/GreptimeTeam/greptim
 ### Before PR

 - To ensure that community is free and confident in its ability to use your contributions, please sign the Contributor License Agreement (CLA) which will be incorporated in the pull request process.
+- Make sure all files have proper license header (running `docker run --rm -v $(pwd):/github/workspace ghcr.io/korandoru/hawkeye-native:v3 format` from the project root).
 - Make sure all your codes are formatted and follow the [coding style](https://pingcap.github.io/style-guide/rust/).
 - Make sure all unit tests are passed (using `cargo test --workspace` or [nextest](https://nexte.st/index.html) `cargo nextest run`).
 - Make sure all clippy warnings are fixed (you can check it locally by running `cargo clippy --workspace --all-targets -- -D warnings`).
@@ -81,7 +82,7 @@ Now, `pre-commit` will run automatically on `git commit`.
 ### Title

 The titles of pull requests should be prefixed with category names listed in [Conventional Commits specification](https://www.conventionalcommits.org/en/v1.0.0)
-like `feat`/`fix`/`docs`, with a concise summary of code change following. DO NOT use last commit message as pull request title.
+like `feat`/`fix`/`docs`, with a concise summary of code change following. AVOID using the last commit message as pull request title.

 ### Description
@@ -100,7 +101,7 @@ of what you were trying to do and what went wrong. You can also reach for help i
 ## Community

-The core team will be thrilled if you participate in any way you like. When you are stuck, try ask for help by filing an issue, with a detailed description of what you were trying to do and what went wrong. If you have any questions or if you would like to get involved in our community, please check out:
+The core team will be thrilled if you would like to participate in any way you like. When you are stuck, try to ask for help by filing an issue, with a detailed description of what you were trying to do and what went wrong. If you have any questions or if you would like to get involved in our community, please check out:

 - [GreptimeDB Community Slack](https://greptime.com/slack)
 - [GreptimeDB Github Discussions](https://github.com/GreptimeTeam/greptimedb/discussions)
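The pre-PR checklist above boils down to a handful of commands. A minimal sketch of running them locally, assuming Docker and the Rust toolchain are installed (the commands are taken from the checklist itself; `cargo nextest run` additionally requires installing nextest, e.g. with `cargo install cargo-nextest`):

```
# Format the license headers (run from the project root, as the checklist notes).
docker run --rm -v $(pwd):/github/workspace ghcr.io/korandoru/hawkeye-native:v3 format

# Run the unit tests: plain cargo, or nextest if it is installed.
cargo test --workspace
# cargo nextest run

# Check clippy lints, treating warnings as errors.
cargo clippy --workspace --all-targets -- -D warnings
```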
Cargo.lock (generated, 4253 lines changed): file diff suppressed because it is too large.
Cargo.toml (75 lines changed)
@@ -27,6 +27,7 @@ members = [
     "src/common/telemetry",
     "src/common/test-util",
     "src/common/time",
+    "src/common/decimal",
     "src/common/version",
     "src/datanode",
     "src/datatypes",
@@ -35,84 +36,106 @@ members = [
     "src/log-store",
     "src/meta-client",
     "src/meta-srv",
+    "src/metric-engine",
     "src/mito2",
     "src/object-store",
     "src/operator",
     "src/partition",
     "src/plugins",
     "src/promql",
+    "src/puffin",
     "src/query",
     "src/script",
     "src/servers",
     "src/session",
     "src/sql",
-    "src/storage",
     "src/store-api",
     "src/table",
+    "src/index",
     "tests-integration",
     "tests/runner",
 ]
 resolver = "2"

 [workspace.package]
-version = "0.4.0"
+version = "0.5.0"
 edition = "2021"
 license = "Apache-2.0"

 [workspace.dependencies]
+ahash = { version = "0.8", features = ["compile-time-rng"] }
 aquamarine = "0.3"
-arrow = { version = "43.0" }
-arrow-array = "43.0"
-arrow-flight = "43.0"
-arrow-schema = { version = "43.0", features = ["serde"] }
+arrow = { version = "47.0" }
+arrow-array = "47.0"
+arrow-flight = "47.0"
+arrow-schema = { version = "47.0", features = ["serde"] }
 async-stream = "0.3"
 async-trait = "0.1"
+base64 = "0.21"
+bigdecimal = "0.4.2"
+bitflags = "2.4.1"
+bytemuck = "1.12"
+bytes = { version = "1.5", features = ["serde"] }
 chrono = { version = "0.4", features = ["serde"] }
+dashmap = "5.4"
-datafusion = { git = "https://github.com/waynexia/arrow-datafusion.git", rev = "b6f3b28b6fe91924cc8dd3d83726b766f2a706ec" }
-datafusion-common = { git = "https://github.com/waynexia/arrow-datafusion.git", rev = "b6f3b28b6fe91924cc8dd3d83726b766f2a706ec" }
-datafusion-expr = { git = "https://github.com/waynexia/arrow-datafusion.git", rev = "b6f3b28b6fe91924cc8dd3d83726b766f2a706ec" }
-datafusion-optimizer = { git = "https://github.com/waynexia/arrow-datafusion.git", rev = "b6f3b28b6fe91924cc8dd3d83726b766f2a706ec" }
-datafusion-physical-expr = { git = "https://github.com/waynexia/arrow-datafusion.git", rev = "b6f3b28b6fe91924cc8dd3d83726b766f2a706ec" }
-datafusion-sql = { git = "https://github.com/waynexia/arrow-datafusion.git", rev = "b6f3b28b6fe91924cc8dd3d83726b766f2a706ec" }
-datafusion-substrait = { git = "https://github.com/waynexia/arrow-datafusion.git", rev = "b6f3b28b6fe91924cc8dd3d83726b766f2a706ec" }
+datafusion = { git = "https://github.com/apache/arrow-datafusion.git", rev = "26e43acac3a96cec8dd4c8365f22dfb1a84306e9" }
+datafusion-common = { git = "https://github.com/apache/arrow-datafusion.git", rev = "26e43acac3a96cec8dd4c8365f22dfb1a84306e9" }
+datafusion-expr = { git = "https://github.com/apache/arrow-datafusion.git", rev = "26e43acac3a96cec8dd4c8365f22dfb1a84306e9" }
+datafusion-optimizer = { git = "https://github.com/apache/arrow-datafusion.git", rev = "26e43acac3a96cec8dd4c8365f22dfb1a84306e9" }
+datafusion-physical-expr = { git = "https://github.com/apache/arrow-datafusion.git", rev = "26e43acac3a96cec8dd4c8365f22dfb1a84306e9" }
+datafusion-sql = { git = "https://github.com/apache/arrow-datafusion.git", rev = "26e43acac3a96cec8dd4c8365f22dfb1a84306e9" }
+datafusion-substrait = { git = "https://github.com/apache/arrow-datafusion.git", rev = "26e43acac3a96cec8dd4c8365f22dfb1a84306e9" }
 derive_builder = "0.12"
-etcd-client = "0.11"
+etcd-client = "0.12"
+fst = "0.4.7"
 futures = "0.3"
 futures-util = "0.3"
-greptime-proto = { git = "https://github.com/GreptimeTeam/greptime-proto.git", rev = "1f1dd532a111e3834cc3019c5605e2993ffb9dc3" }
+greptime-proto = { git = "https://github.com/GreptimeTeam/greptime-proto.git", rev = "a31ea166fc015ea7ff111ac94e26c3a5d64364d2" }
 humantime-serde = "1.1"
 itertools = "0.10"
 lazy_static = "1.4"
 meter-core = { git = "https://github.com/GreptimeTeam/greptime-meter.git", rev = "abbd357c1e193cd270ea65ee7652334a150b628f" }
-metrics = "0.20"
+mockall = "0.11.4"
 moka = "0.12"
 once_cell = "1.18"
-opentelemetry-proto = { version = "0.2", features = ["gen-tonic", "metrics"] }
-parquet = "43.0"
+opentelemetry-proto = { git = "https://github.com/waynexia/opentelemetry-rust.git", rev = "33841b38dda79b15f2024952be5f32533325ca02", features = [
+    "gen-tonic",
+    "metrics",
+    "trace",
+] }
+parquet = "47.0"
 paste = "1.0"
-prost = "0.11"
+pin-project = "1.0"
+prometheus = { version = "0.13.3", features = ["process"] }
+prost = "0.12"
 raft-engine = { git = "https://github.com/tikv/raft-engine.git", rev = "22dfb426cd994602b57725ef080287d3e53db479" }
 rand = "0.8"
 regex = "1.8"
+regex-automata = { version = "0.1", features = ["transducer"] }
 reqwest = { version = "0.11", default-features = false, features = [
     "json",
     "rustls-tls-native-roots",
     "stream",
 ] }
+rskafka = "0.5"
+rust_decimal = "1.33"
 serde = { version = "1.0", features = ["derive"] }
 serde_json = "1.0"
 smallvec = "1"
-snafu = { version = "0.7", features = ["backtraces"] }
+snafu = "0.7"
-sqlparser = { git = "https://github.com/GreptimeTeam/sqlparser-rs.git", rev = "296a4f6c73b129d6f565a42a2e5e53c6bc2b9da4", features = [
+# on branch v0.38.x
+sqlparser = { git = "https://github.com/GreptimeTeam/sqlparser-rs.git", rev = "6a93567ae38d42be5c8d08b13c8ff4dde26502ef", features = [
     "visitor",
 ] }
 strum = { version = "0.25", features = ["derive"] }
 tempfile = "3"
 tokio = { version = "1.28", features = ["full"] }
+tokio-stream = { version = "0.1" }
 tokio-util = { version = "0.7", features = ["io-util", "compat"] }
-toml = "0.7"
+toml = "0.8.8"
-tonic = { version = "0.9", features = ["tls"] }
+tonic = { version = "0.10", features = ["tls"] }
 uuid = { version = "1", features = ["serde", "v4", "fast-rng"] }

 ## workspaces members
 api = { path = "src/api" }
 auth = { path = "src/auth" }
@@ -123,6 +146,7 @@ common-base = { path = "src/common/base" }
 common-catalog = { path = "src/common/catalog" }
 common-config = { path = "src/common/config" }
 common-datasource = { path = "src/common/datasource" }
+common-decimal = { path = "src/common/decimal" }
 common-error = { path = "src/common/error" }
 common-function = { path = "src/common/function" }
 common-greptimedb-telemetry = { path = "src/common/greptimedb-telemetry" }
@@ -148,7 +172,7 @@ frontend = { path = "src/frontend" }
 log-store = { path = "src/log-store" }
 meta-client = { path = "src/meta-client" }
 meta-srv = { path = "src/meta-srv" }
-mito = { path = "src/mito" }
+metric-engine = { path = "src/metric-engine" }
 mito2 = { path = "src/mito2" }
 object-store = { path = "src/object-store" }
 operator = { path = "src/operator" }
@@ -160,7 +184,6 @@ script = { path = "src/script" }
 servers = { path = "src/servers" }
 session = { path = "src/session" }
 sql = { path = "src/sql" }
-storage = { path = "src/storage" }
 store-api = { path = "src/store-api" }
 substrait = { path = "src/common/substrait" }
 table = { path = "src/table" }
@@ -170,7 +193,7 @@ git = "https://github.com/GreptimeTeam/greptime-meter.git"
 rev = "abbd357c1e193cd270ea65ee7652334a150b628f"

 [profile.release]
-debug = true
+debug = 1

 [profile.nightly]
 inherits = "release"
LICENSE (2 lines changed)
@@ -186,7 +186,7 @@
       same "printed page" as the copyright notice for easier
       identification within third-party archives.

-   Copyright 2022 Greptime Team
+   Copyright [yyyy] [name of copyright owner]

    Licensed under the Apache License, Version 2.0 (the "License");
    you may not use this file except in compliance with the License.
Makefile (6 lines changed)
@@ -94,7 +94,7 @@ build-android-bin: ## Build greptime binary for android.
     CARGO_BUILD_EXTRA_OPTS="--bin greptime --no-default-features"

 .PHONY: strip-android-bin
-strip-android-bin: ## Strip greptime binary for android.
+strip-android-bin: build-android-bin ## Strip greptime binary for android.
     docker run --network=host \
     -v ${PWD}:/greptimedb \
     -w /greptimedb ${IMAGE_REGISTRY}/${IMAGE_NAMESPACE}/dev-builder-android:latest \
@@ -157,11 +157,11 @@ sqlness-test: ## Run sqlness test.

 .PHONY: check
 check: ## Cargo check all the targets.
-    cargo check --workspace --all-targets
+    cargo check --workspace --all-targets --all-features

 .PHONY: clippy
 clippy: ## Check clippy rules.
-    cargo clippy --workspace --all-targets -F pyo3_backend -- -D warnings
+    cargo clippy --workspace --all-targets --all-features -- -D warnings

 .PHONY: fmt-check
 fmt-check: ## Check code format.
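With the Makefile changes above, both lint targets now cover every feature flag. A quick way to reproduce them locally is simply to invoke the updated targets; this assumes GNU make and the Rust toolchain are available, and the comments restate the recipes from the diff:

```
make check    # runs: cargo check --workspace --all-targets --all-features
make clippy   # runs: cargo clippy --workspace --all-targets --all-features -- -D warnings
```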
README.md (20 lines changed)
@@ -27,13 +27,8 @@
 <a href="https://greptime.com/slack"><img src="https://img.shields.io/badge/slack-GreptimeDB-0abd59?logo=slack" alt="slack" /></a>
 </p>

-## Upcoming Event
-Come and meet us in **KubeCon + CloudNativeCon North America 2023!**
-<p align="center">
-<picture>
-<img alt="KubeCon + CloudNativeCon North Logo" src="./docs/banner/KCCNC_NA_2023_1000x200_Email Banner.png" width="800px">
-</picture>
-</p>
+> [!WARNING]
+> Our default branch has changed from `develop` to `main` (issue [#3025](https://github.com/GreptimeTeam/greptimedb/issues/3025)). Please update your local repository to use the `main` branch.

 ## What is GreptimeDB
@@ -104,11 +99,11 @@ Or if you built from docker:
 docker run -p 4002:4002 -v "$(pwd):/tmp/greptimedb" greptime/greptimedb standalone start
 ```

-Please see [the online document site](https://docs.greptime.com/getting-started/overview#install-greptimedb) for more installation options and [operations info](https://docs.greptime.com/user-guide/operations/overview).
+Please see the online document site for more installation options and [operations info](https://docs.greptime.com/user-guide/operations/overview).

 ### Get started

-Read the [complete getting started guide](https://docs.greptime.com/getting-started/overview#connect) on our [official document site](https://docs.greptime.com/).
+Read the [complete getting started guide](https://docs.greptime.com/getting-started/overview) on our [official document site](https://docs.greptime.com/).

 To write and query data, GreptimeDB is compatible with multiple [protocols and clients](https://docs.greptime.com/user-guide/clients/overview).
@@ -117,7 +112,7 @@ To write and query data, GreptimeDB is compatible with multiple [protocols and c
 ### Installation

 - [Pre-built Binaries](https://greptime.com/download):
   For Linux and macOS, you can easily download pre-built binaries including official releases and nightly builds that are ready to use.
   In most cases, downloading the version without PyO3 is sufficient. However, if you plan to run scripts in CPython (and use Python packages like NumPy and Pandas), you will need to download the version with PyO3 and install a Python with the same version as the Python in the PyO3 version.
   We recommend using virtualenv for the installation process to manage multiple Python versions.
 - [Docker Images](https://hub.docker.com/r/greptime/greptimedb)(**recommended**): pre-built
@@ -143,6 +138,7 @@ To write and query data, GreptimeDB is compatible with multiple [protocols and c
 - [GreptimeDB Java Client](https://github.com/GreptimeTeam/greptimedb-client-java)
 - [GreptimeDB Python Client](https://github.com/GreptimeTeam/greptimedb-client-py) (WIP)
 - [GreptimeDB Rust Client](https://github.com/GreptimeTeam/greptimedb-client-rust)
+- [GreptimeDB JavaScript Client](https://github.com/GreptimeTeam/greptime-js-sdk)

 ## Project Status
@@ -184,6 +180,6 @@ Please refer to [contribution guidelines](CONTRIBUTING.md) for more information.
 ## Acknowledgement
 - GreptimeDB uses [Apache Arrow](https://arrow.apache.org/) as the memory model and [Apache Parquet](https://parquet.apache.org/) as the persistent file format.
 - GreptimeDB's query engine is powered by [Apache Arrow DataFusion](https://github.com/apache/arrow-datafusion).
-- [OpenDAL](https://github.com/datafuselabs/opendal) from [Datafuse Labs](https://github.com/datafuselabs) gives GreptimeDB a very general and elegant data access abstraction layer.
+- [Apache OpenDAL (incubating)](https://opendal.apache.org) gives GreptimeDB a very general and elegant data access abstraction layer.
-- GreptimeDB's meta service is based on [etcd](https://etcd.io/).
+- GreptimeDB's meta service is based on [etcd](https://etcd.io/).
 - GreptimeDB uses [RustPython](https://github.com/RustPython/RustPython) for experimental embedded python scripting.
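For reference, the Docker quick start quoted in the README hunks above can be run end to end as follows. The image name, port, and run arguments come straight from the diff context; `docker pull` is only the standard pre-fetch step, and the mounted host directory is an arbitrary example:

```
docker pull greptime/greptimedb
docker run -p 4002:4002 -v "$(pwd):/tmp/greptimedb" greptime/greptimedb standalone start
```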
@@ -6,8 +6,10 @@ license.workspace = true

 [dependencies]
 arrow.workspace = true
+chrono.workspace = true
 clap = { version = "4.0", features = ["derive"] }
-client = { workspace = true }
+client.workspace = true
+futures-util.workspace = true
 indicatif = "0.17.1"
 itertools.workspace = true
 parquet.workspace = true
@@ -29,14 +29,14 @@ use client::api::v1::column::Values;
 use client::api::v1::{
     Column, ColumnDataType, ColumnDef, CreateTableExpr, InsertRequest, InsertRequests, SemanticType,
 };
-use client::{Client, Database, DEFAULT_CATALOG_NAME, DEFAULT_SCHEMA_NAME};
+use client::{Client, Database, Output, DEFAULT_CATALOG_NAME, DEFAULT_SCHEMA_NAME};
+use futures_util::TryStreamExt;
 use indicatif::{MultiProgress, ProgressBar, ProgressStyle};
 use parquet::arrow::arrow_reader::ParquetRecordBatchReaderBuilder;
 use tokio::task::JoinSet;

 const CATALOG_NAME: &str = "greptime";
 const SCHEMA_NAME: &str = "public";
-const TABLE_NAME: &str = "nyc_taxi";
@@ -74,7 +74,12 @@ fn get_file_list<P: AsRef<Path>>(path: P) -> Vec<PathBuf> {
         .collect()
 }

+fn new_table_name() -> String {
+    format!("nyc_taxi_{}", chrono::Utc::now().timestamp())
+}
+
 async fn write_data(
+    table_name: &str,
     batch_size: usize,
     db: &Database,
     path: PathBuf,
@@ -104,7 +109,7 @@ async fn write_data(
         }
         let (columns, row_count) = convert_record_batch(record_batch);
         let request = InsertRequest {
-            table_name: TABLE_NAME.to_string(),
+            table_name: table_name.to_string(),
             columns,
             row_count,
         };
@@ -113,7 +118,7 @@
         };

         let now = Instant::now();
-        let _ = db.insert(requests).await.unwrap();
+        db.insert(requests).await.unwrap();
         let elapsed = now.elapsed();
         total_rpc_elapsed_ms += elapsed.as_millis();
         progress_bar.inc(row_count as _);
@@ -131,6 +136,11 @@ fn convert_record_batch(record_batch: RecordBatch) -> (Vec<Column>, u32) {

     for (array, field) in record_batch.columns().iter().zip(fields.iter()) {
         let (values, datatype) = build_values(array);
+        let semantic_type = match field.name().as_str() {
+            "VendorID" => SemanticType::Tag,
+            "tpep_pickup_datetime" => SemanticType::Timestamp,
+            _ => SemanticType::Field,
+        };
+
         let column = Column {
             column_name: field.name().clone(),
@@ -141,7 +151,7 @@ fn convert_record_batch(record_batch: RecordBatch) -> (Vec<Column>, u32) {
                 .map(|bitmap| bitmap.buffer().as_slice().to_vec())
                 .unwrap_or_default(),
             datatype: datatype.into(),
-            // datatype and semantic_type are set to default
+            semantic_type: semantic_type as i32,
             ..Default::default()
         };
         columns.push(column);
@@ -243,11 +253,11 @@ fn is_record_batch_full(batch: &RecordBatch) -> bool {
     batch.columns().iter().all(|col| col.null_count() == 0)
 }

-fn create_table_expr() -> CreateTableExpr {
+fn create_table_expr(table_name: &str) -> CreateTableExpr {
     CreateTableExpr {
         catalog_name: CATALOG_NAME.to_string(),
         schema_name: SCHEMA_NAME.to_string(),
-        table_name: TABLE_NAME.to_string(),
+        table_name: table_name.to_string(),
         desc: "".to_string(),
         column_defs: vec![
             ColumnDef {
@@ -257,14 +267,16 @@ fn create_table_expr() -> CreateTableExpr {
                 default_constraint: vec![],
                 semantic_type: SemanticType::Tag as i32,
                 comment: String::new(),
+                ..Default::default()
             },
             ColumnDef {
                 name: "tpep_pickup_datetime".to_string(),
                 data_type: ColumnDataType::TimestampMicrosecond as i32,
-                is_nullable: true,
+                is_nullable: false,
                 default_constraint: vec![],
                 semantic_type: SemanticType::Timestamp as i32,
                 comment: String::new(),
+                ..Default::default()
             },
             ColumnDef {
                 name: "tpep_dropoff_datetime".to_string(),
@@ -273,6 +285,7 @@ fn create_table_expr() -> CreateTableExpr {
                 default_constraint: vec![],
                 semantic_type: SemanticType::Field as i32,
                 comment: String::new(),
+                ..Default::default()
             },
             ColumnDef {
                 name: "passenger_count".to_string(),
@@ -281,6 +294,7 @@ fn create_table_expr() -> CreateTableExpr {
                 default_constraint: vec![],
                 semantic_type: SemanticType::Field as i32,
                 comment: String::new(),
+                ..Default::default()
             },
             ColumnDef {
                 name: "trip_distance".to_string(),
@@ -289,6 +303,7 @@ fn create_table_expr() -> CreateTableExpr {
                 default_constraint: vec![],
                 semantic_type: SemanticType::Field as i32,
                 comment: String::new(),
+                ..Default::default()
             },
             ColumnDef {
                 name: "RatecodeID".to_string(),
@@ -297,6 +312,7 @@ fn create_table_expr() -> CreateTableExpr {
                 default_constraint: vec![],
                 semantic_type: SemanticType::Field as i32,
                 comment: String::new(),
+                ..Default::default()
             },
             ColumnDef {
                 name: "store_and_fwd_flag".to_string(),
@@ -305,6 +321,7 @@ fn create_table_expr() -> CreateTableExpr {
                 default_constraint: vec![],
                 semantic_type: SemanticType::Field as i32,
                 comment: String::new(),
+                ..Default::default()
             },
             ColumnDef {
                 name: "PULocationID".to_string(),
@@ -313,6 +330,7 @@ fn create_table_expr() -> CreateTableExpr {
                 default_constraint: vec![],
                 semantic_type: SemanticType::Field as i32,
                 comment: String::new(),
+                ..Default::default()
             },
             ColumnDef {
                 name: "DOLocationID".to_string(),
@@ -321,6 +339,7 @@ fn create_table_expr() -> CreateTableExpr {
                 default_constraint: vec![],
                 semantic_type: SemanticType::Field as i32,
                 comment: String::new(),
+                ..Default::default()
             },
             ColumnDef {
                 name: "payment_type".to_string(),
@@ -329,6 +348,7 @@ fn create_table_expr() -> CreateTableExpr {
                 default_constraint: vec![],
                 semantic_type: SemanticType::Field as i32,
                 comment: String::new(),
+                ..Default::default()
             },
             ColumnDef {
                 name: "fare_amount".to_string(),
@@ -337,6 +357,7 @@ fn create_table_expr() -> CreateTableExpr {
                 default_constraint: vec![],
                 semantic_type: SemanticType::Field as i32,
                 comment: String::new(),
+                ..Default::default()
             },
             ColumnDef {
                 name: "extra".to_string(),
@@ -345,6 +366,7 @@ fn create_table_expr() -> CreateTableExpr {
                 default_constraint: vec![],
                 semantic_type: SemanticType::Field as i32,
                 comment: String::new(),
+                ..Default::default()
             },
             ColumnDef {
                 name: "mta_tax".to_string(),
@@ -353,6 +375,7 @@ fn create_table_expr() -> CreateTableExpr {
                 default_constraint: vec![],
                 semantic_type: SemanticType::Field as i32,
                 comment: String::new(),
+                ..Default::default()
             },
             ColumnDef {
                 name: "tip_amount".to_string(),
@@ -361,6 +384,7 @@ fn create_table_expr() -> CreateTableExpr {
                 default_constraint: vec![],
                 semantic_type: SemanticType::Field as i32,
                 comment: String::new(),
+                ..Default::default()
             },
             ColumnDef {
                 name: "tolls_amount".to_string(),
@@ -369,6 +393,7 @@ fn create_table_expr() -> CreateTableExpr {
                 default_constraint: vec![],
                 semantic_type: SemanticType::Field as i32,
                 comment: String::new(),
+                ..Default::default()
             },
             ColumnDef {
                 name: "improvement_surcharge".to_string(),
@@ -377,6 +402,7 @@ fn create_table_expr() -> CreateTableExpr {
                 default_constraint: vec![],
                 semantic_type: SemanticType::Field as i32,
                 comment: String::new(),
+                ..Default::default()
             },
             ColumnDef {
                 name: "total_amount".to_string(),
@@ -385,6 +411,7 @@ fn create_table_expr() -> CreateTableExpr {
                 default_constraint: vec![],
                 semantic_type: SemanticType::Field as i32,
                 comment: String::new(),
+                ..Default::default()
             },
             ColumnDef {
                 name: "congestion_surcharge".to_string(),
@@ -393,6 +420,7 @@ fn create_table_expr() -> CreateTableExpr {
                 default_constraint: vec![],
                 semantic_type: SemanticType::Field as i32,
                 comment: String::new(),
+                ..Default::default()
             },
             ColumnDef {
                 name: "airport_fee".to_string(),
@@ -401,35 +429,36 @@ fn create_table_expr() -> CreateTableExpr {
                 default_constraint: vec![],
                 semantic_type: SemanticType::Field as i32,
                 comment: String::new(),
+                ..Default::default()
             },
         ],
         time_index: "tpep_pickup_datetime".to_string(),
         primary_keys: vec!["VendorID".to_string()],
-        create_if_not_exists: false,
+        create_if_not_exists: true,
         table_options: Default::default(),
         table_id: None,
         engine: "mito".to_string(),
     }
 }

-fn query_set() -> HashMap<String, String> {
+fn query_set(table_name: &str) -> HashMap<String, String> {
     HashMap::from([
         (
             "count_all".to_string(),
-            format!("SELECT COUNT(*) FROM {TABLE_NAME};"),
+            format!("SELECT COUNT(*) FROM {table_name};"),
         ),
         (
             "fare_amt_by_passenger".to_string(),
-            format!("SELECT passenger_count, MIN(fare_amount), MAX(fare_amount), SUM(fare_amount) FROM {TABLE_NAME} GROUP BY passenger_count"),
+            format!("SELECT passenger_count, MIN(fare_amount), MAX(fare_amount), SUM(fare_amount) FROM {table_name} GROUP BY passenger_count"),
         )
     ])
 }

-async fn do_write(args: &Args, db: &Database) {
+async fn do_write(args: &Args, db: &Database, table_name: &str) {
     let mut file_list = get_file_list(args.path.clone().expect("Specify data path in argument"));
     let mut write_jobs = JoinSet::new();

-    let create_table_result = db.create(create_table_expr()).await;
+    let create_table_result = db.create(create_table_expr(table_name)).await;
     println!("Create table result: {create_table_result:?}");

     let progress_bar_style = ProgressStyle::with_template(
@@ -447,8 +476,10 @@ async fn do_write(args: &Args, db: &Database) {
             let db = db.clone();
             let mpb = multi_progress_bar.clone();
             let pb_style = progress_bar_style.clone();
-            let _ = write_jobs
-                .spawn(async move { write_data(batch_size, &db, path, mpb, pb_style).await });
+            let table_name = table_name.to_string();
+            let _ = write_jobs.spawn(async move {
+                write_data(&table_name, batch_size, &db, path, mpb, pb_style).await
+            });
         }
     }
     while write_jobs.join_next().await.is_some() {
@@ -457,24 +488,32 @@ async fn do_write(args: &Args, db: &Database) {
             let db = db.clone();
             let mpb = multi_progress_bar.clone();
             let pb_style = progress_bar_style.clone();
-            let _ = write_jobs
-                .spawn(async move { write_data(batch_size, &db, path, mpb, pb_style).await });
+            let table_name = table_name.to_string();
+            let _ = write_jobs.spawn(async move {
+                write_data(&table_name, batch_size, &db, path, mpb, pb_style).await
+            });
         }
     }
 }

-async fn do_query(num_iter: usize, db: &Database) {
-    for (query_name, query) in query_set() {
+async fn do_query(num_iter: usize, db: &Database, table_name: &str) {
+    for (query_name, query) in query_set(table_name) {
         println!("Running query: {query}");
         for i in 0..num_iter {
             let now = Instant::now();
-            let _res = db.sql(&query).await.unwrap();
+            let res = db.sql(&query).await.unwrap();
+            match res {
+                Output::AffectedRows(_) | Output::RecordBatches(_) => (),
+                Output::Stream(stream) => {
+                    stream.try_collect::<Vec<_>>().await.unwrap();
+                }
+            }
             let elapsed = now.elapsed();
             println!(
                 "query {}, iteration {}: {}ms",
                 query_name,
                 i,
-                elapsed.as_millis()
+                elapsed.as_millis(),
             );
         }
     }
@@ -491,13 +530,14 @@ fn main() {
         .block_on(async {
             let client = Client::with_urls(vec![&args.endpoint]);
             let db = Database::new(DEFAULT_CATALOG_NAME, DEFAULT_SCHEMA_NAME, client);
+            let table_name = new_table_name();

             if !args.skip_write {
-                do_write(&args, &db).await;
+                do_write(&args, &db, &table_name).await;
             }

             if !args.skip_read {
-                do_query(args.iter_num, &db).await;
+                do_query(args.iter_num, &db, &table_name).await;
             }
         })
 }
@@ -12,26 +12,36 @@ rpc_runtime_size = 8
 # It will block the datanode start if it can't receive leases in the heartbeat from metasrv.
 require_lease_before_startup = false

+# Initialize all regions in the background during the startup.
+# By default, it provides services after all regions have been initialized.
+initialize_region_in_background = false
+
 [heartbeat]
-# Interval for sending heartbeat messages to the Metasrv in milliseconds, 3000 by default.
-interval_millis = 3000
+# Interval for sending heartbeat messages to the Metasrv, 3 seconds by default.
+interval = "3s"

 # Metasrv client options.
 [meta_client]
 # Metasrv address list.
 metasrv_addrs = ["127.0.0.1:3002"]
-# Heartbeat timeout in milliseconds, 500 by default.
-heartbeat_timeout_millis = 500
+# Heartbeat timeout, 500 milliseconds by default.
+heartbeat_timeout = "500ms"
-# Operation timeout in milliseconds, 3000 by default.
-timeout_millis = 3000
+# Operation timeout, 3 seconds by default.
+timeout = "3s"
-# Connect server timeout in milliseconds, 5000 by default.
-connect_timeout_millis = 1000
+# Connect server timeout, 1 second by default.
+connect_timeout = "1s"
 # `TCP_NODELAY` option for accepted connections, true by default.
 tcp_nodelay = true

-# WAL options, see `standalone.example.toml`.
+# WAL options.
+# Currently, users are expected to choose the wal through the provider field.
+# When a wal provider is chose, the user should comment out all other wal config
+# except those corresponding to the chosen one.
 [wal]
 # WAL data directory
+provider = "raft_engine"
+
+# Raft-engine wal options, see `standalone.example.toml`.
 # dir = "/tmp/greptimedb/wal"
 file_size = "256MB"
 purge_threshold = "4GB"
@@ -39,10 +49,21 @@ purge_interval = "10m"
 read_batch_size = 128
 sync_write = false

+# Kafka wal options, see `standalone.example.toml`.
+# broker_endpoints = ["127.0.0.1:9092"]
+# max_batch_size = "4MB"
+# linger = "200ms"
+# produce_record_timeout = "100ms"
+# backoff_init = "500ms"
+# backoff_max = "10s"
+# backoff_base = 2
+# backoff_deadline = "5mins"
+
 # Storage options, see `standalone.example.toml`.
 [storage]
 # The working home directory.
 data_home = "/tmp/greptimedb/"
+# Storage type.
 type = "File"
 # TTL for all tables. Disabled by default.
 # global_ttl = "7d"
@@ -51,34 +72,13 @@ type = "File"
 # The local file cache directory
 # cache_path = "/path/local_cache"
 # The local file cache capacity in bytes.
-# cache_capacity = "256Mib"
+# cache_capacity = "256MB"

-# Compaction options, see `standalone.example.toml`.
-[storage.compaction]
-max_inflight_tasks = 4
-max_files_in_level0 = 8
-max_purge_tasks = 32
+# Custom storage options
+#[[storage.providers]]
+#type = "S3"
+#[[storage.providers]]
+#type = "Gcs"

-# Storage manifest options
-[storage.manifest]
-# Region checkpoint actions margin.
-# Create a checkpoint every <checkpoint_margin> actions.
-checkpoint_margin = 10
-# Region manifest logs and checkpoints gc execution duration
-gc_duration = '10m'
-
-# Storage flush options
-[storage.flush]
-# Max inflight flush tasks.
-max_flush_tasks = 8
-# Default write buffer size for a region.
-region_write_buffer_size = "32MB"
-# Interval to check whether a region needs flush.
-picker_schedule_interval = "5m"
-# Interval to auto flush a region if it has not flushed yet.
-auto_flush_interval = "1h"
-# Global write buffer size for all regions.
-global_write_buffer_size = "1GB"

 # Mito engine options
 [[region_engine]]
@@ -91,8 +91,8 @@ worker_channel_size = 128
 worker_request_batch_size = 64
 # Number of meta action updated to trigger a new checkpoint for the manifest
 manifest_checkpoint_distance = 10
-# Manifest compression type
-manifest_compress_type = "Uncompressed"
+# Whether to compress manifest and checkpoint file by gzip (default false).
+compress_manifest = false
 # Max number of running background jobs
 max_background_jobs = 4
 # Interval to auto flush a region if it has not flushed yet.
@@ -101,10 +101,39 @@ auto_flush_interval = "1h"
 global_write_buffer_size = "1GB"
 # Global write buffer size threshold to reject write requests (default 2G).
 global_write_buffer_reject_size = "2GB"
||||||
|
# Cache size for SST metadata (default 128MB). Set it to 0 to disable the cache.
|
||||||
|
sst_meta_cache_size = "128MB"
|
||||||
|
# Cache size for vectors and arrow arrays (default 512MB). Set it to 0 to disable the cache.
|
||||||
|
vector_cache_size = "512MB"
|
||||||
|
# Cache size for pages of SST row groups (default 512MB). Set it to 0 to disable the cache.
|
||||||
|
page_cache_size = "512MB"
|
||||||
|
# Buffer size for SST writing.
|
||||||
|
sst_write_buffer_size = "8MB"
|
||||||
|
# Parallelism to scan a region (default: 1/4 of cpu cores).
|
||||||
|
# - 0: using the default value (1/4 of cpu cores).
|
||||||
|
# - 1: scan in current thread.
|
||||||
|
# - n: scan in parallelism n.
|
||||||
|
scan_parallelism = 0
|
||||||
|
# Capacity of the channel to send data from parallel scan tasks to the main task (default 32).
|
||||||
|
parallel_scan_channel_size = 32
|
||||||
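As a hypothetical illustration of how the `scan_parallelism` values described above could be resolved (the real logic lives in the mito engine; the `num_cpus` crate used here is an assumption of this sketch):

```rust
/// Hypothetical helper: resolve the configured `scan_parallelism` value into an
/// actual degree of parallelism.
fn effective_scan_parallelism(configured: usize) -> usize {
    match configured {
        // 0 means "use the default", i.e. 1/4 of the CPU cores (at least 1).
        0 => (num_cpus::get() / 4).max(1),
        // 1 scans in the current thread; n scans with parallelism n.
        n => n,
    }
}

fn main() {
    println!("scan parallelism = {}", effective_scan_parallelism(0));
}
```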
|
|
||||||
# Log options
|
# Log options, see `standalone.example.toml`
|
||||||
# [logging]
|
# [logging]
|
||||||
# Specify logs directory.
|
|
||||||
# dir = "/tmp/greptimedb/logs"
|
# dir = "/tmp/greptimedb/logs"
|
||||||
# Specify the log level [info | debug | error | warn]
|
|
||||||
# level = "info"
|
# level = "info"
|
||||||
|
|
||||||
|
# Datanode export the metrics generated by itself
|
||||||
|
# encoded to Prometheus remote-write format
|
||||||
|
# and send to Prometheus remote-write compatible receiver (e.g. send to `greptimedb` itself)
|
||||||
|
# This is only used for `greptimedb` to export its own metrics internally. It's different from prometheus scrape.
|
||||||
|
# [export_metrics]
|
||||||
|
# Whether to enable exporting metrics, false by default
|
||||||
|
# enable = false
|
||||||
|
# The url of metrics export endpoint, default is `frontend` default HTTP endpoint.
|
||||||
|
# endpoint = "127.0.0.1:4000"
|
||||||
|
# The database to store exported metrics in; the user needs to specify a valid database
|
||||||
|
# db = ""
|
||||||
|
# The interval of export metrics
|
||||||
|
# write_interval = "30s"
|
||||||
|
# HTTP headers of Prometheus remote-write carry
|
||||||
|
# headers = {}
|
||||||
|
|||||||
@@ -1,11 +1,13 @@
|
|||||||
# Node running mode, see `standalone.example.toml`.
|
# Node running mode, see `standalone.example.toml`.
|
||||||
mode = "distributed"
|
mode = "distributed"
|
||||||
|
# The default timezone of the server
|
||||||
|
# default_timezone = "UTC"
|
||||||
|
|
||||||
[heartbeat]
|
[heartbeat]
|
||||||
# Interval for sending heartbeat task to the Metasrv in milliseconds, 5000 by default.
|
# Interval for sending heartbeat task to the Metasrv, 5 seconds by default.
|
||||||
interval_millis = 5000
|
interval = "5s"
|
||||||
# Interval for retry sending heartbeat task in milliseconds, 5000 by default.
|
# Interval for retry sending heartbeat task, 5 seconds by default.
|
||||||
retry_interval_millis = 5000
|
retry_interval = "5s"
|
||||||
|
|
||||||
# HTTP server options, see `standalone.example.toml`.
|
# HTTP server options, see `standalone.example.toml`.
|
||||||
[http]
|
[http]
|
||||||
@@ -59,10 +61,10 @@ enable = true
|
|||||||
# Metasrv client options, see `datanode.example.toml`.
|
# Metasrv client options, see `datanode.example.toml`.
|
||||||
[meta_client]
|
[meta_client]
|
||||||
metasrv_addrs = ["127.0.0.1:3002"]
|
metasrv_addrs = ["127.0.0.1:3002"]
|
||||||
timeout_millis = 3000
|
timeout = "3s"
|
||||||
# DDL timeouts options.
|
# DDL timeouts options.
|
||||||
ddl_timeout_millis = 10000
|
ddl_timeout = "10s"
|
||||||
connect_timeout_millis = 1000
|
connect_timeout = "1s"
|
||||||
tcp_nodelay = true
|
tcp_nodelay = true
|
||||||
|
|
||||||
# Log options, see `standalone.example.toml`
|
# Log options, see `standalone.example.toml`
|
||||||
@@ -77,3 +79,19 @@ tcp_nodelay = true
|
|||||||
timeout = "10s"
|
timeout = "10s"
|
||||||
connect_timeout = "10s"
|
connect_timeout = "10s"
|
||||||
tcp_nodelay = true
|
tcp_nodelay = true
|
||||||
|
|
||||||
|
# Frontend export the metrics generated by itself
|
||||||
|
# encoded to Prometheus remote-write format
|
||||||
|
# and send to Prometheus remote-write compatible receiver (e.g. send to `greptimedb` itself)
|
||||||
|
# This is only used for `greptimedb` to export its own metrics internally. It's different from prometheus scrape.
|
||||||
|
# [export_metrics]
|
||||||
|
# Whether to enable exporting metrics, false by default
|
||||||
|
# enable = false
|
||||||
|
# The url of metrics export endpoint, default is `frontend` default HTTP endpoint.
|
||||||
|
# endpoint = "127.0.0.1:4000"
|
||||||
|
# The database to store exported metrics in; the user needs to specify a valid database
|
||||||
|
# db = ""
|
||||||
|
# The interval of export metrics
|
||||||
|
# write_interval = "30s"
|
||||||
|
# HTTP headers of Prometheus remote-write carry
|
||||||
|
# headers = {}
|
||||||
|
|||||||
@@ -7,14 +7,16 @@ server_addr = "127.0.0.1:3002"
|
|||||||
# Etcd server address, "127.0.0.1:2379" by default.
|
# Etcd server address, "127.0.0.1:2379" by default.
|
||||||
store_addr = "127.0.0.1:2379"
|
store_addr = "127.0.0.1:2379"
|
||||||
# Datanode selector type.
|
# Datanode selector type.
|
||||||
# - "LeaseBased" (default value).
|
# - "lease_based" (default value).
|
||||||
# - "LoadBased"
|
# - "load_based"
|
||||||
# For details, please see "https://docs.greptime.com/developer-guide/meta/selector".
|
# For details, please see "https://docs.greptime.com/developer-guide/metasrv/selector".
|
||||||
selector = "LeaseBased"
|
selector = "lease_based"
|
||||||
# Store data in memory, false by default.
|
# Store data in memory, false by default.
|
||||||
use_memory_store = false
|
use_memory_store = false
|
||||||
# Whether to enable greptimedb telemetry, true by default.
|
# Whether to enable greptimedb telemetry, true by default.
|
||||||
enable_telemetry = true
|
enable_telemetry = true
|
||||||
|
# If it's not empty, the metasrv will store all data with this key prefix.
|
||||||
|
store_key_prefix = ""
|
||||||
|
|
||||||
# Log options, see `standalone.example.toml`
|
# Log options, see `standalone.example.toml`
|
||||||
# [logging]
|
# [logging]
|
||||||
@@ -28,10 +30,67 @@ max_retry_times = 12
|
|||||||
# Initial retry delay of procedures, increases exponentially
|
# Initial retry delay of procedures, increases exponentially
|
||||||
retry_delay = "500ms"
|
retry_delay = "500ms"
|
||||||
|
|
||||||
|
# Failure detectors options.
|
||||||
|
[failure_detector]
|
||||||
|
threshold = 8.0
|
||||||
|
min_std_deviation = "100ms"
|
||||||
|
acceptable_heartbeat_pause = "3000ms"
|
||||||
|
first_heartbeat_estimate = "1000ms"
|
||||||
|
|
||||||
# # Datanode options.
|
# # Datanode options.
|
||||||
# [datanode]
|
# [datanode]
|
||||||
# # Datanode client options.
|
# # Datanode client options.
|
||||||
# [datanode.client_options]
|
# [datanode.client_options]
|
||||||
# timeout_millis = 10000
|
# timeout = "10s"
|
||||||
# connect_timeout_millis = 10000
|
# connect_timeout = "10s"
|
||||||
# tcp_nodelay = true
|
# tcp_nodelay = true
|
||||||
|
|
||||||
|
[wal]
|
||||||
|
# Available wal providers:
|
||||||
|
# - "raft_engine" (default)
|
||||||
|
# - "kafka"
|
||||||
|
provider = "raft_engine"
|
||||||
|
|
||||||
|
# There is no raft-engine wal config here since the metasrv currently only participates in remote wal.
|
||||||
|
|
||||||
|
# Kafka wal config.
|
||||||
|
# The broker endpoints of the Kafka cluster. ["127.0.0.1:9092"] by default.
|
||||||
|
# broker_endpoints = ["127.0.0.1:9092"]
|
||||||
|
# Number of topics to be created upon start.
|
||||||
|
# num_topics = 64
|
||||||
|
# Topic selector type.
|
||||||
|
# Available selector types:
|
||||||
|
# - "round_robin" (default)
|
||||||
|
# selector_type = "round_robin"
|
||||||
|
# A Kafka topic is constructed by concatenating `topic_name_prefix` and `topic_id`.
|
||||||
|
# topic_name_prefix = "greptimedb_wal_topic"
|
||||||
|
# Number of partitions per topic.
|
||||||
|
# num_partitions = 1
|
||||||
|
# Expected number of replicas of each partition.
|
||||||
|
# replication_factor = 1
|
||||||
|
# The timeout above which a topic creation operation will be cancelled.
|
||||||
|
# create_topic_timeout = "30s"
|
||||||
|
# The initial backoff for kafka clients.
|
||||||
|
# backoff_init = "500ms"
|
||||||
|
# The maximum backoff for kafka clients.
|
||||||
|
# backoff_max = "10s"
|
||||||
|
# Exponential backoff rate, i.e. next backoff = base * current backoff.
|
||||||
|
# backoff_base = 2
|
||||||
|
# Stop reconnecting if the total wait time reaches the deadline. If this config is missing, the reconnecting won't terminate.
|
||||||
|
# backoff_deadline = "5mins"
|
||||||
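A toy sketch of the backoff arithmetic described by these options (next backoff = base * current, capped at `backoff_max`, stopping once the accumulated wait reaches `backoff_deadline`); this only illustrates the schedule, not the Kafka client's actual implementation:

```rust
use std::time::Duration;

/// Compute the sequence of waits implied by the backoff options above.
fn backoff_schedule(init: Duration, max: Duration, base: u32, deadline: Duration) -> Vec<Duration> {
    let mut schedule = Vec::new();
    let mut current = init;
    let mut total = Duration::ZERO;
    while total < deadline {
        schedule.push(current);
        total += current;
        // Exponential growth capped at the maximum backoff.
        current = (current * base).min(max);
    }
    schedule
}

fn main() {
    // backoff_init = 500ms, backoff_max = 10s, backoff_base = 2, backoff_deadline = 5min
    let waits = backoff_schedule(
        Duration::from_millis(500),
        Duration::from_secs(10),
        2,
        Duration::from_secs(300),
    );
    // 500ms, 1s, 2s, 4s, 8s, then 10s repeated until the deadline is reached.
    println!("{waits:?}");
}
```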
|
|
||||||
|
# Metasrv export the metrics generated by itself
|
||||||
|
# encoded to Prometheus remote-write format
|
||||||
|
# and send to Prometheus remote-write compatible receiver (e.g. send to `greptimedb` itself)
|
||||||
|
# This is only used for `greptimedb` to export its own metrics internally. It's different from prometheus scrape.
|
||||||
|
# [export_metrics]
|
||||||
|
# Whether to enable exporting metrics, false by default
|
||||||
|
# enable = false
|
||||||
|
# The url of metrics export endpoint, default is `frontend` default HTTP endpoint.
|
||||||
|
# endpoint = "127.0.0.1:4000"
|
||||||
|
# The database to store exported metrics in; the user needs to specify a valid database
|
||||||
|
# db = ""
|
||||||
|
# The interval of export metrics
|
||||||
|
# write_interval = "30s"
|
||||||
|
# HTTP headers of Prometheus remote-write carry
|
||||||
|
# headers = {}
|
||||||
|
|||||||
@@ -2,6 +2,8 @@
|
|||||||
mode = "standalone"
|
mode = "standalone"
|
||||||
# Whether to enable greptimedb telemetry, true by default.
|
# Whether to enable greptimedb telemetry, true by default.
|
||||||
enable_telemetry = true
|
enable_telemetry = true
|
||||||
|
# The default timezone of the server
|
||||||
|
# default_timezone = "UTC"
|
||||||
|
|
||||||
# HTTP server options.
|
# HTTP server options.
|
||||||
[http]
|
[http]
|
||||||
@@ -80,8 +82,51 @@ enable = true
|
|||||||
# Whether to enable Prometheus remote write and read in HTTP API, true by default.
|
# Whether to enable Prometheus remote write and read in HTTP API, true by default.
|
||||||
enable = true
|
enable = true
|
||||||
|
|
||||||
# WAL options.
|
|
||||||
[wal]
|
[wal]
|
||||||
|
# Available wal providers:
|
||||||
|
# - "raft_engine" (default)
|
||||||
|
# - "kafka"
|
||||||
|
provider = "raft_engine"
|
||||||
|
|
||||||
|
# There is no raft-engine wal config here since the metasrv currently only participates in remote wal.
|
||||||
|
|
||||||
|
# Kafka wal options.
|
||||||
|
# The broker endpoints of the Kafka cluster. ["127.0.0.1:9092"] by default.
|
||||||
|
# broker_endpoints = ["127.0.0.1:9092"]
|
||||||
|
|
||||||
|
# Number of topics to be created upon start.
|
||||||
|
# num_topics = 64
|
||||||
|
# Topic selector type.
|
||||||
|
# Available selector types:
|
||||||
|
# - "round_robin" (default)
|
||||||
|
# selector_type = "round_robin"
|
||||||
|
# A Kafka topic is constructed by concatenating `topic_name_prefix` and `topic_id`.
|
||||||
|
# topic_name_prefix = "greptimedb_wal_topic"
|
||||||
|
# Number of partitions per topic.
|
||||||
|
# num_partitions = 1
|
||||||
|
# Expected number of replicas of each partition.
|
||||||
|
# replication_factor = 1
|
||||||
|
|
||||||
|
# The maximum log size a kafka batch producer could buffer.
|
||||||
|
# max_batch_size = "4MB"
|
||||||
|
# The linger duration of a kafka batch producer.
|
||||||
|
# linger = "200ms"
|
||||||
|
# The maximum amount of time (in milliseconds) to wait for Kafka records to be returned.
|
||||||
|
# produce_record_timeout = "100ms"
|
||||||
|
# The timeout above which a topic creation operation will be cancelled.
|
||||||
|
# create_topic_timeout = "30s"
|
||||||
|
|
||||||
|
# The initial backoff for kafka clients.
|
||||||
|
# backoff_init = "500ms"
|
||||||
|
# The maximum backoff for kafka clients.
|
||||||
|
# backoff_max = "10s"
|
||||||
|
# Exponential backoff rate, i.e. next backoff = base * current backoff.
|
||||||
|
# backoff_base = 2
|
||||||
|
# Stop reconnecting if the total wait time reaches the deadline. If this config is missing, the reconnecting won't terminate.
|
||||||
|
# backoff_deadline = "5mins"
|
||||||
|
|
||||||
|
# WAL data directory
|
||||||
|
# dir = "/tmp/greptimedb/wal"
|
||||||
# WAL file size in bytes.
|
# WAL file size in bytes.
|
||||||
file_size = "256MB"
|
file_size = "256MB"
|
||||||
# WAL purge threshold.
|
# WAL purge threshold.
|
||||||
@@ -93,8 +138,8 @@ read_batch_size = 128
|
|||||||
# Whether to sync log file after every write.
|
# Whether to sync log file after every write.
|
||||||
sync_write = false
|
sync_write = false
|
||||||
|
|
||||||
# Kv options.
|
# Metadata storage options.
|
||||||
[kv_store]
|
[metadata_store]
|
||||||
# Kv file size in bytes.
|
# Kv file size in bytes.
|
||||||
file_size = "256MB"
|
file_size = "256MB"
|
||||||
# Kv purge threshold.
|
# Kv purge threshold.
|
||||||
@@ -118,37 +163,50 @@ type = "File"
|
|||||||
# Cache configuration for object storage such as 'S3' etc.
|
# Cache configuration for object storage such as 'S3' etc.
|
||||||
# cache_path = "/path/local_cache"
|
# cache_path = "/path/local_cache"
|
||||||
# The local file cache capacity in bytes.
|
# The local file cache capacity in bytes.
|
||||||
# cache_capacity = "256Mib"
|
# cache_capacity = "256MB"
|
||||||
|
|
||||||
# Compaction options.
|
# Custom storage options
|
||||||
[storage.compaction]
|
#[[storage.providers]]
|
||||||
# Max task number that can concurrently run.
|
#type = "S3"
|
||||||
max_inflight_tasks = 4
|
#[[storage.providers]]
|
||||||
# Max files in level 0 to trigger compaction.
|
#type = "Gcs"
|
||||||
max_files_in_level0 = 8
|
|
||||||
# Max task number for SST purge task after compaction.
|
|
||||||
max_purge_tasks = 32
|
|
||||||
|
|
||||||
# Storage manifest options
|
# Mito engine options
|
||||||
[storage.manifest]
|
[[region_engine]]
|
||||||
# Region checkpoint actions margin.
|
[region_engine.mito]
|
||||||
# Create a checkpoint every <checkpoint_margin> actions.
|
# Number of region workers
|
||||||
checkpoint_margin = 10
|
num_workers = 8
|
||||||
# Region manifest logs and checkpoints gc execution duration
|
# Request channel size of each worker
|
||||||
gc_duration = '10m'
|
worker_channel_size = 128
|
||||||
|
# Max batch size for a worker to handle requests
|
||||||
# Storage flush options
|
worker_request_batch_size = 64
|
||||||
[storage.flush]
|
# Number of meta action updated to trigger a new checkpoint for the manifest
|
||||||
# Max inflight flush tasks.
|
manifest_checkpoint_distance = 10
|
||||||
max_flush_tasks = 8
|
# Whether to compress manifest and checkpoint file by gzip (default false).
|
||||||
# Default write buffer size for a region.
|
compress_manifest = false
|
||||||
region_write_buffer_size = "32MB"
|
# Max number of running background jobs
|
||||||
# Interval to check whether a region needs flush.
|
max_background_jobs = 4
|
||||||
picker_schedule_interval = "5m"
|
|
||||||
# Interval to auto flush a region if it has not flushed yet.
|
# Interval to auto flush a region if it has not flushed yet.
|
||||||
auto_flush_interval = "1h"
|
auto_flush_interval = "1h"
|
||||||
# Global write buffer size for all regions.
|
# Global write buffer size for all regions.
|
||||||
global_write_buffer_size = "1GB"
|
global_write_buffer_size = "1GB"
|
||||||
|
# Global write buffer size threshold to reject write requests (default 2G).
|
||||||
|
global_write_buffer_reject_size = "2GB"
|
||||||
|
# Cache size for SST metadata (default 128MB). Set it to 0 to disable the cache.
|
||||||
|
sst_meta_cache_size = "128MB"
|
||||||
|
# Cache size for vectors and arrow arrays (default 512MB). Set it to 0 to disable the cache.
|
||||||
|
vector_cache_size = "512MB"
|
||||||
|
# Cache size for pages of SST row groups (default 512MB). Set it to 0 to disable the cache.
|
||||||
|
page_cache_size = "512MB"
|
||||||
|
# Buffer size for SST writing.
|
||||||
|
sst_write_buffer_size = "8MB"
|
||||||
|
# Parallelism to scan a region (default: 1/4 of cpu cores).
|
||||||
|
# - 0: using the default value (1/4 of cpu cores).
|
||||||
|
# - 1: scan in current thread.
|
||||||
|
# - n: scan in parallelism n.
|
||||||
|
scan_parallelism = 0
|
||||||
|
# Capacity of the channel to send data from parallel scan tasks to the main task (default 32).
|
||||||
|
parallel_scan_channel_size = 32
|
||||||
|
|
||||||
# Log options
|
# Log options
|
||||||
# [logging]
|
# [logging]
|
||||||
@@ -156,3 +214,27 @@ global_write_buffer_size = "1GB"
|
|||||||
# dir = "/tmp/greptimedb/logs"
|
# dir = "/tmp/greptimedb/logs"
|
||||||
# Specify the log level [info | debug | error | warn]
|
# Specify the log level [info | debug | error | warn]
|
||||||
# level = "info"
|
# level = "info"
|
||||||
|
# whether enable tracing, default is false
|
||||||
|
# enable_otlp_tracing = false
|
||||||
|
# Tracing exporter endpoint in the format `ip:port`; we use gRPC OTLP as the exporter, and the default endpoint is `localhost:4317`
|
||||||
|
# otlp_endpoint = "localhost:4317"
|
||||||
|
# The ratio of traces to sample and export. Valid range `[0, 1]`: 1 means all traces are sampled, 0 means none are sampled, and the default value is 1. Ratios > 1 are treated as 1 and ratios < 0 are treated as 0
|
||||||
|
# tracing_sample_ratio = 1.0
|
||||||
|
# Whether to append logs to stdout. Defaults to true.
|
||||||
|
# append_stdout = true
|
||||||
|
|
||||||
|
# Standalone export the metrics generated by itself
|
||||||
|
# encoded to Prometheus remote-write format
|
||||||
|
# and send to Prometheus remote-write compatible receiver (e.g. send to `greptimedb` itself)
|
||||||
|
# This is only used for `greptimedb` to export its own metrics internally. It's different from prometheus scrape.
|
||||||
|
# [export_metrics]
|
||||||
|
# Whether to enable exporting metrics, false by default
|
||||||
|
# enable = false
|
||||||
|
# The url of metrics export endpoint, default is `frontend` default HTTP endpoint.
|
||||||
|
# endpoint = "127.0.0.1:4000"
|
||||||
|
# The database to store exported metrics in; the user needs to specify a valid database
|
||||||
|
# db = ""
|
||||||
|
# The interval of export metrics
|
||||||
|
# write_interval = "30s"
|
||||||
|
# HTTP headers of Prometheus remote-write carry
|
||||||
|
# headers = {}
|
||||||
|
|||||||
@@ -1,4 +1,4 @@
|
|||||||
FROM ubuntu:22.04 as builder
|
FROM ubuntu:20.04 as builder
|
||||||
|
|
||||||
ARG CARGO_PROFILE
|
ARG CARGO_PROFILE
|
||||||
ARG FEATURES
|
ARG FEATURES
|
||||||
@@ -7,6 +7,11 @@ ARG OUTPUT_DIR
|
|||||||
ENV LANG en_US.utf8
|
ENV LANG en_US.utf8
|
||||||
WORKDIR /greptimedb
|
WORKDIR /greptimedb
|
||||||
|
|
||||||
|
# Add PPA for Python 3.10.
|
||||||
|
RUN apt-get update && \
|
||||||
|
DEBIAN_FRONTEND=noninteractive apt-get install -y software-properties-common && \
|
||||||
|
add-apt-repository ppa:deadsnakes/ppa -y
|
||||||
|
|
||||||
# Install dependencies.
|
# Install dependencies.
|
||||||
RUN --mount=type=cache,target=/var/cache/apt \
|
RUN --mount=type=cache,target=/var/cache/apt \
|
||||||
apt-get update && apt-get install -y \
|
apt-get update && apt-get install -y \
|
||||||
|
|||||||
@@ -26,4 +26,5 @@ ARG RUST_TOOLCHAIN
|
|||||||
RUN rustup toolchain install ${RUST_TOOLCHAIN}
|
RUN rustup toolchain install ${RUST_TOOLCHAIN}
|
||||||
|
|
||||||
# Install nextest.
|
# Install nextest.
|
||||||
RUN cargo install cargo-nextest --locked
|
RUN cargo install cargo-binstall --locked
|
||||||
|
RUN cargo binstall cargo-nextest --no-confirm
|
||||||
|
|||||||
@@ -1,8 +1,13 @@
|
|||||||
FROM ubuntu:22.04
|
FROM ubuntu:20.04
|
||||||
|
|
||||||
ENV LANG en_US.utf8
|
ENV LANG en_US.utf8
|
||||||
WORKDIR /greptimedb
|
WORKDIR /greptimedb
|
||||||
|
|
||||||
|
# Add PPA for Python 3.10.
|
||||||
|
RUN apt-get update && \
|
||||||
|
DEBIAN_FRONTEND=noninteractive apt-get install -y software-properties-common && \
|
||||||
|
add-apt-repository ppa:deadsnakes/ppa -y
|
||||||
|
|
||||||
# Install dependencies.
|
# Install dependencies.
|
||||||
RUN apt-get update && DEBIAN_FRONTEND=noninteractive apt-get install -y \
|
RUN apt-get update && DEBIAN_FRONTEND=noninteractive apt-get install -y \
|
||||||
libssl-dev \
|
libssl-dev \
|
||||||
@@ -14,8 +19,13 @@ RUN apt-get update && DEBIAN_FRONTEND=noninteractive apt-get install -y \
|
|||||||
build-essential \
|
build-essential \
|
||||||
pkg-config \
|
pkg-config \
|
||||||
python3.10 \
|
python3.10 \
|
||||||
python3.10-dev \
|
python3.10-dev
|
||||||
python3-pip
|
|
||||||
|
# Remove Python 3.8 and install pip.
|
||||||
|
RUN apt-get -y purge python3.8 && \
|
||||||
|
apt-get -y autoremove && \
|
||||||
|
ln -s /usr/bin/python3.10 /usr/bin/python3 && \
|
||||||
|
curl -sS https://bootstrap.pypa.io/get-pip.py | python3.10
|
||||||
|
|
||||||
RUN git config --global --add safe.directory /greptimedb
|
RUN git config --global --add safe.directory /greptimedb
|
||||||
|
|
||||||
@@ -33,4 +43,5 @@ ARG RUST_TOOLCHAIN
|
|||||||
RUN rustup toolchain install ${RUST_TOOLCHAIN}
|
RUN rustup toolchain install ${RUST_TOOLCHAIN}
|
||||||
|
|
||||||
# Install nextest.
|
# Install nextest.
|
||||||
RUN cargo install cargo-nextest --locked
|
RUN cargo install cargo-binstall --locked
|
||||||
|
RUN cargo binstall cargo-nextest --no-confirm
|
||||||
|
|||||||
48
docker/dev-builder/ubuntu/Dockerfile-18.10
Normal file
@@ -0,0 +1,48 @@
|
|||||||
|
# Use the legacy glibc 2.28.
|
||||||
|
FROM ubuntu:18.10
|
||||||
|
|
||||||
|
ENV LANG en_US.utf8
|
||||||
|
WORKDIR /greptimedb
|
||||||
|
|
||||||
|
# Use old-releases.ubuntu.com to avoid 404s: https://help.ubuntu.com/community/EOLUpgrades.
|
||||||
|
RUN echo "deb http://old-releases.ubuntu.com/ubuntu/ cosmic main restricted universe multiverse\n\
|
||||||
|
deb http://old-releases.ubuntu.com/ubuntu/ cosmic-updates main restricted universe multiverse\n\
|
||||||
|
deb http://old-releases.ubuntu.com/ubuntu/ cosmic-security main restricted universe multiverse" > /etc/apt/sources.list
|
||||||
|
|
||||||
|
# Install dependencies.
|
||||||
|
RUN apt-get update && DEBIAN_FRONTEND=noninteractive apt-get install -y \
|
||||||
|
libssl-dev \
|
||||||
|
tzdata \
|
||||||
|
curl \
|
||||||
|
ca-certificates \
|
||||||
|
git \
|
||||||
|
build-essential \
|
||||||
|
unzip \
|
||||||
|
pkg-config
|
||||||
|
|
||||||
|
# Install protoc.
|
||||||
|
ENV PROTOC_VERSION=25.1
|
||||||
|
RUN if [ "$(uname -m)" = "x86_64" ]; then \
|
||||||
|
PROTOC_ZIP=protoc-${PROTOC_VERSION}-linux-x86_64.zip; \
|
||||||
|
elif [ "$(uname -m)" = "aarch64" ]; then \
|
||||||
|
PROTOC_ZIP=protoc-${PROTOC_VERSION}-linux-aarch_64.zip; \
|
||||||
|
else \
|
||||||
|
echo "Unsupported architecture"; exit 1; \
|
||||||
|
fi && \
|
||||||
|
curl -OL https://github.com/protocolbuffers/protobuf/releases/download/v${PROTOC_VERSION}/${PROTOC_ZIP} && \
|
||||||
|
unzip -o ${PROTOC_ZIP} -d /usr/local bin/protoc && \
|
||||||
|
unzip -o ${PROTOC_ZIP} -d /usr/local 'include/*' && \
|
||||||
|
rm -f ${PROTOC_ZIP}
|
||||||
|
|
||||||
|
# Install Rust.
|
||||||
|
SHELL ["/bin/bash", "-c"]
|
||||||
|
RUN curl --proto '=https' --tlsv1.2 -sSf https://sh.rustup.rs | sh -s -- --no-modify-path --default-toolchain none -y
|
||||||
|
ENV PATH /root/.cargo/bin/:$PATH
|
||||||
|
|
||||||
|
# Install Rust toolchains.
|
||||||
|
ARG RUST_TOOLCHAIN
|
||||||
|
RUN rustup toolchain install ${RUST_TOOLCHAIN}
|
||||||
|
|
||||||
|
# Install nextest.
|
||||||
|
RUN cargo install cargo-binstall --locked
|
||||||
|
RUN cargo binstall cargo-nextest --no-confirm
|
||||||
61
docs/benchmarks/tsbs/v0.4.0.md
Normal file
@@ -0,0 +1,61 @@
|
|||||||
|
# TSBS benchmark - v0.4.0
|
||||||
|
|
||||||
|
## Environment
|
||||||
|
|
||||||
|
### Local
|
||||||
|
| | |
|
||||||
|
| ------ | ---------------------------------- |
|
||||||
|
| CPU | AMD Ryzen 7 7735HS (8 core 3.2GHz) |
|
||||||
|
| Memory | 32GB |
|
||||||
|
| Disk | SOLIDIGM SSDPFKNU010TZ |
|
||||||
|
| OS | Ubuntu 22.04.2 LTS |
|
||||||
|
|
||||||
|
### Aliyun amd64
|
||||||
|
|
||||||
|
| | |
|
||||||
|
| ------- | -------------- |
|
||||||
|
| Machine | ecs.g7.4xlarge |
|
||||||
|
| CPU | 16 core |
|
||||||
|
| Memory | 64GB |
|
||||||
|
| Disk | 100G |
|
||||||
|
| OS | Ubuntu 22.04 |
|
||||||
|
|
||||||
|
### Aliyun arm64
|
||||||
|
|
||||||
|
| | |
|
||||||
|
| ------- | ----------------- |
|
||||||
|
| Machine | ecs.g8y.4xlarge |
|
||||||
|
| CPU | 16 core |
|
||||||
|
| Memory | 64GB |
|
||||||
|
| Disk | 100G |
|
||||||
|
| OS | Ubuntu 22.04 ARM |
|
||||||
|
|
||||||
|
|
||||||
|
## Write performance
|
||||||
|
|
||||||
|
| Environment | Ingest rate(rows/s) |
|
||||||
|
| ------------------ | --------------------- |
|
||||||
|
| Local | 365280.60 |
|
||||||
|
| Aliyun g7.4xlarge | 341368.72 |
|
||||||
|
| Aliyun g8y.4xlarge | 320907.29 |
|
||||||
|
|
||||||
|
|
||||||
|
## Query performance
|
||||||
|
|
||||||
|
| Query type | Local (ms) | Aliyun g7.4xlarge (ms) | Aliyun g8y.4xlarge (ms) |
|
||||||
|
| --------------------- | ---------- | ---------------------- | ----------------------- |
|
||||||
|
| cpu-max-all-1 | 50.70 | 31.46 | 47.61 |
|
||||||
|
| cpu-max-all-8 | 262.16 | 129.26 | 152.43 |
|
||||||
|
| double-groupby-1 | 2512.71 | 1408.19 | 1586.10 |
|
||||||
|
| double-groupby-5 | 3896.15 | 2304.29 | 2585.29 |
|
||||||
|
| double-groupby-all | 5404.67 | 3337.61 | 3773.91 |
|
||||||
|
| groupby-orderby-limit | 3786.98 | 2065.72 | 2312.57 |
|
||||||
|
| high-cpu-1 | 71.96 | 37.29 | 54.01 |
|
||||||
|
| high-cpu-all | 9468.75 | 7595.69 | 8467.46 |
|
||||||
|
| lastpoint | 13379.43 | 11253.76 | 12949.40 |
|
||||||
|
| single-groupby-1-1-1 | 20.72 | 12.16 | 13.35 |
|
||||||
|
| single-groupby-1-1-12 | 28.53 | 15.67 | 21.62 |
|
||||||
|
| single-groupby-1-8-1 | 72.23 | 37.90 | 43.52 |
|
||||||
|
| single-groupby-5-1-1 | 26.75 | 15.59 | 17.48 |
|
||||||
|
| single-groupby-5-1-12 | 45.41 | 22.90 | 31.96 |
|
||||||
|
| single-groupby-5-8-1 | 107.96 | 59.76 | 69.58 |
|
||||||
@@ -50,10 +50,10 @@ The concept "Table" in GreptimeDB is a bit "heavy" compared to other time-series
|
|||||||
```
|
```
|
||||||
|
|
||||||
The following parts will describe these implementation details:
|
The following parts will describe these implementation details:
|
||||||
- How to route these metric region tables and how those tables are distributed
|
- How to route these metric region tables and how those tables are distributed
|
||||||
- How to maintain the schema and other metadata of the underlying mito engine table
|
- How to maintain the schema and other metadata of the underlying mito engine table
|
||||||
- How to maintain the schema of metric engine table
|
- How to maintain the schema of metric engine table
|
||||||
- How the query goes
|
- How the query goes
|
||||||
|
|
||||||
## Routing
|
## Routing
|
||||||
|
|
||||||
|
|||||||
113
docs/rfcs/2023-11-03-inverted-index.md
Normal file
@@ -0,0 +1,113 @@
|
|||||||
|
---
|
||||||
|
Feature Name: Inverted Index for SST File
|
||||||
|
Tracking Issue: TBD
|
||||||
|
Date: 2023-11-03
|
||||||
|
Author: "Zhong Zhenchi <zhongzc_arch@outlook.com>"
|
||||||
|
---
|
||||||
|
|
||||||
|
# Summary
|
||||||
|
This RFC proposes adding an inverted index to the storage engine to optimize label selection queries on metrics, with tag columns as the optimization target.
|
||||||
|
|
||||||
|
# Introduction
|
||||||
|
In the current system setup, in the Mito Engine, the first column of Primary Keys has a Min-Max index, which significantly optimizes the outcome. However, there are limitations when it comes to other columns, primarily tags. This RFC suggests the implementation of an inverted index to provide enhanced filtering benefits to bridge these limitations and improve overall system performance.
|
||||||
|
|
||||||
|
# Design Detail
|
||||||
|
|
||||||
|
## Inverted Index
|
||||||
|
|
||||||
|
The primary aim of the proposed inverted index is to optimize tag columns in the SST Parquet Files within the Mito Engine. The mapping and construction of an inverted index, from Tag Values to Row Groups, enables efficient logical structures that provide faster and more flexible queries.
|
||||||
|
|
||||||
|
When scanning SST Files, pushed-down filters applied to a respective Tag's inverted index, determine the final Row Groups to be indexed and scanned, further bolstering the speed and efficiency of data retrieval processes.
|
||||||
|
|
||||||
|
## Index Format
|
||||||
|
|
||||||
|
The Inverted Index for each SST file follows the format shown below:
|
||||||
|
|
||||||
|
```
|
||||||
|
inverted_index₀ inverted_index₁ ... inverted_indexₙ footer
|
||||||
|
```
|
||||||
|
|
||||||
|
The structure inside each Inverted Index is as follows:
|
||||||
|
|
||||||
|
```
|
||||||
|
bitmap₀ bitmap₁ bitmap₂ ... bitmapₙ null_bitmap fst
|
||||||
|
```
|
||||||
|
|
||||||
|
The format is encapsulated by a footer:
|
||||||
|
|
||||||
|
```
|
||||||
|
footer_payload footer_payload_size
|
||||||
|
```
|
||||||
|
|
||||||
|
The `footer_payload` is presented in protobuf encoding of `InvertedIndexFooter`.
|
||||||
|
|
||||||
|
The complete format is containerized in [Puffin](https://iceberg.apache.org/puffin-spec/) with the type defined as `greptime-inverted-index-v1`.
|
||||||
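For illustration, a small sketch of how a reader could split such a blob into the index payloads and the footer; the width of `footer_payload_size` (assumed here to be a little-endian `u32`) is an assumption of this sketch, not part of the format above:

```rust
/// Split an inverted-index blob laid out as
/// `inverted_index_0 ... inverted_index_n footer_payload footer_payload_size`
/// into `(inverted_indexes, footer_payload)`.
fn split_footer(blob: &[u8]) -> Option<(&[u8], &[u8])> {
    // Assumed: the trailing size field is a little-endian u32.
    let (rest, size_bytes) = blob.split_at(blob.len().checked_sub(4)?);
    let payload_size = u32::from_le_bytes(size_bytes.try_into().ok()?) as usize;
    let (indexes, footer_payload) = rest.split_at(rest.len().checked_sub(payload_size)?);
    // `footer_payload` would then be decoded as the protobuf `InvertedIndexFooter`.
    Some((indexes, footer_payload))
}
```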
|
|
||||||
|
## Protobuf Details
|
||||||
|
|
||||||
|
The `InvertedIndexFooter` is defined in the following protobuf structure:
|
||||||
|
|
||||||
|
```protobuf
|
||||||
|
message InvertedIndexFooter {
|
||||||
|
repeated InvertedIndexMeta metas = 1;
|
||||||
|
}
|
||||||
|
|
||||||
|
message InvertedIndexMeta {
|
||||||
|
string name = 1;
|
||||||
|
uint64 row_count_in_group = 2;
|
||||||
|
uint64 fst_offset = 3;
|
||||||
|
uint64 fst_size = 4;
|
||||||
|
uint64 null_bitmap_offset = 5;
|
||||||
|
uint64 null_bitmap_size = 6;
|
||||||
|
InvertedIndexStats stats = 7;
|
||||||
|
}
|
||||||
|
|
||||||
|
message InvertedIndexStats {
|
||||||
|
uint64 null_count = 1;
|
||||||
|
uint64 distinct_count = 2;
|
||||||
|
bytes min_value = 3;
|
||||||
|
bytes max_value = 4;
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
## Bitmap
|
||||||
|
|
||||||
|
Bitmaps are used to represent indices of fixed-size groups. Rows are divided into groups of a fixed size, defined in the `InvertedIndexMeta` as `row_count_in_group`.
|
||||||
|
|
||||||
|
For example, when `row_count_in_group` is `4096`, it means each group has `4096` rows. If there are a total of `10000` rows, there will be `3` groups in total. The first two groups will have `4096` rows each, and the last group will have `1808` rows. If the indexed values are found in row `200` and `9000`, they will correspond to groups `0` and `2`, respectively. Therefore, the bitmap should show `0` and `2`.
|
||||||
|
|
||||||
|
Bitmap is implemented using [BitVec](https://docs.rs/bitvec/latest/bitvec/), selected due to its efficient representation of dense data arrays typical of indices of groups.
|
||||||
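A small sketch of the grouping arithmetic described above using the `bitvec` crate (the function name and signature are illustrative, not the actual implementation):

```rust
use bitvec::prelude::*;

/// Set a bit for every group that contains at least one of the given row offsets.
fn group_bitmap(row_offsets: &[u64], row_count_in_group: u64, total_rows: u64) -> BitVec<u8> {
    let group_count = ((total_rows + row_count_in_group - 1) / row_count_in_group) as usize;
    let mut bitmap = bitvec![u8, Lsb0; 0; group_count];
    for &row in row_offsets {
        bitmap.set((row / row_count_in_group) as usize, true);
    }
    bitmap
}

fn main() {
    // 10000 rows with groups of 4096 rows => 3 groups; rows 200 and 9000 fall in groups 0 and 2.
    let bitmap = group_bitmap(&[200, 9000], 4096, 10_000);
    assert_eq!(bitmap.iter_ones().collect::<Vec<_>>(), vec![0, 2]);
}
```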
|
|
||||||
|
|
||||||
|
## Finite State Transducer (FST)
|
||||||
|
|
||||||
|
[FST](https://docs.rs/fst/latest/fst/) is a highly efficient data structure ideal for in-memory indexing. It represents ordered sets or maps where the keys are bytes. The choice of the FST effectively balances the need for performance, space efficiency, and the ability to perform complex analyses such as regular expression matching.
|
||||||
|
|
||||||
|
The conventional usage of FST and `u64` values has been adapted to facilitate indirect indexing to row groups. As the row groups are represented as Bitmaps, we utilize the `u64` values split into bitmap's offset (higher 32 bits) and size (lower 32 bits) to represent the location of these Bitmaps.
|
||||||
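The packing described above can be sketched as follows (a trivial illustration, not the actual encoder):

```rust
/// Pack a bitmap's offset (high 32 bits) and size (low 32 bits) into one FST value.
fn pack_bitmap_location(offset: u32, size: u32) -> u64 {
    ((offset as u64) << 32) | size as u64
}

/// Recover `(offset, size)` from an FST value.
fn unpack_bitmap_location(value: u64) -> (u32, u32) {
    ((value >> 32) as u32, value as u32)
}

fn main() {
    assert_eq!(unpack_bitmap_location(pack_bitmap_location(1024, 96)), (1024, 96));
}
```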
|
|
||||||
|
## API Design
|
||||||
|
|
||||||
|
Two APIs `InvertedIndexBuilder` for building indexes and `InvertedIndexSearcher` for querying indexes are designed:
|
||||||
|
|
||||||
|
```rust
|
||||||
|
type Bytes = Vec<u8>;
|
||||||
|
type GroupId = u64;
|
||||||
|
|
||||||
|
trait InvertedIndexBuilder {
|
||||||
|
fn add(&mut self, name: &str, value: Option<&Bytes>, group_id: GroupId) -> Result<()>;
|
||||||
|
fn finish(&mut self) -> Result<()>;
|
||||||
|
}
|
||||||
|
|
||||||
|
enum Predicate {
|
||||||
|
Gt(Bytes),
|
||||||
|
GtEq(Bytes),
|
||||||
|
Lt(Bytes),
|
||||||
|
LtEq(Bytes),
|
||||||
|
InList(Vec<Bytes>),
|
||||||
|
RegexMatch(String),
|
||||||
|
}
|
||||||
|
|
||||||
|
trait InvertedIndexSearcher {
|
||||||
|
fn search(&mut self, name: &str, predicates: &[Predicate]) -> Result<impl IntoIterator<Item = GroupId>>;
|
||||||
|
}
|
||||||
|
```
|
||||||
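A hypothetical usage sketch of the searcher trait above; the `host` tag name and the surrounding error handling are illustrative only:

```rust
/// Collect the ids of row groups that may contain the given host tag value,
/// using any `InvertedIndexSearcher` implementation.
fn groups_for_host(
    searcher: &mut impl InvertedIndexSearcher,
    host: &str,
) -> Result<Vec<GroupId>> {
    let predicates = vec![Predicate::InList(vec![host.as_bytes().to_vec()])];
    Ok(searcher.search("host", &predicates)?.into_iter().collect())
}
```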
169
docs/rfcs/2023-11-07-region-migration.md
Normal file
@@ -0,0 +1,169 @@
|
|||||||
|
---
|
||||||
|
Feature Name: Region Migration Procedure
|
||||||
|
Tracking Issue: https://github.com/GreptimeTeam/greptimedb/issues/2700
|
||||||
|
Date: 2023-11-03
|
||||||
|
Author: "Xu Wenkang <wenymedia@gmail.com>"
|
||||||
|
---
|
||||||
|
|
||||||
|
# Summary
|
||||||
|
This RFC proposes a mechanism that gives the Meta Server the ability to move regions between Datanodes.
|
||||||
|
|
||||||
|
# Motivation
|
||||||
|
Typically, we need this ability in the following scenarios:
|
||||||
|
- Migrate hot-spot Regions to idle Datanode
|
||||||
|
- Move failed Regions to an available Datanode
|
||||||
|
|
||||||
|
# Details
|
||||||
|
|
||||||
|
```mermaid
|
||||||
|
flowchart TD
|
||||||
|
style Start fill:#85CB90,color:#fff
|
||||||
|
style End fill:#85CB90,color:#fff
|
||||||
|
style SelectCandidate fill:#F38488,color:#fff
|
||||||
|
style OpenCandidate fill:#F38488,color:#fff
|
||||||
|
style UpdateMetadataDown fill:#F38488,color:#fff
|
||||||
|
style UpdateMetadataUp fill:#F38488,color:#fff
|
||||||
|
style UpdateMetadataRollback fill:#F38488,color:#fff
|
||||||
|
style DowngradeLeader fill:#F38488,color:#fff
|
||||||
|
style UpgradeCandidate fill:#F38488,color:#fff
|
||||||
|
|
||||||
|
Start[Start]
|
||||||
|
SelectCandidate[Select Candidate]
|
||||||
|
UpdateMetadataDown["`Update Metadata(Down)
|
||||||
|
1. Downgrade Leader
|
||||||
|
`"]
|
||||||
|
DowngradeLeader["`Downgrade Leader
|
||||||
|
1. Become Follower
|
||||||
|
2. Return **last_entry_id**
|
||||||
|
`"]
|
||||||
|
UpgradeCandidate["`Upgrade Candidate
|
||||||
|
1. Replay to **last_entry_id**
|
||||||
|
2. Become Leader
|
||||||
|
`"]
|
||||||
|
UpdateMetadataUp["`Update Metadata(Up)
|
||||||
|
1. Switch Leader
|
||||||
|
2.1. Remove Old Leader(Opt.)
|
||||||
|
2.2. Move Old Leader to Follower(Opt.)
|
||||||
|
`"]
|
||||||
|
UpdateMetadataRollback["`Update Metadata(Rollback)
|
||||||
|
1. Upgrade old Leader
|
||||||
|
`"]
|
||||||
|
End
|
||||||
|
AnyCandidate{Available?}
|
||||||
|
OpenCandidate["Open Candidate"]
|
||||||
|
CloseOldLeader["Close Old Leader"]
|
||||||
|
|
||||||
|
Start
|
||||||
|
--> SelectCandidate
|
||||||
|
--> AnyCandidate
|
||||||
|
--> |Yes| UpdateMetadataDown
|
||||||
|
--> I1["Invalid Frontend Cache"]
|
||||||
|
--> DowngradeLeader
|
||||||
|
--> UpgradeCandidate
|
||||||
|
--> UpdateMetadataUp
|
||||||
|
--> I2["Invalid Frontend Cache"]
|
||||||
|
--> End
|
||||||
|
|
||||||
|
UpgradeCandidate
|
||||||
|
--> UpdateMetadataRollback
|
||||||
|
--> I3["Invalid Frontend Cache"]
|
||||||
|
--> End
|
||||||
|
|
||||||
|
I2
|
||||||
|
--> CloseOldLeader
|
||||||
|
--> End
|
||||||
|
|
||||||
|
AnyCandidate
|
||||||
|
--> |No| OpenCandidate
|
||||||
|
--> UpdateMetadataDown
|
||||||
|
```
|
||||||
|
|
||||||
|
**Only the red nodes persist state after they succeed**; the other nodes (excluding the Start and End nodes) do not persist state.
|
||||||
|
|
||||||
|
## Steps
|
||||||
|
|
||||||
|
**The persistent context:** It's shared across steps and remains available after recovery. It is only updated/stored after a red node has succeeded.
|
||||||
|
|
||||||
|
Values:
|
||||||
|
- `region_id`: The target leader region.
|
||||||
|
- `peer`: The target datanode.
|
||||||
|
- `close_old_leader`: Indicates whether to close the old leader region.
|
||||||
|
- `leader_may_unreachable`: It's used to support the failover procedure.
|
||||||
|
|
||||||
|
**The Volatile context:** It's shared across steps and available during execution (including retries). It will be dropped if the procedure runner crashes.
|
||||||
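A rough sketch of how the two contexts could be represented; the field names follow the values listed above, while the types and serialization are assumptions of this sketch, not the actual implementation:

```rust
use serde::{Deserialize, Serialize};

/// Updated/stored only after a red (persisting) step succeeds; survives a
/// procedure-runner restart.
#[derive(Serialize, Deserialize)]
struct PersistentContext {
    /// The target leader region.
    region_id: u64,
    /// The target datanode (illustrative type; the real one would be a peer struct).
    peer: String,
    /// Indicates whether to close the old leader region.
    close_old_leader: bool,
    /// Used to support the failover procedure.
    leader_may_unreachable: bool,
}

/// Lives only in memory while the procedure executes (including retries);
/// dropped if the procedure runner crashes.
#[derive(Default)]
struct VolatileContext {
    // Step-local, in-flight data would live here.
}
```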
|
|
||||||
|
### Select Candidate
|
||||||
|
|
||||||
|
The Persistent state: Selected Candidate Region.
|
||||||
|
|
||||||
|
### Update Metadata(Down)
|
||||||
|
|
||||||
|
**The Persistent context:**
|
||||||
|
- The (latest/updated) `version` of `TableRouteValue`, It will be used in the step of `Update Metadata(Up)`.
|
||||||
|
|
||||||
|
### Downgrade Leader
|
||||||
|
This step sends an instruction via heartbeat and performs:
|
||||||
|
1. Downgrades leader region.
|
||||||
|
2. Retrieves the `last_entry_id` (if available).
|
||||||
|
|
||||||
|
If the target leader region is not found:
|
||||||
|
- Sets `close_old_leader` to true.
|
||||||
|
- Sets `leader_may_unreachable` to true.
|
||||||
|
|
||||||
|
If the target Datanode is unreachable:
|
||||||
|
- Waits for region lease expired.
|
||||||
|
- Sets `close_old_leader` to true.
|
||||||
|
- Sets `leader_may_unreachable` to true.
|
||||||
|
|
||||||
|
**The Persistent context:**
|
||||||
|
None
|
||||||
|
|
||||||
|
**The Persistent state:**
|
||||||
|
- `last_entry_id`
|
||||||
|
|
||||||
|
*Passed to the next step.
|
||||||
|
|
||||||
|
|
||||||
|
### Upgrade Candidate
|
||||||
|
This step sends an instruction via heartbeat and performs:
|
||||||
|
1. Replays the WAL to latest(`last_entry_id`).
|
||||||
|
2. Upgrades the candidate region.
|
||||||
|
|
||||||
|
If the target region is not found:
|
||||||
|
- Rollbacks.
|
||||||
|
- Notifies the failover detector if `leader_may_unreachable` == true.
|
||||||
|
- Exits procedure.
|
||||||
|
|
||||||
|
If the target Datanode is unreachable:
|
||||||
|
- Rollbacks.
|
||||||
|
- Notifies the failover detector if `leader_may_unreachable` == true.
|
||||||
|
- Exits procedure.
|
||||||
|
|
||||||
|
**The Persistent context:**
|
||||||
|
None
|
||||||
|
|
||||||
|
### Update Metadata(Up)
|
||||||
|
This step performs
|
||||||
|
1. Switches Leader.
|
||||||
|
2. Removes Old Leader(Opt.).
|
||||||
|
3. Moves Old Leader to follower(Opt.).
|
||||||
|
|
||||||
|
The `TableRouteValue` version should equal the `version` of the `TableRouteValue` in the persistent context. Otherwise, it verifies whether the `TableRouteValue` has already been updated.
|
||||||
|
|
||||||
|
**The Persistent context:**
|
||||||
|
None
|
||||||
|
|
||||||
|
### Close Old Leader(Opt.)
|
||||||
|
This step sends a close region instruction via heartbeat.
|
||||||
|
|
||||||
|
If the target leader region is not found:
|
||||||
|
- Ignore.
|
||||||
|
|
||||||
|
If the target Datanode is unreachable:
|
||||||
|
- Ignore.
|
||||||
|
|
||||||
|
### Open Candidate(Opt.)
|
||||||
|
This step sends an open region instruction via heartbeat and waits for conditions to be met (typically, the condition is that the `last_entry_id` of the Candidate Region is very close to that of the Leader Region or the latest).
|
||||||
|
|
||||||
|
If the target Datanode is unreachable:
|
||||||
|
- Exits procedure.
|
||||||
44
docs/rfcs/2023-12-22-enclose-column-id.md
Normal file
@@ -0,0 +1,44 @@
|
|||||||
|
---
|
||||||
|
Feature Name: Enclose Column Id
|
||||||
|
Tracking Issue: https://github.com/GreptimeTeam/greptimedb/issues/2982
|
||||||
|
Date: 2023-12-22
|
||||||
|
Author: "Ruihang Xia <waynestxia@gmail.com>"
|
||||||
|
---
|
||||||
|
|
||||||
|
# Summary
|
||||||
|
This RFC proposes to enclose the usage of `ColumnId` into the region engine only.
|
||||||
|
|
||||||
|
# Motivation
|
||||||
|
`ColumnId` is an identifier for columns. It's assigned by meta server, stored in `TableInfo` and `RegionMetadata` and used in region engine to distinguish columns.
|
||||||
|
|
||||||
|
At present, the Frontend, Datanode and Metasrv are all aware of `ColumnId`, but it's only used in the region engine. Thus this RFC proposes to remove it from the Frontend (where it's mainly used in `TableInfo`) and the Metasrv.
|
||||||
|
|
||||||
|
# Details
|
||||||
|
|
||||||
|
`ColumnId` is used widely on both read and write paths. Removing it from Frontend and Metasrv implies several things:
|
||||||
|
|
||||||
|
- A column may have different column id in different regions.
|
||||||
|
- A column is identified by its name in all components.
|
||||||
|
- Column order in the region engine is not restricted, i.e., no need to be in the same order with table info.
|
||||||
|
|
||||||
|
The first point doesn't matter IMO. This concept no longer exists outside of the region server, and each region is autonomous and independent -- the only guarantee it should hold is that those columns exist. But if we consider region repartition, where SST files would be re-assigned to different regions, things become a bit more complicated. A possible solution is to store the relation between name and ColumnId in the manifest, but that's out of the scope of this RFC. We can likely provide a workaround by introducing an indirection mapping layer over different versions of partitions.
|
||||||
|
|
||||||
|
And more importantly, we can still assume columns have the same column ids across regions. We have procedures to maintain consistency between regions, and the region engine should ensure alterations are idempotent. So it is possible that region repartition won't need to consider column ids or other region metadata in the future.
|
||||||
|
|
||||||
|
Users write and query columns by their names, not by ColumnId or anything else. The second point also means changing the column reference in ScanRequest from index to name. This change can greatly alleviate the misuse of the column index, which has given us many surprises.
|
||||||
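For illustration only, the change to the column reference could look like the following contrast; the real `ScanRequest` has more fields, and these structs are not the actual definitions:

```rust
/// Before: columns referenced by their position in the table schema.
struct ScanRequestByIndex {
    projection: Option<Vec<usize>>,
}

/// After (proposed): columns referenced by name, decoupled from the table schema order.
struct ScanRequestByName {
    projection: Option<Vec<String>>,
}
```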
|
|
||||||
|
And for the last one, column order only matters in table info. This order is used in user-faced table structure operation, like add column, describe column or as the default order of INSERT clause. None of them is connected with the order in storage.
|
||||||
|
|
||||||
|
# Drawback
|
||||||
|
Firstly, this is a breaking change. Delivering this change requires a full upgrade of the cluster. Secondly, this change may introduce some performance regression. For example, we have to pass the full table name in the `ScanRequest` instead of the `ColumnId`. But this influence is very limited, since the column index is only used in the region engine.
|
||||||
|
|
||||||
|
# Alternatives
|
||||||
|
|
||||||
|
There are two alternatives from the perspective of "what can be used as the column identifier":
|
||||||
|
|
||||||
|
- Index of column to the table schema
|
||||||
|
- `ColumnId` of that column
|
||||||
|
|
||||||
|
The first one is what we are using now. Choosing this way requires keeping the column order in the region engine the same as in the table info. This is not hard to achieve, but it's a bit annoying. And things become tricky when there are internal columns or different schemas, like those stored in file formats. This is the initial purpose of this RFC, which is to decouple the table schema and the region schema.
|
||||||
|
|
||||||
|
The second one, on the other hand, requires the `ColumnId` to be identical in all regions and in `TableInfo`. It has the same drawback as the previous alternative: the `TableInfo` and `RegionMetadata` are tied together. Another point is that the `ColumnId` is assigned by the Metasrv, which doesn't need it but has to maintain it. This also limits the functionality of `ColumnId` by taking the ability to assign it away from the concrete region engine.
|
||||||
24
licenserc.toml
Normal file
@@ -0,0 +1,24 @@
|
|||||||
|
# Copyright 2023 Greptime Team
|
||||||
|
#
|
||||||
|
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
# you may not use this file except in compliance with the License.
|
||||||
|
# You may obtain a copy of the License at
|
||||||
|
#
|
||||||
|
# http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
#
|
||||||
|
# Unless required by applicable law or agreed to in writing, software
|
||||||
|
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
# See the License for the specific language governing permissions and
|
||||||
|
# limitations under the License.
|
||||||
|
|
||||||
|
headerPath = "Apache-2.0.txt"
|
||||||
|
|
||||||
|
includes = [
|
||||||
|
"*.rs",
|
||||||
|
"*.py",
|
||||||
|
]
|
||||||
|
|
||||||
|
[properties]
|
||||||
|
inceptionYear = 2023
|
||||||
|
copyrightOwner = "Greptime Team"
|
||||||
@@ -1,2 +1,2 @@
|
|||||||
[toolchain]
|
[toolchain]
|
||||||
channel = "nightly-2023-08-07"
|
channel = "nightly-2023-12-19"
|
||||||
|
|||||||
@@ -5,14 +5,16 @@ edition.workspace = true
|
|||||||
license.workspace = true
|
license.workspace = true
|
||||||
|
|
||||||
[dependencies]
|
[dependencies]
|
||||||
common-base = { workspace = true }
|
common-base.workspace = true
|
||||||
common-error = { workspace = true }
|
common-decimal.workspace = true
|
||||||
common-macro = { workspace = true }
|
common-error.workspace = true
|
||||||
common-time = { workspace = true }
|
common-macro.workspace = true
|
||||||
datatypes = { workspace = true }
|
common-time.workspace = true
|
||||||
|
datatypes.workspace = true
|
||||||
greptime-proto.workspace = true
|
greptime-proto.workspace = true
|
||||||
|
paste = "1.0"
|
||||||
prost.workspace = true
|
prost.workspace = true
|
||||||
snafu = { version = "0.7", features = ["backtraces"] }
|
snafu.workspace = true
|
||||||
tonic.workspace = true
|
tonic.workspace = true
|
||||||
|
|
||||||
[build-dependencies]
|
[build-dependencies]
|
||||||
|
|||||||
@@ -28,7 +28,12 @@ pub type Result<T> = std::result::Result<T, Error>;
|
|||||||
#[stack_trace_debug]
|
#[stack_trace_debug]
|
||||||
pub enum Error {
|
pub enum Error {
|
||||||
#[snafu(display("Unknown proto column datatype: {}", datatype))]
|
#[snafu(display("Unknown proto column datatype: {}", datatype))]
|
||||||
UnknownColumnDataType { datatype: i32, location: Location },
|
UnknownColumnDataType {
|
||||||
|
datatype: i32,
|
||||||
|
location: Location,
|
||||||
|
#[snafu(source)]
|
||||||
|
error: prost::DecodeError,
|
||||||
|
},
|
||||||
|
|
||||||
#[snafu(display("Failed to create column datatype from {:?}", from))]
|
#[snafu(display("Failed to create column datatype from {:?}", from))]
|
||||||
IntoColumnDataType {
|
IntoColumnDataType {
|
||||||
|
|||||||
@@ -15,6 +15,8 @@
|
|||||||
use std::sync::Arc;
|
use std::sync::Arc;
|
||||||
|
|
||||||
use common_base::BitVec;
|
use common_base::BitVec;
|
||||||
|
use common_decimal::decimal128::{DECIMAL128_DEFAULT_SCALE, DECIMAL128_MAX_PRECISION};
|
||||||
|
use common_decimal::Decimal128;
|
||||||
use common_time::interval::IntervalUnit;
|
use common_time::interval::IntervalUnit;
|
||||||
use common_time::time::Time;
|
use common_time::time::Time;
|
||||||
use common_time::timestamp::TimeUnit;
|
use common_time::timestamp::TimeUnit;
|
||||||
@@ -26,47 +28,71 @@ use datatypes::types::{
|
|||||||
};
|
};
|
||||||
use datatypes::value::{OrderedF32, OrderedF64, Value};
|
use datatypes::value::{OrderedF32, OrderedF64, Value};
|
||||||
use datatypes::vectors::{
|
use datatypes::vectors::{
|
||||||
BinaryVector, BooleanVector, DateTimeVector, DateVector, DurationMicrosecondVector,
|
BinaryVector, BooleanVector, DateTimeVector, DateVector, Decimal128Vector,
|
||||||
DurationMillisecondVector, DurationNanosecondVector, DurationSecondVector, Float32Vector,
|
DurationMicrosecondVector, DurationMillisecondVector, DurationNanosecondVector,
|
||||||
Float64Vector, Int32Vector, Int64Vector, IntervalDayTimeVector, IntervalMonthDayNanoVector,
|
DurationSecondVector, Float32Vector, Float64Vector, Int32Vector, Int64Vector,
|
||||||
IntervalYearMonthVector, PrimitiveVector, StringVector, TimeMicrosecondVector,
|
IntervalDayTimeVector, IntervalMonthDayNanoVector, IntervalYearMonthVector, PrimitiveVector,
|
||||||
TimeMillisecondVector, TimeNanosecondVector, TimeSecondVector, TimestampMicrosecondVector,
|
StringVector, TimeMicrosecondVector, TimeMillisecondVector, TimeNanosecondVector,
|
||||||
TimestampMillisecondVector, TimestampNanosecondVector, TimestampSecondVector, UInt32Vector,
|
TimeSecondVector, TimestampMicrosecondVector, TimestampMillisecondVector,
|
||||||
UInt64Vector, VectorRef,
|
TimestampNanosecondVector, TimestampSecondVector, UInt32Vector, UInt64Vector, VectorRef,
|
||||||
};
|
};
|
||||||
|
use greptime_proto::v1;
|
||||||
|
use greptime_proto::v1::column_data_type_extension::TypeExt;
|
||||||
use greptime_proto::v1::ddl_request::Expr;
|
use greptime_proto::v1::ddl_request::Expr;
|
||||||
use greptime_proto::v1::greptime_request::Request;
|
use greptime_proto::v1::greptime_request::Request;
|
||||||
use greptime_proto::v1::query_request::Query;
|
use greptime_proto::v1::query_request::Query;
|
||||||
use greptime_proto::v1::value::ValueData;
|
use greptime_proto::v1::value::ValueData;
|
||||||
use greptime_proto::v1::{self, DdlRequest, IntervalMonthDayNano, QueryRequest, Row, SemanticType};
|
use greptime_proto::v1::{
|
||||||
|
ColumnDataTypeExtension, DdlRequest, DecimalTypeExtension, QueryRequest, Row, SemanticType,
|
||||||
|
};
|
||||||
|
use paste::paste;
|
||||||
use snafu::prelude::*;
|
use snafu::prelude::*;
|
||||||
|
|
||||||
use crate::error::{self, Result};
|
use crate::error::{self, Result};
|
||||||
use crate::v1::column::Values;
|
use crate::v1::column::Values;
|
||||||
use crate::v1::{Column, ColumnDataType, Value as GrpcValue};
|
use crate::v1::{Column, ColumnDataType, Value as GrpcValue};
|
||||||
|
|
||||||
#[derive(Debug, PartialEq, Eq)]
|
/// ColumnDataTypeWrapper is a wrapper of ColumnDataType and ColumnDataTypeExtension.
|
||||||
pub struct ColumnDataTypeWrapper(ColumnDataType);
|
/// It could be used to convert with ConcreteDataType.
|
||||||
|
#[derive(Debug, PartialEq)]
|
||||||
|
pub struct ColumnDataTypeWrapper {
|
||||||
|
datatype: ColumnDataType,
|
||||||
|
datatype_ext: Option<ColumnDataTypeExtension>,
|
||||||
|
}
|
||||||
|
|
||||||
impl ColumnDataTypeWrapper {
|
impl ColumnDataTypeWrapper {
|
||||||
pub fn try_new(datatype: i32) -> Result<Self> {
|
/// Try to create a ColumnDataTypeWrapper from i32(ColumnDataType) and ColumnDataTypeExtension.
|
||||||
let datatype = ColumnDataType::from_i32(datatype)
|
pub fn try_new(datatype: i32, datatype_ext: Option<ColumnDataTypeExtension>) -> Result<Self> {
|
||||||
|
let datatype = ColumnDataType::try_from(datatype)
|
||||||
.context(error::UnknownColumnDataTypeSnafu { datatype })?;
|
.context(error::UnknownColumnDataTypeSnafu { datatype })?;
|
||||||
Ok(Self(datatype))
|
Ok(Self {
|
||||||
|
datatype,
|
||||||
|
datatype_ext,
|
||||||
|
})
|
||||||
}
|
}
|
||||||
|
|
||||||
pub fn new(datatype: ColumnDataType) -> Self {
|
/// Create a ColumnDataTypeWrapper from ColumnDataType and ColumnDataTypeExtension.
|
||||||
Self(datatype)
|
pub fn new(datatype: ColumnDataType, datatype_ext: Option<ColumnDataTypeExtension>) -> Self {
|
||||||
|
Self {
|
||||||
|
datatype,
|
||||||
|
datatype_ext,
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
/// Get the ColumnDataType.
|
||||||
pub fn datatype(&self) -> ColumnDataType {
|
pub fn datatype(&self) -> ColumnDataType {
|
||||||
self.0
|
self.datatype
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Get a tuple of ColumnDataType and ColumnDataTypeExtension.
|
||||||
|
pub fn to_parts(&self) -> (ColumnDataType, Option<ColumnDataTypeExtension>) {
|
||||||
|
(self.datatype, self.datatype_ext.clone())
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
impl From<ColumnDataTypeWrapper> for ConcreteDataType {
|
impl From<ColumnDataTypeWrapper> for ConcreteDataType {
|
||||||
fn from(datatype: ColumnDataTypeWrapper) -> Self {
|
fn from(datatype_wrapper: ColumnDataTypeWrapper) -> Self {
|
||||||
match datatype.0 {
|
match datatype_wrapper.datatype {
|
||||||
ColumnDataType::Boolean => ConcreteDataType::boolean_datatype(),
|
ColumnDataType::Boolean => ConcreteDataType::boolean_datatype(),
|
||||||
ColumnDataType::Int8 => ConcreteDataType::int8_datatype(),
|
ColumnDataType::Int8 => ConcreteDataType::int8_datatype(),
|
||||||
ColumnDataType::Int16 => ConcreteDataType::int16_datatype(),
|
ColumnDataType::Int16 => ConcreteDataType::int16_datatype(),
|
||||||
@@ -109,6 +135,100 @@ impl From<ColumnDataTypeWrapper> for ConcreteDataType {
                 ConcreteDataType::duration_microsecond_datatype()
             }
             ColumnDataType::DurationNanosecond => ConcreteDataType::duration_nanosecond_datatype(),
+            ColumnDataType::Decimal128 => {
+                if let Some(TypeExt::DecimalType(d)) = datatype_wrapper
+                    .datatype_ext
+                    .as_ref()
+                    .and_then(|datatype_ext| datatype_ext.type_ext.as_ref())
+                {
+                    ConcreteDataType::decimal128_datatype(d.precision as u8, d.scale as i8)
+                } else {
+                    ConcreteDataType::decimal128_default_datatype()
+                }
+            }
+        }
+    }
+}
+
+/// This macro is used to generate datatype functions
+/// with lower style for ColumnDataTypeWrapper.
+///
+///
+/// For example: we can use `ColumnDataTypeWrapper::int8_datatype()`,
+/// to get a ColumnDataTypeWrapper with datatype `ColumnDataType::Int8`.
+macro_rules! impl_column_type_functions {
+    ($($Type: ident), +) => {
+        paste! {
+            impl ColumnDataTypeWrapper {
+                $(
+                    pub fn [<$Type:lower _datatype>]() -> ColumnDataTypeWrapper {
+                        ColumnDataTypeWrapper {
+                            datatype: ColumnDataType::$Type,
+                            datatype_ext: None,
+                        }
+                    }
+                )+
+            }
+        }
+    }
+}
+
+/// This macro is used to generate datatype functions
+/// with snake style for ColumnDataTypeWrapper.
+///
+///
+/// For example: we can use `ColumnDataTypeWrapper::duration_second_datatype()`,
+/// to get a ColumnDataTypeWrapper with datatype `ColumnDataType::DurationSecond`.
+macro_rules! impl_column_type_functions_with_snake {
+    ($($TypeName: ident), +) => {
+        paste!{
+            impl ColumnDataTypeWrapper {
+                $(
+                    pub fn [<$TypeName:snake _datatype>]() -> ColumnDataTypeWrapper {
+                        ColumnDataTypeWrapper {
+                            datatype: ColumnDataType::$TypeName,
+                            datatype_ext: None,
+                        }
+                    }
+                )+
+            }
+        }
+    };
+}
+
+impl_column_type_functions!(
+    Boolean, Uint8, Uint16, Uint32, Uint64, Int8, Int16, Int32, Int64, Float32, Float64, Binary,
+    Date, Datetime, String
+);
+
+impl_column_type_functions_with_snake!(
+    TimestampSecond,
+    TimestampMillisecond,
+    TimestampMicrosecond,
+    TimestampNanosecond,
+    TimeSecond,
+    TimeMillisecond,
+    TimeMicrosecond,
+    TimeNanosecond,
+    IntervalYearMonth,
+    IntervalDayTime,
+    IntervalMonthDayNano,
+    DurationSecond,
+    DurationMillisecond,
+    DurationMicrosecond,
+    DurationNanosecond
+);
+
+impl ColumnDataTypeWrapper {
+    pub fn decimal128_datatype(precision: i32, scale: i32) -> Self {
+        ColumnDataTypeWrapper {
+            datatype: ColumnDataType::Decimal128,
+            datatype_ext: Some(ColumnDataTypeExtension {
+                type_ext: Some(TypeExt::DecimalType(DecimalTypeExtension {
+                    precision,
+                    scale,
+                })),
+            }),
+        }
+    }
+}
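The two `paste!` macros above are plain code generation: the `[<$Type:lower _datatype>]` segment rewrites the identifier case so one invocation emits a whole family of constructors. A self-contained miniature of the same trick, with illustrative names only (it needs the `paste` crate as a dependency):

```rust
use paste::paste;

#[derive(Debug, PartialEq)]
enum Kind {
    Boolean,
    Int8,
}

#[derive(Debug, PartialEq)]
struct Wrapper {
    kind: Kind,
}

macro_rules! impl_ctors {
    ($($Type: ident),+) => {
        paste! {
            impl Wrapper {
                $(
                    // e.g. `Boolean` expands to `boolean_wrapper()`
                    pub fn [<$Type:lower _wrapper>]() -> Wrapper {
                        Wrapper { kind: Kind::$Type }
                    }
                )+
            }
        }
    };
}

impl_ctors!(Boolean, Int8);

fn main() {
    assert_eq!(Wrapper::boolean_wrapper(), Wrapper { kind: Kind::Boolean });
    assert_eq!(Wrapper::int8_wrapper(), Wrapper { kind: Kind::Int8 });
    println!("generated constructors work");
}
```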
@@ -117,7 +237,7 @@ impl TryFrom<ConcreteDataType> for ColumnDataTypeWrapper {
     type Error = error::Error;
 
     fn try_from(datatype: ConcreteDataType) -> Result<Self> {
-        let datatype = ColumnDataTypeWrapper(match datatype {
+        let column_datatype = match datatype {
             ConcreteDataType::Boolean(_) => ColumnDataType::Boolean,
             ConcreteDataType::Int8(_) => ColumnDataType::Int8,
             ConcreteDataType::Int16(_) => ColumnDataType::Int16,
@@ -156,13 +276,30 @@ impl TryFrom<ConcreteDataType> for ColumnDataTypeWrapper {
                 DurationType::Microsecond(_) => ColumnDataType::DurationMicrosecond,
                 DurationType::Nanosecond(_) => ColumnDataType::DurationNanosecond,
             },
+            ConcreteDataType::Decimal128(_) => ColumnDataType::Decimal128,
             ConcreteDataType::Null(_)
             | ConcreteDataType::List(_)
             | ConcreteDataType::Dictionary(_) => {
                 return error::IntoColumnDataTypeSnafu { from: datatype }.fail()
             }
-        });
-        Ok(datatype)
+        };
+        let datatype_extension = match column_datatype {
+            ColumnDataType::Decimal128 => {
+                datatype
+                    .as_decimal128()
+                    .map(|decimal_type| ColumnDataTypeExtension {
+                        type_ext: Some(TypeExt::DecimalType(DecimalTypeExtension {
+                            precision: decimal_type.precision() as i32,
+                            scale: decimal_type.scale() as i32,
+                        })),
+                    })
+            }
+            _ => None,
+        };
+        Ok(Self {
+            datatype: column_datatype,
+            datatype_ext: datatype_extension,
+        })
     }
 }
 
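A sketch of the round trip this TryFrom enables, mirroring the test added later in this diff; the `use` paths are assumptions, while the method names come from the hunks above.

```rust
use api::helper::ColumnDataTypeWrapper;
use datatypes::prelude::ConcreteDataType;

fn main() {
    // ConcreteDataType -> wrapper: the Decimal128 branch records precision/scale
    // in the ColumnDataTypeExtension instead of dropping them.
    let wrapper: ColumnDataTypeWrapper = ConcreteDataType::decimal128_datatype(10, 2)
        .try_into()
        .expect("decimal maps to a pb column type");

    // wrapper -> ConcreteDataType: the extension restores Decimal128(10, 2)
    // rather than falling back to the default decimal type.
    let (datatype, ext) = wrapper.to_parts();
    let roundtrip = ConcreteDataType::from(ColumnDataTypeWrapper::new(datatype, ext));
    assert_eq!(roundtrip, ConcreteDataType::decimal128_datatype(10, 2));
}
```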
@@ -288,6 +425,10 @@ pub fn values_with_capacity(datatype: ColumnDataType, capacity: usize) -> Values
             duration_nanosecond_values: Vec::with_capacity(capacity),
             ..Default::default()
         },
+        ColumnDataType::Decimal128 => Values {
+            decimal128_values: Vec::with_capacity(capacity),
+            ..Default::default()
+        },
     }
 }
 
@@ -341,6 +482,7 @@ pub fn push_vals(column: &mut Column, origin_count: usize, vector: VectorRef) {
             TimeUnit::Microsecond => values.duration_microsecond_values.push(val.value()),
             TimeUnit::Nanosecond => values.duration_nanosecond_values.push(val.value()),
         },
+        Value::Decimal128(val) => values.decimal128_values.push(convert_to_pb_decimal128(val)),
         Value::List(_) => unreachable!(),
     });
     column.null_mask = null_mask.into_vec();
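The new `Decimal128` arm keeps the existing pre-allocation contract; a short hedged usage sketch (import paths are assumptions, as before):

```rust
use api::helper::values_with_capacity;
use api::v1::ColumnDataType;

fn main() {
    // Reserve space in the per-type vector before pushing a batch of rows.
    let values = values_with_capacity(ColumnDataType::Decimal128, 1024);
    assert!(values.decimal128_values.capacity() >= 1024);
}
```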
@@ -381,17 +523,26 @@ fn ddl_request_type(request: &DdlRequest) -> &'static str {
 }
 
 /// Converts an i128 value to google protobuf type [IntervalMonthDayNano].
-pub fn convert_i128_to_interval(v: i128) -> IntervalMonthDayNano {
+pub fn convert_i128_to_interval(v: i128) -> v1::IntervalMonthDayNano {
     let interval = Interval::from_i128(v);
     let (months, days, nanoseconds) = interval.to_month_day_nano();
-    IntervalMonthDayNano {
+    v1::IntervalMonthDayNano {
         months,
         days,
         nanoseconds,
     }
 }
 
-pub fn pb_value_to_value_ref(value: &v1::Value) -> ValueRef {
+/// Convert common decimal128 to grpc decimal128 without precision and scale.
+pub fn convert_to_pb_decimal128(v: Decimal128) -> v1::Decimal128 {
+    let (hi, lo) = v.split_value();
+    v1::Decimal128 { hi, lo }
+}
+
+pub fn pb_value_to_value_ref<'a>(
+    value: &'a v1::Value,
+    datatype_ext: &'a Option<ColumnDataTypeExtension>,
+) -> ValueRef<'a> {
     let Some(value) = &value.value_data else {
         return ValueRef::Null;
     };
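`convert_to_pb_decimal128` ships a 128-bit value as two 64-bit halves. A plain-std sketch of that encoding idea and why it round-trips; the concrete field widths and signedness of the real proto `Decimal128` message are not visible in this hunk, so the types below are assumptions.

```rust
fn split_i128(v: i128) -> (i64, u64) {
    let hi = (v >> 64) as i64; // upper 64 bits, keeps the sign
    let lo = v as u64;         // lower 64 bits, reinterpreted as unsigned
    (hi, lo)
}

fn join_i128(hi: i64, lo: u64) -> i128 {
    // Sign-extend the high half, zero-extend the low half, then stitch them together.
    ((hi as i128) << 64) | (lo as i128)
}

fn main() {
    for v in [0i128, 123, -1, i128::MAX, i128::MIN, -9_876_543_210_123_456_789] {
        let (hi, lo) = split_i128(v);
        assert_eq!(join_i128(hi, lo), v);
    }
    println!("hi/lo round-trip holds");
}
```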
@@ -426,9 +577,9 @@ pub fn pb_value_to_value_ref(value: &v1::Value) -> ValueRef {
         ValueData::TimeMillisecondValue(t) => ValueRef::Time(Time::new_millisecond(*t)),
         ValueData::TimeMicrosecondValue(t) => ValueRef::Time(Time::new_microsecond(*t)),
         ValueData::TimeNanosecondValue(t) => ValueRef::Time(Time::new_nanosecond(*t)),
-        ValueData::IntervalYearMonthValues(v) => ValueRef::Interval(Interval::from_i32(*v)),
-        ValueData::IntervalDayTimeValues(v) => ValueRef::Interval(Interval::from_i64(*v)),
-        ValueData::IntervalMonthDayNanoValues(v) => {
+        ValueData::IntervalYearMonthValue(v) => ValueRef::Interval(Interval::from_i32(*v)),
+        ValueData::IntervalDayTimeValue(v) => ValueRef::Interval(Interval::from_i64(*v)),
+        ValueData::IntervalMonthDayNanoValue(v) => {
             let interval = Interval::from_month_day_nano(v.months, v.days, v.nanoseconds);
             ValueRef::Interval(interval)
         }
@@ -436,6 +587,28 @@ pub fn pb_value_to_value_ref(value: &v1::Value) -> ValueRef {
         ValueData::DurationMillisecondValue(v) => ValueRef::Duration(Duration::new_millisecond(*v)),
         ValueData::DurationMicrosecondValue(v) => ValueRef::Duration(Duration::new_microsecond(*v)),
         ValueData::DurationNanosecondValue(v) => ValueRef::Duration(Duration::new_nanosecond(*v)),
+        ValueData::Decimal128Value(v) => {
+            // get precision and scale from datatype_extension
+            if let Some(TypeExt::DecimalType(d)) = datatype_ext
+                .as_ref()
+                .and_then(|column_ext| column_ext.type_ext.as_ref())
+            {
+                ValueRef::Decimal128(Decimal128::from_value_precision_scale(
+                    v.hi,
+                    v.lo,
+                    d.precision as u8,
+                    d.scale as i8,
+                ))
+            } else {
+                // If the precision and scale are not set, use the default value.
+                ValueRef::Decimal128(Decimal128::from_value_precision_scale(
+                    v.hi,
+                    v.lo,
+                    DECIMAL128_MAX_PRECISION,
+                    DECIMAL128_DEFAULT_SCALE,
+                ))
+            }
+        }
     }
 }
 
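The `Decimal128Value` arm resolves precision and scale from the optional column extension, and only then falls back to crate defaults. A standalone sketch of that fallback rule; the constants here are placeholders standing in for `DECIMAL128_MAX_PRECISION` / `DECIMAL128_DEFAULT_SCALE`, whose actual values are not shown in this diff.

```rust
struct DecimalExt {
    precision: i32,
    scale: i32,
}

const FALLBACK_PRECISION: u8 = 38; // assumed ceiling for a 128-bit decimal
const FALLBACK_SCALE: i8 = 10;     // placeholder default scale

fn resolve_precision_scale(ext: Option<&DecimalExt>) -> (u8, i8) {
    match ext {
        // Declared on the column: trust the extension.
        Some(d) => (d.precision as u8, d.scale as i8),
        // Nothing declared: use the crate-wide defaults.
        None => (FALLBACK_PRECISION, FALLBACK_SCALE),
    }
}

fn main() {
    assert_eq!(resolve_precision_scale(Some(&DecimalExt { precision: 10, scale: 2 })), (10, 2));
    assert_eq!(resolve_precision_scale(None), (38, 10));
}
```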
@@ -522,6 +695,11 @@ pub fn pb_values_to_vector_ref(data_type: &ConcreteDataType, values: Values) ->
             values.duration_nanosecond_values,
         )),
         },
+        ConcreteDataType::Decimal128(d) => Arc::new(Decimal128Vector::from_values(
+            values.decimal128_values.iter().map(|x| {
+                Decimal128::from_value_precision_scale(x.hi, x.lo, d.precision(), d.scale()).into()
+            }),
+        )),
         ConcreteDataType::Null(_) | ConcreteDataType::List(_) | ConcreteDataType::Dictionary(_) => {
             unreachable!()
         }
@@ -692,6 +870,18 @@ pub fn pb_values_to_values(data_type: &ConcreteDataType, values: Values) -> Vec<
             .into_iter()
             .map(|v| Value::Duration(Duration::new_nanosecond(v)))
             .collect(),
+        ConcreteDataType::Decimal128(d) => values
+            .decimal128_values
+            .into_iter()
+            .map(|v| {
+                Value::Decimal128(Decimal128::from_value_precision_scale(
+                    v.hi,
+                    v.lo,
+                    d.precision(),
+                    d.scale(),
+                ))
+            })
+            .collect(),
         ConcreteDataType::Null(_) | ConcreteDataType::List(_) | ConcreteDataType::Dictionary(_) => {
             unreachable!()
         }
@@ -704,12 +894,14 @@ pub fn is_semantic_type_eq(type_value: i32, semantic_type: SemanticType) -> bool
 }
 
 /// Returns true if the pb type value is valid.
-pub fn is_column_type_value_eq(type_value: i32, expect_type: &ConcreteDataType) -> bool {
-    let Some(column_type) = ColumnDataType::from_i32(type_value) else {
-        return false;
-    };
-    is_column_type_eq(column_type, expect_type)
+pub fn is_column_type_value_eq(
+    type_value: i32,
+    type_extension: Option<ColumnDataTypeExtension>,
+    expect_type: &ConcreteDataType,
+) -> bool {
+    ColumnDataTypeWrapper::try_new(type_value, type_extension)
+        .map(|wrapper| ConcreteDataType::from(wrapper) == *expect_type)
+        .unwrap_or(false)
 }
 
 /// Convert value into proto's value.
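How the widened `is_column_type_value_eq` behaves at the edges, as a hedged sketch; the import paths are assumptions, but the semantics follow directly from the body above.

```rust
use api::helper::is_column_type_value_eq;
use api::v1::ColumnDataType;
use datatypes::prelude::ConcreteDataType;

fn main() {
    // A plain column with no extension still compares by its base type...
    assert!(is_column_type_value_eq(
        ColumnDataType::Boolean as i32,
        None,
        &ConcreteDataType::boolean_datatype(),
    ));
    // ...and an unknown raw enum value simply yields `false` instead of an error,
    // because try_new fails and unwrap_or(false) absorbs it.
    assert!(!is_column_type_value_eq(
        i32::MAX,
        None,
        &ConcreteDataType::boolean_datatype(),
    ));
}
```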
@@ -791,13 +983,13 @@ pub fn to_proto_value(value: Value) -> Option<v1::Value> {
         },
         Value::Interval(v) => match v.unit() {
             IntervalUnit::YearMonth => v1::Value {
-                value_data: Some(ValueData::IntervalYearMonthValues(v.to_i32())),
+                value_data: Some(ValueData::IntervalYearMonthValue(v.to_i32())),
             },
             IntervalUnit::DayTime => v1::Value {
-                value_data: Some(ValueData::IntervalDayTimeValues(v.to_i64())),
+                value_data: Some(ValueData::IntervalDayTimeValue(v.to_i64())),
             },
             IntervalUnit::MonthDayNano => v1::Value {
-                value_data: Some(ValueData::IntervalMonthDayNanoValues(
+                value_data: Some(ValueData::IntervalMonthDayNanoValue(
                     convert_i128_to_interval(v.to_i128()),
                 )),
             },
@@ -816,13 +1008,16 @@ pub fn to_proto_value(value: Value) -> Option<v1::Value> {
                 value_data: Some(ValueData::DurationNanosecondValue(v.value())),
             },
         },
+        Value::Decimal128(v) => v1::Value {
+            value_data: Some(ValueData::Decimal128Value(convert_to_pb_decimal128(v))),
+        },
         Value::List(_) => return None,
     };
 
     Some(proto_value)
 }
 
-/// Returns the [ColumnDataType] of the value.
+/// Returns the [ColumnDataTypeWrapper] of the value.
 ///
 /// If value is null, returns `None`.
 pub fn proto_value_type(value: &v1::Value) -> Option<ColumnDataType> {
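`convert_i128_to_interval` leans on `Interval::from_i128`/`to_month_day_nano` to split one i128 into (months, days, nanoseconds). A standalone sketch of such a packing; the exact bit layout used by the crate is an assumption here (it mirrors Arrow's month-day-nano interval), not something this hunk shows.

```rust
fn pack(months: i32, days: i32, nanoseconds: i64) -> i128 {
    // months in bits 96..128, days in bits 64..96, nanoseconds in bits 0..64.
    ((months as i128) << 96) | (((days as u32) as i128) << 64) | ((nanoseconds as u64) as i128)
}

fn unpack(v: i128) -> (i32, i32, i64) {
    let months = (v >> 96) as i32;
    let days = (v >> 64) as i32;
    let nanoseconds = v as i64;
    (months, days, nanoseconds)
}

fn main() {
    for case in [(1, 2, 3i64), (-5, 40, -1_000_000_000), (0, 0, i64::MIN)] {
        let packed = pack(case.0, case.1, case.2);
        assert_eq!(unpack(packed), case);
    }
    println!("month-day-nano packing round-trips");
}
```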
@@ -850,72 +1045,18 @@ pub fn proto_value_type(value: &v1::Value) -> Option<ColumnDataType> {
|
|||||||
ValueData::TimeMillisecondValue(_) => ColumnDataType::TimeMillisecond,
|
ValueData::TimeMillisecondValue(_) => ColumnDataType::TimeMillisecond,
|
||||||
ValueData::TimeMicrosecondValue(_) => ColumnDataType::TimeMicrosecond,
|
ValueData::TimeMicrosecondValue(_) => ColumnDataType::TimeMicrosecond,
|
||||||
ValueData::TimeNanosecondValue(_) => ColumnDataType::TimeNanosecond,
|
ValueData::TimeNanosecondValue(_) => ColumnDataType::TimeNanosecond,
|
||||||
ValueData::IntervalYearMonthValues(_) => ColumnDataType::IntervalYearMonth,
|
ValueData::IntervalYearMonthValue(_) => ColumnDataType::IntervalYearMonth,
|
||||||
ValueData::IntervalDayTimeValues(_) => ColumnDataType::IntervalDayTime,
|
ValueData::IntervalDayTimeValue(_) => ColumnDataType::IntervalDayTime,
|
||||||
ValueData::IntervalMonthDayNanoValues(_) => ColumnDataType::IntervalMonthDayNano,
|
ValueData::IntervalMonthDayNanoValue(_) => ColumnDataType::IntervalMonthDayNano,
|
||||||
ValueData::DurationSecondValue(_) => ColumnDataType::DurationSecond,
|
ValueData::DurationSecondValue(_) => ColumnDataType::DurationSecond,
|
||||||
ValueData::DurationMillisecondValue(_) => ColumnDataType::DurationMillisecond,
|
ValueData::DurationMillisecondValue(_) => ColumnDataType::DurationMillisecond,
|
||||||
ValueData::DurationMicrosecondValue(_) => ColumnDataType::DurationMicrosecond,
|
ValueData::DurationMicrosecondValue(_) => ColumnDataType::DurationMicrosecond,
|
||||||
ValueData::DurationNanosecondValue(_) => ColumnDataType::DurationNanosecond,
|
ValueData::DurationNanosecondValue(_) => ColumnDataType::DurationNanosecond,
|
||||||
|
ValueData::Decimal128Value(_) => ColumnDataType::Decimal128,
|
||||||
};
|
};
|
||||||
Some(value_type)
|
Some(value_type)
|
||||||
}
|
}
|
||||||
|
|
||||||
/// Convert [ConcreteDataType] to [ColumnDataType].
|
|
||||||
pub fn to_column_data_type(data_type: &ConcreteDataType) -> Option<ColumnDataType> {
|
|
||||||
let column_data_type = match data_type {
|
|
||||||
ConcreteDataType::Boolean(_) => ColumnDataType::Boolean,
|
|
||||||
ConcreteDataType::Int8(_) => ColumnDataType::Int8,
|
|
||||||
ConcreteDataType::Int16(_) => ColumnDataType::Int16,
|
|
||||||
ConcreteDataType::Int32(_) => ColumnDataType::Int32,
|
|
||||||
ConcreteDataType::Int64(_) => ColumnDataType::Int64,
|
|
||||||
ConcreteDataType::UInt8(_) => ColumnDataType::Uint8,
|
|
||||||
ConcreteDataType::UInt16(_) => ColumnDataType::Uint16,
|
|
||||||
ConcreteDataType::UInt32(_) => ColumnDataType::Uint32,
|
|
||||||
ConcreteDataType::UInt64(_) => ColumnDataType::Uint64,
|
|
||||||
ConcreteDataType::Float32(_) => ColumnDataType::Float32,
|
|
||||||
ConcreteDataType::Float64(_) => ColumnDataType::Float64,
|
|
||||||
ConcreteDataType::Binary(_) => ColumnDataType::Binary,
|
|
||||||
ConcreteDataType::String(_) => ColumnDataType::String,
|
|
||||||
ConcreteDataType::Date(_) => ColumnDataType::Date,
|
|
||||||
ConcreteDataType::DateTime(_) => ColumnDataType::Datetime,
|
|
||||||
ConcreteDataType::Timestamp(TimestampType::Second(_)) => ColumnDataType::TimestampSecond,
|
|
||||||
ConcreteDataType::Timestamp(TimestampType::Millisecond(_)) => {
|
|
||||||
ColumnDataType::TimestampMillisecond
|
|
||||||
}
|
|
||||||
ConcreteDataType::Timestamp(TimestampType::Microsecond(_)) => {
|
|
||||||
ColumnDataType::TimestampMicrosecond
|
|
||||||
}
|
|
||||||
ConcreteDataType::Timestamp(TimestampType::Nanosecond(_)) => {
|
|
||||||
ColumnDataType::TimestampNanosecond
|
|
||||||
}
|
|
||||||
ConcreteDataType::Time(TimeType::Second(_)) => ColumnDataType::TimeSecond,
|
|
||||||
ConcreteDataType::Time(TimeType::Millisecond(_)) => ColumnDataType::TimeMillisecond,
|
|
||||||
ConcreteDataType::Time(TimeType::Microsecond(_)) => ColumnDataType::TimeMicrosecond,
|
|
||||||
ConcreteDataType::Time(TimeType::Nanosecond(_)) => ColumnDataType::TimeNanosecond,
|
|
||||||
ConcreteDataType::Duration(DurationType::Second(_)) => ColumnDataType::DurationSecond,
|
|
||||||
ConcreteDataType::Duration(DurationType::Millisecond(_)) => {
|
|
||||||
ColumnDataType::DurationMillisecond
|
|
||||||
}
|
|
||||||
ConcreteDataType::Duration(DurationType::Microsecond(_)) => {
|
|
||||||
ColumnDataType::DurationMicrosecond
|
|
||||||
}
|
|
||||||
ConcreteDataType::Duration(DurationType::Nanosecond(_)) => {
|
|
||||||
ColumnDataType::DurationNanosecond
|
|
||||||
}
|
|
||||||
ConcreteDataType::Interval(IntervalType::YearMonth(_)) => ColumnDataType::IntervalYearMonth,
|
|
||||||
ConcreteDataType::Interval(IntervalType::MonthDayNano(_)) => {
|
|
||||||
ColumnDataType::IntervalMonthDayNano
|
|
||||||
}
|
|
||||||
ConcreteDataType::Interval(IntervalType::DayTime(_)) => ColumnDataType::IntervalDayTime,
|
|
||||||
ConcreteDataType::Null(_) | ConcreteDataType::List(_) | ConcreteDataType::Dictionary(_) => {
|
|
||||||
return None
|
|
||||||
}
|
|
||||||
};
|
|
||||||
|
|
||||||
Some(column_data_type)
|
|
||||||
}
|
|
||||||
|
|
||||||
pub fn vectors_to_rows<'a>(
|
pub fn vectors_to_rows<'a>(
|
||||||
columns: impl Iterator<Item = &'a VectorRef>,
|
columns: impl Iterator<Item = &'a VectorRef>,
|
||||||
row_count: usize,
|
row_count: usize,
|
||||||
@@ -962,10 +1103,10 @@ pub fn value_to_grpc_value(value: Value) -> GrpcValue {
|
|||||||
TimeUnit::Nanosecond => ValueData::TimeNanosecondValue(v.value()),
|
TimeUnit::Nanosecond => ValueData::TimeNanosecondValue(v.value()),
|
||||||
}),
|
}),
|
||||||
Value::Interval(v) => Some(match v.unit() {
|
Value::Interval(v) => Some(match v.unit() {
|
||||||
IntervalUnit::YearMonth => ValueData::IntervalYearMonthValues(v.to_i32()),
|
IntervalUnit::YearMonth => ValueData::IntervalYearMonthValue(v.to_i32()),
|
||||||
IntervalUnit::DayTime => ValueData::IntervalDayTimeValues(v.to_i64()),
|
IntervalUnit::DayTime => ValueData::IntervalDayTimeValue(v.to_i64()),
|
||||||
IntervalUnit::MonthDayNano => {
|
IntervalUnit::MonthDayNano => {
|
||||||
ValueData::IntervalMonthDayNanoValues(convert_i128_to_interval(v.to_i128()))
|
ValueData::IntervalMonthDayNanoValue(convert_i128_to_interval(v.to_i128()))
|
||||||
}
|
}
|
||||||
}),
|
}),
|
||||||
Value::Duration(v) => Some(match v.unit() {
|
Value::Duration(v) => Some(match v.unit() {
|
||||||
@@ -974,20 +1115,12 @@ pub fn value_to_grpc_value(value: Value) -> GrpcValue {
|
|||||||
TimeUnit::Microsecond => ValueData::DurationMicrosecondValue(v.value()),
|
TimeUnit::Microsecond => ValueData::DurationMicrosecondValue(v.value()),
|
||||||
TimeUnit::Nanosecond => ValueData::DurationNanosecondValue(v.value()),
|
TimeUnit::Nanosecond => ValueData::DurationNanosecondValue(v.value()),
|
||||||
}),
|
}),
|
||||||
|
Value::Decimal128(v) => Some(ValueData::Decimal128Value(convert_to_pb_decimal128(v))),
|
||||||
Value::List(_) => unreachable!(),
|
Value::List(_) => unreachable!(),
|
||||||
},
|
},
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
/// Returns true if the column type is equal to expected type.
|
|
||||||
fn is_column_type_eq(column_type: ColumnDataType, expect_type: &ConcreteDataType) -> bool {
|
|
||||||
if let Some(expect) = to_column_data_type(expect_type) {
|
|
||||||
column_type == expect
|
|
||||||
} else {
|
|
||||||
false
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
#[cfg(test)]
|
#[cfg(test)]
|
||||||
mod tests {
|
mod tests {
|
||||||
use std::sync::Arc;
|
use std::sync::Arc;
|
||||||
@@ -1081,189 +1214,204 @@ mod tests {
|
|||||||
let values = values_with_capacity(ColumnDataType::DurationMillisecond, 2);
|
let values = values_with_capacity(ColumnDataType::DurationMillisecond, 2);
|
||||||
let values = values.duration_millisecond_values;
|
let values = values.duration_millisecond_values;
|
||||||
assert_eq!(2, values.capacity());
|
assert_eq!(2, values.capacity());
|
||||||
|
|
||||||
|
let values = values_with_capacity(ColumnDataType::Decimal128, 2);
|
||||||
|
let values = values.decimal128_values;
|
||||||
|
assert_eq!(2, values.capacity());
|
||||||
}
|
}
|
||||||
|
|
||||||
#[test]
|
#[test]
|
||||||
fn test_concrete_datatype_from_column_datatype() {
|
fn test_concrete_datatype_from_column_datatype() {
|
||||||
assert_eq!(
|
assert_eq!(
|
||||||
ConcreteDataType::boolean_datatype(),
|
ConcreteDataType::boolean_datatype(),
|
||||||
ColumnDataTypeWrapper(ColumnDataType::Boolean).into()
|
ColumnDataTypeWrapper::boolean_datatype().into()
|
||||||
);
|
);
|
||||||
assert_eq!(
|
assert_eq!(
|
||||||
ConcreteDataType::int8_datatype(),
|
ConcreteDataType::int8_datatype(),
|
||||||
ColumnDataTypeWrapper(ColumnDataType::Int8).into()
|
ColumnDataTypeWrapper::int8_datatype().into()
|
||||||
);
|
);
|
||||||
assert_eq!(
|
assert_eq!(
|
||||||
ConcreteDataType::int16_datatype(),
|
ConcreteDataType::int16_datatype(),
|
||||||
ColumnDataTypeWrapper(ColumnDataType::Int16).into()
|
ColumnDataTypeWrapper::int16_datatype().into()
|
||||||
);
|
);
|
||||||
assert_eq!(
|
assert_eq!(
|
||||||
ConcreteDataType::int32_datatype(),
|
ConcreteDataType::int32_datatype(),
|
||||||
ColumnDataTypeWrapper(ColumnDataType::Int32).into()
|
ColumnDataTypeWrapper::int32_datatype().into()
|
||||||
);
|
);
|
||||||
assert_eq!(
|
assert_eq!(
|
||||||
ConcreteDataType::int64_datatype(),
|
ConcreteDataType::int64_datatype(),
|
||||||
ColumnDataTypeWrapper(ColumnDataType::Int64).into()
|
ColumnDataTypeWrapper::int64_datatype().into()
|
||||||
);
|
);
|
||||||
assert_eq!(
|
assert_eq!(
|
||||||
ConcreteDataType::uint8_datatype(),
|
ConcreteDataType::uint8_datatype(),
|
||||||
ColumnDataTypeWrapper(ColumnDataType::Uint8).into()
|
ColumnDataTypeWrapper::uint8_datatype().into()
|
||||||
);
|
);
|
||||||
assert_eq!(
|
assert_eq!(
|
||||||
ConcreteDataType::uint16_datatype(),
|
ConcreteDataType::uint16_datatype(),
|
||||||
ColumnDataTypeWrapper(ColumnDataType::Uint16).into()
|
ColumnDataTypeWrapper::uint16_datatype().into()
|
||||||
);
|
);
|
||||||
assert_eq!(
|
assert_eq!(
|
||||||
ConcreteDataType::uint32_datatype(),
|
ConcreteDataType::uint32_datatype(),
|
||||||
ColumnDataTypeWrapper(ColumnDataType::Uint32).into()
|
ColumnDataTypeWrapper::uint32_datatype().into()
|
||||||
);
|
);
|
||||||
assert_eq!(
|
assert_eq!(
|
||||||
ConcreteDataType::uint64_datatype(),
|
ConcreteDataType::uint64_datatype(),
|
||||||
ColumnDataTypeWrapper(ColumnDataType::Uint64).into()
|
ColumnDataTypeWrapper::uint64_datatype().into()
|
||||||
);
|
);
|
||||||
assert_eq!(
|
assert_eq!(
|
||||||
ConcreteDataType::float32_datatype(),
|
ConcreteDataType::float32_datatype(),
|
||||||
ColumnDataTypeWrapper(ColumnDataType::Float32).into()
|
ColumnDataTypeWrapper::float32_datatype().into()
|
||||||
);
|
);
|
||||||
assert_eq!(
|
assert_eq!(
|
||||||
ConcreteDataType::float64_datatype(),
|
ConcreteDataType::float64_datatype(),
|
||||||
ColumnDataTypeWrapper(ColumnDataType::Float64).into()
|
ColumnDataTypeWrapper::float64_datatype().into()
|
||||||
);
|
);
|
||||||
assert_eq!(
|
assert_eq!(
|
||||||
ConcreteDataType::binary_datatype(),
|
ConcreteDataType::binary_datatype(),
|
||||||
ColumnDataTypeWrapper(ColumnDataType::Binary).into()
|
ColumnDataTypeWrapper::binary_datatype().into()
|
||||||
);
|
);
|
||||||
assert_eq!(
|
assert_eq!(
|
||||||
ConcreteDataType::string_datatype(),
|
ConcreteDataType::string_datatype(),
|
||||||
ColumnDataTypeWrapper(ColumnDataType::String).into()
|
ColumnDataTypeWrapper::string_datatype().into()
|
||||||
);
|
);
|
||||||
assert_eq!(
|
assert_eq!(
|
||||||
ConcreteDataType::date_datatype(),
|
ConcreteDataType::date_datatype(),
|
||||||
ColumnDataTypeWrapper(ColumnDataType::Date).into()
|
ColumnDataTypeWrapper::date_datatype().into()
|
||||||
);
|
);
|
||||||
assert_eq!(
|
assert_eq!(
|
||||||
ConcreteDataType::datetime_datatype(),
|
ConcreteDataType::datetime_datatype(),
|
||||||
ColumnDataTypeWrapper(ColumnDataType::Datetime).into()
|
ColumnDataTypeWrapper::datetime_datatype().into()
|
||||||
);
|
);
|
||||||
assert_eq!(
|
assert_eq!(
|
||||||
ConcreteDataType::timestamp_millisecond_datatype(),
|
ConcreteDataType::timestamp_millisecond_datatype(),
|
||||||
ColumnDataTypeWrapper(ColumnDataType::TimestampMillisecond).into()
|
ColumnDataTypeWrapper::timestamp_millisecond_datatype().into()
|
||||||
);
|
);
|
||||||
assert_eq!(
|
assert_eq!(
|
||||||
ConcreteDataType::time_datatype(TimeUnit::Millisecond),
|
ConcreteDataType::time_datatype(TimeUnit::Millisecond),
|
||||||
ColumnDataTypeWrapper(ColumnDataType::TimeMillisecond).into()
|
ColumnDataTypeWrapper::time_millisecond_datatype().into()
|
||||||
);
|
);
|
||||||
assert_eq!(
|
assert_eq!(
|
||||||
ConcreteDataType::interval_datatype(IntervalUnit::DayTime),
|
ConcreteDataType::interval_datatype(IntervalUnit::DayTime),
|
||||||
ColumnDataTypeWrapper(ColumnDataType::IntervalDayTime).into()
|
ColumnDataTypeWrapper::interval_day_time_datatype().into()
|
||||||
);
|
);
|
||||||
assert_eq!(
|
assert_eq!(
|
||||||
ConcreteDataType::interval_datatype(IntervalUnit::YearMonth),
|
ConcreteDataType::interval_datatype(IntervalUnit::YearMonth),
|
||||||
ColumnDataTypeWrapper(ColumnDataType::IntervalYearMonth).into()
|
ColumnDataTypeWrapper::interval_year_month_datatype().into()
|
||||||
);
|
);
|
||||||
assert_eq!(
|
assert_eq!(
|
||||||
ConcreteDataType::interval_datatype(IntervalUnit::MonthDayNano),
|
ConcreteDataType::interval_datatype(IntervalUnit::MonthDayNano),
|
||||||
ColumnDataTypeWrapper(ColumnDataType::IntervalMonthDayNano).into()
|
ColumnDataTypeWrapper::interval_month_day_nano_datatype().into()
|
||||||
);
|
);
|
||||||
assert_eq!(
|
assert_eq!(
|
||||||
ConcreteDataType::duration_millisecond_datatype(),
|
ConcreteDataType::duration_millisecond_datatype(),
|
||||||
ColumnDataTypeWrapper(ColumnDataType::DurationMillisecond).into()
|
ColumnDataTypeWrapper::duration_millisecond_datatype().into()
|
||||||
|
);
|
||||||
|
assert_eq!(
|
||||||
|
ConcreteDataType::decimal128_datatype(10, 2),
|
||||||
|
ColumnDataTypeWrapper::decimal128_datatype(10, 2).into()
|
||||||
)
|
)
|
||||||
}
|
}
|
||||||
|
|
||||||
#[test]
|
#[test]
|
||||||
fn test_column_datatype_from_concrete_datatype() {
|
fn test_column_datatype_from_concrete_datatype() {
|
||||||
assert_eq!(
|
assert_eq!(
|
||||||
ColumnDataTypeWrapper(ColumnDataType::Boolean),
|
ColumnDataTypeWrapper::boolean_datatype(),
|
||||||
ConcreteDataType::boolean_datatype().try_into().unwrap()
|
ConcreteDataType::boolean_datatype().try_into().unwrap()
|
||||||
);
|
);
|
||||||
assert_eq!(
|
assert_eq!(
|
||||||
ColumnDataTypeWrapper(ColumnDataType::Int8),
|
ColumnDataTypeWrapper::int8_datatype(),
|
||||||
ConcreteDataType::int8_datatype().try_into().unwrap()
|
ConcreteDataType::int8_datatype().try_into().unwrap()
|
||||||
);
|
);
|
||||||
assert_eq!(
|
assert_eq!(
|
||||||
ColumnDataTypeWrapper(ColumnDataType::Int16),
|
ColumnDataTypeWrapper::int16_datatype(),
|
||||||
ConcreteDataType::int16_datatype().try_into().unwrap()
|
ConcreteDataType::int16_datatype().try_into().unwrap()
|
||||||
);
|
);
|
||||||
assert_eq!(
|
assert_eq!(
|
||||||
ColumnDataTypeWrapper(ColumnDataType::Int32),
|
ColumnDataTypeWrapper::int32_datatype(),
|
||||||
ConcreteDataType::int32_datatype().try_into().unwrap()
|
ConcreteDataType::int32_datatype().try_into().unwrap()
|
||||||
);
|
);
|
||||||
assert_eq!(
|
assert_eq!(
|
||||||
ColumnDataTypeWrapper(ColumnDataType::Int64),
|
ColumnDataTypeWrapper::int64_datatype(),
|
||||||
ConcreteDataType::int64_datatype().try_into().unwrap()
|
ConcreteDataType::int64_datatype().try_into().unwrap()
|
||||||
);
|
);
|
||||||
assert_eq!(
|
assert_eq!(
|
||||||
ColumnDataTypeWrapper(ColumnDataType::Uint8),
|
ColumnDataTypeWrapper::uint8_datatype(),
|
||||||
ConcreteDataType::uint8_datatype().try_into().unwrap()
|
ConcreteDataType::uint8_datatype().try_into().unwrap()
|
||||||
);
|
);
|
||||||
assert_eq!(
|
assert_eq!(
|
||||||
ColumnDataTypeWrapper(ColumnDataType::Uint16),
|
ColumnDataTypeWrapper::uint16_datatype(),
|
||||||
ConcreteDataType::uint16_datatype().try_into().unwrap()
|
ConcreteDataType::uint16_datatype().try_into().unwrap()
|
||||||
);
|
);
|
||||||
assert_eq!(
|
assert_eq!(
|
||||||
ColumnDataTypeWrapper(ColumnDataType::Uint32),
|
ColumnDataTypeWrapper::uint32_datatype(),
|
||||||
ConcreteDataType::uint32_datatype().try_into().unwrap()
|
ConcreteDataType::uint32_datatype().try_into().unwrap()
|
||||||
);
|
);
|
||||||
assert_eq!(
|
assert_eq!(
|
||||||
ColumnDataTypeWrapper(ColumnDataType::Uint64),
|
ColumnDataTypeWrapper::uint64_datatype(),
|
||||||
ConcreteDataType::uint64_datatype().try_into().unwrap()
|
ConcreteDataType::uint64_datatype().try_into().unwrap()
|
||||||
);
|
);
|
||||||
assert_eq!(
|
assert_eq!(
|
||||||
ColumnDataTypeWrapper(ColumnDataType::Float32),
|
ColumnDataTypeWrapper::float32_datatype(),
|
||||||
ConcreteDataType::float32_datatype().try_into().unwrap()
|
ConcreteDataType::float32_datatype().try_into().unwrap()
|
||||||
);
|
);
|
||||||
assert_eq!(
|
assert_eq!(
|
||||||
ColumnDataTypeWrapper(ColumnDataType::Float64),
|
ColumnDataTypeWrapper::float64_datatype(),
|
||||||
ConcreteDataType::float64_datatype().try_into().unwrap()
|
ConcreteDataType::float64_datatype().try_into().unwrap()
|
||||||
);
|
);
|
||||||
assert_eq!(
|
assert_eq!(
|
||||||
ColumnDataTypeWrapper(ColumnDataType::Binary),
|
ColumnDataTypeWrapper::binary_datatype(),
|
||||||
ConcreteDataType::binary_datatype().try_into().unwrap()
|
ConcreteDataType::binary_datatype().try_into().unwrap()
|
||||||
);
|
);
|
||||||
assert_eq!(
|
assert_eq!(
|
||||||
ColumnDataTypeWrapper(ColumnDataType::String),
|
ColumnDataTypeWrapper::string_datatype(),
|
||||||
ConcreteDataType::string_datatype().try_into().unwrap()
|
ConcreteDataType::string_datatype().try_into().unwrap()
|
||||||
);
|
);
|
||||||
assert_eq!(
|
assert_eq!(
|
||||||
ColumnDataTypeWrapper(ColumnDataType::Date),
|
ColumnDataTypeWrapper::date_datatype(),
|
||||||
ConcreteDataType::date_datatype().try_into().unwrap()
|
ConcreteDataType::date_datatype().try_into().unwrap()
|
||||||
);
|
);
|
||||||
assert_eq!(
|
assert_eq!(
|
||||||
ColumnDataTypeWrapper(ColumnDataType::Datetime),
|
ColumnDataTypeWrapper::datetime_datatype(),
|
||||||
ConcreteDataType::datetime_datatype().try_into().unwrap()
|
ConcreteDataType::datetime_datatype().try_into().unwrap()
|
||||||
);
|
);
|
||||||
assert_eq!(
|
assert_eq!(
|
||||||
ColumnDataTypeWrapper(ColumnDataType::TimestampMillisecond),
|
ColumnDataTypeWrapper::timestamp_millisecond_datatype(),
|
||||||
ConcreteDataType::timestamp_millisecond_datatype()
|
ConcreteDataType::timestamp_millisecond_datatype()
|
||||||
.try_into()
|
.try_into()
|
||||||
.unwrap()
|
.unwrap()
|
||||||
);
|
);
|
||||||
assert_eq!(
|
assert_eq!(
|
||||||
ColumnDataTypeWrapper(ColumnDataType::IntervalYearMonth),
|
ColumnDataTypeWrapper::interval_year_month_datatype(),
|
||||||
ConcreteDataType::interval_datatype(IntervalUnit::YearMonth)
|
ConcreteDataType::interval_datatype(IntervalUnit::YearMonth)
|
||||||
.try_into()
|
.try_into()
|
||||||
.unwrap()
|
.unwrap()
|
||||||
);
|
);
|
||||||
assert_eq!(
|
assert_eq!(
|
||||||
ColumnDataTypeWrapper(ColumnDataType::IntervalDayTime),
|
ColumnDataTypeWrapper::interval_day_time_datatype(),
|
||||||
ConcreteDataType::interval_datatype(IntervalUnit::DayTime)
|
ConcreteDataType::interval_datatype(IntervalUnit::DayTime)
|
||||||
.try_into()
|
.try_into()
|
||||||
.unwrap()
|
.unwrap()
|
||||||
);
|
);
|
||||||
assert_eq!(
|
assert_eq!(
|
||||||
ColumnDataTypeWrapper(ColumnDataType::IntervalMonthDayNano),
|
ColumnDataTypeWrapper::interval_month_day_nano_datatype(),
|
||||||
ConcreteDataType::interval_datatype(IntervalUnit::MonthDayNano)
|
ConcreteDataType::interval_datatype(IntervalUnit::MonthDayNano)
|
||||||
.try_into()
|
.try_into()
|
||||||
.unwrap()
|
.unwrap()
|
||||||
);
|
);
|
||||||
assert_eq!(
|
assert_eq!(
|
||||||
ColumnDataTypeWrapper(ColumnDataType::DurationMillisecond),
|
ColumnDataTypeWrapper::duration_millisecond_datatype(),
|
||||||
ConcreteDataType::duration_millisecond_datatype()
|
ConcreteDataType::duration_millisecond_datatype()
|
||||||
.try_into()
|
.try_into()
|
||||||
.unwrap()
|
.unwrap()
|
||||||
);
|
);
|
||||||
|
|
||||||
|
assert_eq!(
|
||||||
|
ColumnDataTypeWrapper::decimal128_datatype(10, 2),
|
||||||
|
ConcreteDataType::decimal128_datatype(10, 2)
|
||||||
|
.try_into()
|
||||||
|
.unwrap()
|
||||||
|
);
|
||||||
|
|
||||||
let result: Result<ColumnDataTypeWrapper> = ConcreteDataType::null_datatype().try_into();
|
let result: Result<ColumnDataTypeWrapper> = ConcreteDataType::null_datatype().try_into();
|
||||||
assert!(result.is_err());
|
assert!(result.is_err());
|
||||||
assert_eq!(
|
assert_eq!(
|
||||||
@@ -1290,6 +1438,7 @@ mod tests {
|
|||||||
}),
|
}),
|
||||||
null_mask: vec![],
|
null_mask: vec![],
|
||||||
datatype: 0,
|
datatype: 0,
|
||||||
|
..Default::default()
|
||||||
};
|
};
|
||||||
|
|
||||||
let vector = Arc::new(TimestampNanosecondVector::from_vec(vec![1, 2, 3]));
|
let vector = Arc::new(TimestampNanosecondVector::from_vec(vec![1, 2, 3]));
|
||||||
@@ -1331,6 +1480,7 @@ mod tests {
|
|||||||
}),
|
}),
|
||||||
null_mask: vec![],
|
null_mask: vec![],
|
||||||
datatype: 0,
|
datatype: 0,
|
||||||
|
..Default::default()
|
||||||
};
|
};
|
||||||
|
|
||||||
let vector = Arc::new(TimeNanosecondVector::from_vec(vec![1, 2, 3]));
|
let vector = Arc::new(TimeNanosecondVector::from_vec(vec![1, 2, 3]));
|
||||||
@@ -1372,6 +1522,7 @@ mod tests {
|
|||||||
}),
|
}),
|
||||||
null_mask: vec![],
|
null_mask: vec![],
|
||||||
datatype: 0,
|
datatype: 0,
|
||||||
|
..Default::default()
|
||||||
};
|
};
|
||||||
|
|
||||||
let vector = Arc::new(IntervalYearMonthVector::from_vec(vec![1, 2, 3]));
|
let vector = Arc::new(IntervalYearMonthVector::from_vec(vec![1, 2, 3]));
|
||||||
@@ -1416,6 +1567,7 @@ mod tests {
|
|||||||
}),
|
}),
|
||||||
null_mask: vec![],
|
null_mask: vec![],
|
||||||
datatype: 0,
|
datatype: 0,
|
||||||
|
..Default::default()
|
||||||
};
|
};
|
||||||
|
|
||||||
let vector = Arc::new(DurationNanosecondVector::from_vec(vec![1, 2, 3]));
|
let vector = Arc::new(DurationNanosecondVector::from_vec(vec![1, 2, 3]));
|
||||||
@@ -1460,6 +1612,7 @@ mod tests {
|
|||||||
}),
|
}),
|
||||||
null_mask: vec![2],
|
null_mask: vec![2],
|
||||||
datatype: ColumnDataType::Boolean as i32,
|
datatype: ColumnDataType::Boolean as i32,
|
||||||
|
..Default::default()
|
||||||
};
|
};
|
||||||
let row_count = 4;
|
let row_count = 4;
|
||||||
|
|
||||||
@@ -1617,17 +1770,17 @@ mod tests {
|
|||||||
&ConcreteDataType::Interval(IntervalType::MonthDayNano(IntervalMonthDayNanoType)),
|
&ConcreteDataType::Interval(IntervalType::MonthDayNano(IntervalMonthDayNanoType)),
|
||||||
Values {
|
Values {
|
||||||
interval_month_day_nano_values: vec![
|
interval_month_day_nano_values: vec![
|
||||||
IntervalMonthDayNano {
|
v1::IntervalMonthDayNano {
|
||||||
months: 1,
|
months: 1,
|
||||||
days: 2,
|
days: 2,
|
||||||
nanoseconds: 3,
|
nanoseconds: 3,
|
||||||
},
|
},
|
||||||
IntervalMonthDayNano {
|
v1::IntervalMonthDayNano {
|
||||||
months: 5,
|
months: 5,
|
||||||
days: 6,
|
days: 6,
|
||||||
nanoseconds: 7,
|
nanoseconds: 7,
|
||||||
},
|
},
|
||||||
IntervalMonthDayNano {
|
v1::IntervalMonthDayNano {
|
||||||
months: 9,
|
months: 9,
|
||||||
days: 10,
|
days: 10,
|
||||||
nanoseconds: 11,
|
nanoseconds: 11,
|
||||||
@@ -1859,4 +2012,33 @@ mod tests {
         assert_eq!(values[6], ValueData::DateValue(30));
         assert_eq!(values[7], ValueData::StringValue("c".to_string()));
     }
+
+    #[test]
+    fn test_is_column_type_value_eq() {
+        // test column type eq
+        let column1 = Column {
+            column_name: "test".to_string(),
+            semantic_type: 0,
+            values: Some(Values {
+                bool_values: vec![false, true, true],
+                ..Default::default()
+            }),
+            null_mask: vec![2],
+            datatype: ColumnDataType::Boolean as i32,
+            datatype_extension: None,
+        };
+        assert!(is_column_type_value_eq(
+            column1.datatype,
+            column1.datatype_extension,
+            &ConcreteDataType::boolean_datatype(),
+        ));
+    }
+
+    #[test]
+    fn test_convert_to_pb_decimal128() {
+        let decimal = Decimal128::new(123, 3, 1);
+        let pb_decimal = convert_to_pb_decimal128(decimal);
+        assert_eq!(pb_decimal.lo, 123);
+        assert_eq!(pb_decimal.hi, 0);
+    }
 }
||||||
|
|||||||
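A possible companion to `test_convert_to_pb_decimal128`: a hypothetical round-trip check written against the functions introduced in this diff. The `common_decimal` import path and the equality semantics of `Decimal128` are assumptions, not taken from the hunks above.

```rust
#[test]
fn test_decimal128_pb_round_trip() {
    use common_decimal::Decimal128;

    // Send a decimal through the pb form and rebuild it with the same
    // precision/scale that the column extension would carry alongside it.
    let pb = convert_to_pb_decimal128(Decimal128::new(-314159, 10, 5));
    let back = Decimal128::from_value_precision_scale(pb.hi, pb.lo, 10, 5);
    assert_eq!(back, Decimal128::new(-314159, 10, 5));
}
```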
@@ -22,7 +22,10 @@ use crate::helper::ColumnDataTypeWrapper;
 use crate::v1::ColumnDef;
 
 pub fn try_as_column_schema(column_def: &ColumnDef) -> Result<ColumnSchema> {
-    let data_type = ColumnDataTypeWrapper::try_new(column_def.data_type)?;
+    let data_type = ColumnDataTypeWrapper::try_new(
+        column_def.data_type,
+        column_def.datatype_extension.clone(),
+    )?;
 
     let constraint = if column_def.default_constraint.is_empty() {
         None
||||||
|
|||||||
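Usage sketch for widened `try_new` call sites like the one above: decode a wire-level `ColumnDef`, carrying the extension along. The field set is reduced via `Default` and the import paths are assumptions.

```rust
use api::helper::ColumnDataTypeWrapper;
use api::v1::{ColumnDataType, ColumnDef};

fn main() {
    // Prost-generated messages implement Default, so only the relevant fields are set here.
    let column_def = ColumnDef {
        name: "amount".to_string(),
        data_type: ColumnDataType::Decimal128 as i32,
        datatype_extension: None,
        ..Default::default()
    };
    let wrapper =
        ColumnDataTypeWrapper::try_new(column_def.data_type, column_def.datatype_extension.clone())
            .expect("known data type");
    // With no extension the decimal side would fall back to the default precision/scale.
    println!("{:?}", wrapper.datatype());
}
```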
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
|||||||
@@ -8,46 +8,47 @@ license.workspace = true
 testing = []
 
 [dependencies]
-api = { workspace = true }
+api.workspace = true
 arc-swap = "1.0"
 arrow-schema.workspace = true
 async-stream.workspace = true
 async-trait = "0.1"
-common-catalog = { workspace = true }
-common-error = { workspace = true }
-common-grpc = { workspace = true }
-common-macro = { workspace = true }
-common-meta = { workspace = true }
-common-query = { workspace = true }
-common-recordbatch = { workspace = true }
-common-runtime = { workspace = true }
-common-telemetry = { workspace = true }
-common-time = { workspace = true }
-dashmap = "5.4"
+build-data = "0.1"
+common-catalog.workspace = true
+common-error.workspace = true
+common-grpc.workspace = true
+common-macro.workspace = true
+common-meta.workspace = true
+common-query.workspace = true
+common-recordbatch.workspace = true
+common-runtime.workspace = true
+common-telemetry.workspace = true
+common-time.workspace = true
+dashmap.workspace = true
 datafusion.workspace = true
-datatypes = { workspace = true }
+datatypes.workspace = true
 futures = "0.3"
 futures-util.workspace = true
 lazy_static.workspace = true
-meta-client = { workspace = true }
-metrics.workspace = true
+meta-client.workspace = true
 moka = { workspace = true, features = ["future"] }
 parking_lot = "0.12"
 partition.workspace = true
+paste = "1.0"
+prometheus.workspace = true
 regex.workspace = true
 serde.workspace = true
-serde_json = "1.0"
-session = { workspace = true }
-snafu = { version = "0.7", features = ["backtraces"] }
-store-api = { workspace = true }
-table = { workspace = true }
+serde_json.workspace = true
+session.workspace = true
+snafu.workspace = true
+store-api.workspace = true
+table.workspace = true
 tokio.workspace = true
 
 [dev-dependencies]
 catalog = { workspace = true, features = ["testing"] }
 chrono.workspace = true
-common-test-util = { workspace = true }
-log-store = { workspace = true }
-object-store = { workspace = true }
-storage = { workspace = true }
+common-test-util.workspace = true
+log-store.workspace = true
+object-store.workspace = true
 tokio.workspace = true
||||||
|
|||||||
@@ -180,7 +180,7 @@ pub enum Error {
         source: table::error::Error,
     },
 
-    #[snafu(display(""))]
+    #[snafu(display("Internal error"))]
     Internal {
         location: Location,
         source: BoxedError,
@@ -216,7 +216,7 @@ pub enum Error {
     #[snafu(display("Illegal access to catalog: {} and schema: {}", catalog, schema))]
     QueryAccessDenied { catalog: String, schema: String },
 
-    #[snafu(display(""))]
+    #[snafu(display("DataFusion error"))]
     Datafusion {
         #[snafu(source)]
        error: DataFusionError,
||||||
|
|||||||
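The two display-string fixes above matter because snafu builds the user-facing message from the `display` attribute, so an empty string renders as a blank entry in an error chain. A self-contained illustration (snafu 0.7-style derive; names are illustrative, not the crate's actual error type):

```rust
use snafu::Snafu;

#[derive(Debug, Snafu)]
enum Error {
    // Before the fix, #[snafu(display(""))] would make `e.to_string()` empty.
    #[snafu(display("Internal error"))]
    Internal,

    #[snafu(display("Illegal access to catalog: {} and schema: {}", catalog, schema))]
    QueryAccessDenied { catalog: String, schema: String },
}

fn main() {
    let e = Error::QueryAccessDenied {
        catalog: "greptime".to_string(),
        schema: "public".to_string(),
    };
    assert_eq!(
        e.to_string(),
        "Illegal access to catalog: greptime and schema: public"
    );
    assert_eq!(Error::Internal.to_string(), "Internal error");
}
```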
@@ -13,16 +13,20 @@
|
|||||||
// limitations under the License.
|
// limitations under the License.
|
||||||
|
|
||||||
mod columns;
|
mod columns;
|
||||||
|
mod memory_table;
|
||||||
|
mod table_names;
|
||||||
mod tables;
|
mod tables;
|
||||||
|
|
||||||
use std::collections::HashMap;
|
use std::collections::HashMap;
|
||||||
use std::sync::{Arc, Weak};
|
use std::sync::{Arc, Weak};
|
||||||
|
|
||||||
use common_catalog::consts::INFORMATION_SCHEMA_NAME;
|
use common_catalog::consts::{self, INFORMATION_SCHEMA_NAME};
|
||||||
use common_error::ext::BoxedError;
|
use common_error::ext::BoxedError;
|
||||||
use common_recordbatch::{RecordBatchStreamAdaptor, SendableRecordBatchStream};
|
use common_recordbatch::{RecordBatchStreamWrapper, SendableRecordBatchStream};
|
||||||
use datatypes::schema::SchemaRef;
|
use datatypes::schema::SchemaRef;
|
||||||
use futures_util::StreamExt;
|
use futures_util::StreamExt;
|
||||||
|
use lazy_static::lazy_static;
|
||||||
|
use paste::paste;
|
||||||
use snafu::ResultExt;
|
use snafu::ResultExt;
|
||||||
use store_api::data_source::DataSource;
|
use store_api::data_source::DataSource;
|
||||||
use store_api::storage::{ScanRequest, TableId};
|
use store_api::storage::{ScanRequest, TableId};
|
||||||
@@ -32,43 +36,102 @@ use table::metadata::{
|
|||||||
};
|
};
|
||||||
use table::thin_table::{ThinTable, ThinTableAdapter};
|
use table::thin_table::{ThinTable, ThinTableAdapter};
|
||||||
use table::TableRef;
|
use table::TableRef;
|
||||||
|
pub use table_names::*;
|
||||||
|
|
||||||
use self::columns::InformationSchemaColumns;
|
use self::columns::InformationSchemaColumns;
|
||||||
use crate::error::Result;
|
use crate::error::Result;
|
||||||
|
use crate::information_schema::memory_table::{get_schema_columns, MemoryTable};
|
||||||
use crate::information_schema::tables::InformationSchemaTables;
|
use crate::information_schema::tables::InformationSchemaTables;
|
||||||
use crate::CatalogManager;
|
use crate::CatalogManager;
|
||||||
|
|
||||||
pub const TABLES: &str = "tables";
|
lazy_static! {
|
||||||
pub const COLUMNS: &str = "columns";
|
// Memory tables in `information_schema`.
|
||||||
|
static ref MEMORY_TABLES: &'static [&'static str] = &[
|
||||||
|
ENGINES,
|
||||||
|
COLUMN_PRIVILEGES,
|
||||||
|
COLUMN_STATISTICS,
|
||||||
|
BUILD_INFO,
|
||||||
|
];
|
||||||
|
}
|
||||||
|
|
||||||
|
macro_rules! setup_memory_table {
|
||||||
|
($name: expr) => {
|
||||||
|
paste! {
|
||||||
|
{
|
||||||
|
let (schema, columns) = get_schema_columns($name);
|
||||||
|
Some(Arc::new(MemoryTable::new(
|
||||||
|
consts::[<INFORMATION_SCHEMA_ $name _TABLE_ID>],
|
||||||
|
$name,
|
||||||
|
schema,
|
||||||
|
columns
|
||||||
|
)) as _)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
};
|
||||||
|
}
|
||||||
|
|
||||||
|
/// The `information_schema` tables info provider.
pub struct InformationSchemaProvider {
    catalog_name: String,
    catalog_manager: Weak<dyn CatalogManager>,
+   tables: HashMap<String, TableRef>,
}

impl InformationSchemaProvider {
    pub fn new(catalog_name: String, catalog_manager: Weak<dyn CatalogManager>) -> Self {
-       Self {
+       let mut provider = Self {
            catalog_name,
            catalog_manager,
-       }
+           tables: HashMap::new(),
+       };
+
+       provider.build_tables();
+
+       provider
    }

-   /// Build a map of [TableRef] in information schema.
-   /// Including `tables` and `columns`.
-   pub fn build(
-       catalog_name: String,
-       catalog_manager: Weak<dyn CatalogManager>,
-   ) -> HashMap<String, TableRef> {
-       let provider = Self::new(catalog_name, catalog_manager);
-
-       let mut schema = HashMap::new();
-       schema.insert(TABLES.to_owned(), provider.table(TABLES).unwrap());
-       schema.insert(COLUMNS.to_owned(), provider.table(COLUMNS).unwrap());
-       schema
-   }
+   /// Returns table names in the order of table id.
+   pub fn table_names(&self) -> Vec<String> {
+       let mut tables = self.tables.values().clone().collect::<Vec<_>>();
+
+       tables.sort_by(|t1, t2| {
+           t1.table_info()
+               .table_id()
+               .partial_cmp(&t2.table_info().table_id())
+               .unwrap()
+       });
+       tables
+           .into_iter()
+           .map(|t| t.table_info().name.clone())
+           .collect()
+   }
+
+   /// Returns a map of [TableRef] in information schema.
+   pub fn tables(&self) -> &HashMap<String, TableRef> {
+       assert!(!self.tables.is_empty());
+
+       &self.tables
+   }

+   /// Returns the [TableRef] by table name.
    pub fn table(&self, name: &str) -> Option<TableRef> {
+       self.tables.get(name).cloned()
+   }
+
+   fn build_tables(&mut self) {
+       let mut tables = HashMap::new();
+       tables.insert(TABLES.to_string(), self.build_table(TABLES).unwrap());
+       tables.insert(COLUMNS.to_string(), self.build_table(COLUMNS).unwrap());
+
+       // Add memory tables
+       for name in MEMORY_TABLES.iter() {
+           tables.insert((*name).to_string(), self.build_table(name).unwrap());
+       }
+
+       self.tables = tables;
+   }
+
+   fn build_table(&self, name: &str) -> Option<TableRef> {
        self.information_table(name).map(|table| {
            let table_info = Self::table_info(self.catalog_name.clone(), &table);
            let filter_pushdown = FilterPushDownType::Unsupported;
@@ -89,6 +152,10 @@ impl InformationSchemaProvider {
                self.catalog_name.clone(),
                self.catalog_manager.clone(),
            )) as _),
+           ENGINES => setup_memory_table!(ENGINES),
+           COLUMN_PRIVILEGES => setup_memory_table!(COLUMN_PRIVILEGES),
+           COLUMN_STATISTICS => setup_memory_table!(COLUMN_STATISTICS),
+           BUILD_INFO => setup_memory_table!(BUILD_INFO),
            _ => None,
        }
    }
@@ -102,9 +169,9 @@ impl InformationSchemaProvider {
            .unwrap();
        let table_info = TableInfoBuilder::default()
            .table_id(table.table_id())
-           .name(table.table_name().to_owned())
+           .name(table.table_name().to_string())
            .catalog_name(catalog_name)
-           .schema_name(INFORMATION_SCHEMA_NAME.to_owned())
+           .schema_name(INFORMATION_SCHEMA_NAME.to_string())
            .meta(table_meta)
            .table_type(table.table_type())
            .build()
@@ -171,11 +238,12 @@ impl DataSource for InformationTableDataSource {
            None => batch,
        });

-       let stream = RecordBatchStreamAdaptor {
+       let stream = RecordBatchStreamWrapper {
            schema: projected_schema,
            stream: Box::pin(stream),
            output_ordering: None,
        };

        Ok(Box::pin(stream))
    }
}
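Aside (not part of the diff): with the provider now caching its tables up front, callers read from a prebuilt map instead of constructing a TableRef on every lookup. A minimal usage sketch, assuming a catalog name and a Weak<dyn CatalogManager> handle are already available:

    let provider = InformationSchemaProvider::new(catalog_name, catalog_manager);
    // Names come back ordered by table id and now include the memory tables.
    for name in provider.table_names() {
        // `table` is a cheap HashMap lookup after the constructor's build_tables().
        let _table = provider.table(&name).expect("listed table must exist");
    }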
@@ -16,8 +16,8 @@ use std::sync::{Arc, Weak};

use arrow_schema::SchemaRef as ArrowSchemaRef;
use common_catalog::consts::{
-   INFORMATION_SCHEMA_COLUMNS_TABLE_ID, INFORMATION_SCHEMA_NAME, SEMANTIC_TYPE_FIELD,
-   SEMANTIC_TYPE_PRIMARY_KEY, SEMANTIC_TYPE_TIME_INDEX,
+   INFORMATION_SCHEMA_COLUMNS_TABLE_ID, SEMANTIC_TYPE_FIELD, SEMANTIC_TYPE_PRIMARY_KEY,
+   SEMANTIC_TYPE_TIME_INDEX,
};
use common_error::ext::BoxedError;
use common_query::physical_plan::TaskContext;
@@ -33,8 +33,7 @@ use datatypes::vectors::{StringVectorBuilder, VectorRef};
use snafu::{OptionExt, ResultExt};
use store_api::storage::TableId;

-use super::tables::InformationSchemaTables;
-use super::{InformationTable, COLUMNS, TABLES};
+use super::{InformationTable, COLUMNS};
use crate::error::{
    CreateRecordBatchSnafu, InternalSnafu, Result, UpgradeWeakCatalogManagerRefSnafu,
};
@@ -102,7 +101,7 @@ impl InformationTable for InformationSchemaColumns {
            schema,
            futures::stream::once(async move {
                builder
-                   .make_tables()
+                   .make_columns()
                    .await
                    .map(|x| x.into_df_record_batch())
                    .map_err(Into::into)
@@ -148,8 +147,8 @@ impl InformationSchemaColumnsBuilder {
        }
    }

-   /// Construct the `information_schema.tables` virtual table
-   async fn make_tables(&mut self) -> Result<RecordBatch> {
+   /// Construct the `information_schema.columns` virtual table
+   async fn make_columns(&mut self) -> Result<RecordBatch> {
        let catalog_name = self.catalog_name.clone();
        let catalog_manager = self
            .catalog_manager
@@ -163,48 +162,38 @@ impl InformationSchemaColumnsBuilder {
            {
                continue;
            }

            for table_name in catalog_manager
                .table_names(&catalog_name, &schema_name)
                .await?
            {
-               let (keys, schema) = if let Some(table) = catalog_manager
+               if let Some(table) = catalog_manager
                    .table(&catalog_name, &schema_name, &table_name)
                    .await?
                {
                    let keys = &table.table_info().meta.primary_key_indices;
                    let schema = table.schema();
-                   (keys.clone(), schema)
-               } else {
-                   // TODO: this specific branch is only a workaround for FrontendCatalogManager.
-                   if schema_name == INFORMATION_SCHEMA_NAME {
-                       if table_name == COLUMNS {
-                           (vec![], InformationSchemaColumns::schema())
-                       } else if table_name == TABLES {
-                           (vec![], InformationSchemaTables::schema())
-                       } else {
-                           continue;
-                       }
-                   } else {
-                       continue;
-                   }
-               };

                    for (idx, column) in schema.column_schemas().iter().enumerate() {
                        let semantic_type = if column.is_time_index() {
                            SEMANTIC_TYPE_TIME_INDEX
                        } else if keys.contains(&idx) {
                            SEMANTIC_TYPE_PRIMARY_KEY
                        } else {
                            SEMANTIC_TYPE_FIELD
                        };
-                       self.add_column(
-                           &catalog_name,
-                           &schema_name,
-                           &table_name,
-                           &column.name,
-                           column.data_type.name(),
-                           semantic_type,
-                       );
+                       self.add_column(
+                           &catalog_name,
+                           &schema_name,
+                           &table_name,
+                           &column.name,
+                           &column.data_type.name(),
+                           semantic_type,
+                       );
+                   }
+               } else {
+                   unreachable!();
                }
            }
        }
@@ -238,6 +227,7 @@ impl InformationSchemaColumnsBuilder {
            Arc::new(self.data_types.finish()),
            Arc::new(self.semantic_types.finish()),
        ];

        RecordBatch::new(self.schema.clone(), columns).context(CreateRecordBatchSnafu)
    }
}
@@ -254,7 +244,7 @@ impl DfPartitionStream for InformationSchemaColumns {
            schema,
            futures::stream::once(async move {
                builder
-                   .make_tables()
+                   .make_columns()
                    .await
                    .map(|x| x.into_df_record_batch())
                    .map_err(Into::into)
src/catalog/src/information_schema/memory_table.rs (new file, 214 lines)
@@ -0,0 +1,214 @@
// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

mod tables;
use std::sync::Arc;

use arrow_schema::SchemaRef as ArrowSchemaRef;
use common_error::ext::BoxedError;
use common_query::physical_plan::TaskContext;
use common_recordbatch::adapter::RecordBatchStreamAdapter;
use common_recordbatch::{RecordBatch, SendableRecordBatchStream};
use datafusion::physical_plan::stream::RecordBatchStreamAdapter as DfRecordBatchStreamAdapter;
use datafusion::physical_plan::streaming::PartitionStream as DfPartitionStream;
use datafusion::physical_plan::SendableRecordBatchStream as DfSendableRecordBatchStream;
use datatypes::schema::SchemaRef;
use datatypes::vectors::VectorRef;
use snafu::ResultExt;
use store_api::storage::TableId;
pub use tables::get_schema_columns;

use crate::error::{CreateRecordBatchSnafu, InternalSnafu, Result};
use crate::information_schema::InformationTable;

/// A memory table with specified schema and columns.
pub(super) struct MemoryTable {
    table_id: TableId,
    table_name: &'static str,
    schema: SchemaRef,
    columns: Vec<VectorRef>,
}

impl MemoryTable {
    /// Creates a memory table with table id, name, schema and columns.
    pub(super) fn new(
        table_id: TableId,
        table_name: &'static str,
        schema: SchemaRef,
        columns: Vec<VectorRef>,
    ) -> Self {
        Self {
            table_id,
            table_name,
            schema,
            columns,
        }
    }

    fn builder(&self) -> MemoryTableBuilder {
        MemoryTableBuilder::new(self.schema.clone(), self.columns.clone())
    }
}

impl InformationTable for MemoryTable {
    fn table_id(&self) -> TableId {
        self.table_id
    }

    fn table_name(&self) -> &'static str {
        self.table_name
    }

    fn schema(&self) -> SchemaRef {
        self.schema.clone()
    }

    fn to_stream(&self) -> Result<SendableRecordBatchStream> {
        let schema = self.schema.arrow_schema().clone();
        let mut builder = self.builder();
        let stream = Box::pin(DfRecordBatchStreamAdapter::new(
            schema,
            futures::stream::once(async move {
                builder
                    .memory_records()
                    .await
                    .map(|x| x.into_df_record_batch())
                    .map_err(Into::into)
            }),
        ));
        Ok(Box::pin(
            RecordBatchStreamAdapter::try_new(stream)
                .map_err(BoxedError::new)
                .context(InternalSnafu)?,
        ))
    }
}

struct MemoryTableBuilder {
    schema: SchemaRef,
    columns: Vec<VectorRef>,
}

impl MemoryTableBuilder {
    fn new(schema: SchemaRef, columns: Vec<VectorRef>) -> Self {
        Self { schema, columns }
    }

    /// Construct the `information_schema.{table_name}` virtual table
    async fn memory_records(&mut self) -> Result<RecordBatch> {
        if self.columns.is_empty() {
            RecordBatch::new_empty(self.schema.clone()).context(CreateRecordBatchSnafu)
        } else {
            RecordBatch::new(self.schema.clone(), std::mem::take(&mut self.columns))
                .context(CreateRecordBatchSnafu)
        }
    }
}

impl DfPartitionStream for MemoryTable {
    fn schema(&self) -> &ArrowSchemaRef {
        self.schema.arrow_schema()
    }

    fn execute(&self, _: Arc<TaskContext>) -> DfSendableRecordBatchStream {
        let schema = self.schema.arrow_schema().clone();
        let mut builder = self.builder();
        Box::pin(DfRecordBatchStreamAdapter::new(
            schema,
            futures::stream::once(async move {
                builder
                    .memory_records()
                    .await
                    .map(|x| x.into_df_record_batch())
                    .map_err(Into::into)
            }),
        ))
    }
}

#[cfg(test)]
mod tests {
    use std::sync::Arc;

    use common_recordbatch::RecordBatches;
    use datatypes::prelude::ConcreteDataType;
    use datatypes::schema::{ColumnSchema, Schema};
    use datatypes::vectors::StringVector;

    use super::*;

    #[tokio::test]
    async fn test_memory_table() {
        let schema = Arc::new(Schema::new(vec![
            ColumnSchema::new("a", ConcreteDataType::string_datatype(), false),
            ColumnSchema::new("b", ConcreteDataType::string_datatype(), false),
        ]));

        let table = MemoryTable::new(
            42,
            "test",
            schema.clone(),
            vec![
                Arc::new(StringVector::from(vec!["a1", "a2"])),
                Arc::new(StringVector::from(vec!["b1", "b2"])),
            ],
        );

        assert_eq!(42, table.table_id());
        assert_eq!("test", table.table_name());
        assert_eq!(schema, InformationTable::schema(&table));

        let stream = table.to_stream().unwrap();

        let batches = RecordBatches::try_collect(stream).await.unwrap();

        assert_eq!(
            "\
+----+----+
| a | b |
+----+----+
| a1 | b1 |
| a2 | b2 |
+----+----+",
            batches.pretty_print().unwrap()
        );
    }

    #[tokio::test]
    async fn test_empty_memory_table() {
        let schema = Arc::new(Schema::new(vec![
            ColumnSchema::new("a", ConcreteDataType::string_datatype(), false),
            ColumnSchema::new("b", ConcreteDataType::string_datatype(), false),
        ]));

        let table = MemoryTable::new(42, "test", schema.clone(), vec![]);

        assert_eq!(42, table.table_id());
        assert_eq!("test", table.table_name());
        assert_eq!(schema, InformationTable::schema(&table));

        let stream = table.to_stream().unwrap();

        let batches = RecordBatches::try_collect(stream).await.unwrap();

        assert_eq!(
            "\
+---+---+
| a | b |
+---+---+
+---+---+",
            batches.pretty_print().unwrap()
        );
    }
}
src/catalog/src/information_schema/memory_table/tables.rs (new file, 135 lines)
@@ -0,0 +1,135 @@
// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

use std::sync::Arc;

use common_catalog::consts::MITO_ENGINE;
use datatypes::prelude::{ConcreteDataType, VectorRef};
use datatypes::schema::{ColumnSchema, Schema, SchemaRef};
use datatypes::vectors::StringVector;

use crate::information_schema::table_names::*;

const UNKNOWN: &str = "unknown";

/// Find the schema and columns by the table_name, only valid for memory tables.
/// Safety: the user MUST ensure the table schema exists, panic otherwise.
pub fn get_schema_columns(table_name: &str) -> (SchemaRef, Vec<VectorRef>) {
    let (column_schemas, columns): (_, Vec<VectorRef>) = match table_name {
        COLUMN_PRIVILEGES => (
            string_columns(&[
                "GRANTEE",
                "TABLE_CATALOG",
                "TABLE_SCHEMA",
                "TABLE_NAME",
                "COLUMN_NAME",
                "PRIVILEGE_TYPE",
                "IS_GRANTABLE",
            ]),
            vec![],
        ),

        COLUMN_STATISTICS => (
            string_columns(&[
                "SCHEMA_NAME",
                "TABLE_NAME",
                "COLUMN_NAME",
                // TODO(dennis): It must be a JSON type, but we don't support it yet
                "HISTOGRAM",
            ]),
            vec![],
        ),

        ENGINES => (
            string_columns(&[
                "ENGINE",
                "SUPPORT",
                "COMMENT",
                "TRANSACTIONS",
                "XA",
                "SAVEPOINTS",
            ]),
            vec![
                Arc::new(StringVector::from(vec![MITO_ENGINE])),
                Arc::new(StringVector::from(vec!["DEFAULT"])),
                Arc::new(StringVector::from(vec![
                    "Storage engine for time-series data",
                ])),
                Arc::new(StringVector::from(vec!["NO"])),
                Arc::new(StringVector::from(vec!["NO"])),
                Arc::new(StringVector::from(vec!["NO"])),
            ],
        ),

        BUILD_INFO => (
            string_columns(&[
                "GIT_BRANCH",
                "GIT_COMMIT",
                "GIT_COMMIT_SHORT",
                "GIT_DIRTY",
                "PKG_VERSION",
            ]),
            vec![
                Arc::new(StringVector::from(vec![
                    build_data::get_git_branch().unwrap_or_else(|_| UNKNOWN.to_string())
                ])),
                Arc::new(StringVector::from(vec![
                    build_data::get_git_commit().unwrap_or_else(|_| UNKNOWN.to_string())
                ])),
                Arc::new(StringVector::from(vec![
                    build_data::get_git_commit_short().unwrap_or_else(|_| UNKNOWN.to_string())
                ])),
                Arc::new(StringVector::from(vec![
                    build_data::get_git_dirty().map_or(UNKNOWN.to_string(), |v| v.to_string())
                ])),
                Arc::new(StringVector::from(vec![option_env!("CARGO_PKG_VERSION")])),
            ],
        ),

        _ => unreachable!("Unknown table in information_schema: {}", table_name),
    };

    (Arc::new(Schema::new(column_schemas)), columns)
}

fn string_columns(names: &[&'static str]) -> Vec<ColumnSchema> {
    names.iter().map(|name| string_column(name)).collect()
}

fn string_column(name: &str) -> ColumnSchema {
    ColumnSchema::new(
        str::to_lowercase(name),
        ConcreteDataType::string_datatype(),
        false,
    )
}

#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn test_string_columns() {
        let columns = ["a", "b", "c"];
        let column_schemas = string_columns(&columns);

        assert_eq!(3, column_schemas.len());
        for (i, name) in columns.iter().enumerate() {
            let cs = column_schemas.get(i).unwrap();

            assert_eq!(*name, cs.name);
            assert_eq!(ConcreteDataType::string_datatype(), cs.data_type);
        }
    }
}
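Aside (not part of the diff): the static schemas above are consumed by the MemoryTable plumbing from the previous file; a rough sketch of how the two fit together, with the table-id constant name assumed for illustration:

    // Build the `engines` memory table from its canned schema and rows.
    let (schema, columns) = get_schema_columns(ENGINES);
    let engines = MemoryTable::new(
        INFORMATION_SCHEMA_ENGINES_TABLE_ID, // assumed constant, produced by setup_memory_table!
        ENGINES,
        schema,
        columns,
    );
    assert_eq!(ENGINES, engines.table_name());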
@@ -12,13 +12,11 @@
// See the License for the specific language governing permissions and
// limitations under the License.

-use crate::storage::SchemaRef;
-
-/// Metadata of a region.
-pub trait RegionMeta: Send + Sync {
-    /// Returns the schema of the region.
-    fn schema(&self) -> &SchemaRef;
-
-    /// Returns the version of the region metadata.
-    fn version(&self) -> u32;
-}
+/// All table names in `information_schema`.
+
+pub const TABLES: &str = "tables";
+pub const COLUMNS: &str = "columns";
+pub const ENGINES: &str = "engines";
+pub const COLUMN_PRIVILEGES: &str = "column_privileges";
+pub const COLUMN_STATISTICS: &str = "column_statistics";
+pub const BUILD_INFO: &str = "build_info";
@@ -15,10 +15,7 @@
use std::sync::{Arc, Weak};

use arrow_schema::SchemaRef as ArrowSchemaRef;
-use common_catalog::consts::{
-   INFORMATION_SCHEMA_COLUMNS_TABLE_ID, INFORMATION_SCHEMA_NAME,
-   INFORMATION_SCHEMA_TABLES_TABLE_ID,
-};
+use common_catalog::consts::INFORMATION_SCHEMA_TABLES_TABLE_ID;
use common_error::ext::BoxedError;
use common_query::physical_plan::TaskContext;
use common_recordbatch::adapter::RecordBatchStreamAdapter;
@@ -33,7 +30,7 @@ use snafu::{OptionExt, ResultExt};
use store_api::storage::TableId;
use table::metadata::TableType;

-use super::{COLUMNS, TABLES};
+use super::TABLES;
use crate::error::{
    CreateRecordBatchSnafu, InternalSnafu, Result, UpgradeWeakCatalogManagerRefSnafu,
};
@@ -178,29 +175,8 @@ impl InformationSchemaTablesBuilder {
                    Some(&table_info.meta.engine),
                );
            } else {
-               // TODO: this specific branch is only a workaround for FrontendCatalogManager.
-               if schema_name == INFORMATION_SCHEMA_NAME {
-                   if table_name == COLUMNS {
-                       self.add_table(
-                           &catalog_name,
-                           &schema_name,
-                           &table_name,
-                           TableType::Temporary,
-                           Some(INFORMATION_SCHEMA_COLUMNS_TABLE_ID),
-                           None,
-                       );
-                   } else if table_name == TABLES {
-                       self.add_table(
-                           &catalog_name,
-                           &schema_name,
-                           &table_name,
-                           TableType::Temporary,
-                           Some(INFORMATION_SCHEMA_TABLES_TABLE_ID),
-                           None,
-                       );
-                   }
-               }
-           };
+               unreachable!();
            }
        }
    }
@@ -25,11 +25,10 @@ use common_meta::kv_backend::{KvBackend, KvBackendRef, TxnService};
use common_meta::rpc::store::{
    BatchDeleteRequest, BatchDeleteResponse, BatchGetRequest, BatchGetResponse, BatchPutRequest,
    BatchPutResponse, CompareAndPutRequest, CompareAndPutResponse, DeleteRangeRequest,
-   DeleteRangeResponse, MoveValueRequest, MoveValueResponse, PutRequest, PutResponse,
-   RangeRequest, RangeResponse,
+   DeleteRangeResponse, PutRequest, PutResponse, RangeRequest, RangeResponse,
};
use common_meta::rpc::KeyValue;
-use common_telemetry::{debug, timer};
+use common_telemetry::debug;
use meta_client::client::MetaClient;
use moka::future::{Cache, CacheBuilder};
use snafu::{OptionExt, ResultExt};
@@ -152,25 +151,11 @@ impl KvBackend for CachedMetaKvBackend {
        }
    }

-   async fn move_value(&self, req: MoveValueRequest) -> Result<MoveValueResponse> {
-       let from_key = &req.from_key.clone();
-       let to_key = &req.to_key.clone();
-
-       let ret = self.kv_backend.move_value(req).await;
-
-       if ret.is_ok() {
-           self.invalidate_key(from_key).await;
-           self.invalidate_key(to_key).await;
-       }
-
-       ret
-   }
-
    async fn get(&self, key: &[u8]) -> Result<Option<KeyValue>> {
-       let _timer = timer!(METRIC_CATALOG_KV_GET);
+       let _timer = METRIC_CATALOG_KV_GET.start_timer();

        let init = async {
-           let _timer = timer!(METRIC_CATALOG_KV_REMOTE_GET);
+           let _timer = METRIC_CATALOG_KV_REMOTE_GET.start_timer();
            self.kv_backend.get(key).await.map(|val| {
                val.with_context(|| CacheNotGetSnafu {
                    key: String::from_utf8_lossy(key),
@@ -319,14 +304,6 @@ impl KvBackend for MetaKvBackend {
            .context(ExternalSnafu)
    }

-   async fn move_value(&self, req: MoveValueRequest) -> Result<MoveValueResponse> {
-       self.client
-           .move_value(req)
-           .await
-           .map_err(BoxedError::new)
-           .context(ExternalSnafu)
-   }
-
    fn as_any(&self) -> &dyn Any {
        self
    }
@@ -19,7 +19,6 @@ use std::sync::{Arc, Weak};
use common_catalog::consts::{DEFAULT_SCHEMA_NAME, INFORMATION_SCHEMA_NAME, NUMBERS_TABLE_ID};
use common_error::ext::BoxedError;
use common_meta::cache_invalidator::{CacheInvalidator, CacheInvalidatorRef, Context};
-use common_meta::datanode_manager::DatanodeManagerRef;
use common_meta::error::Result as MetaResult;
use common_meta::key::catalog_name::CatalogNameKey;
use common_meta::key::schema_name::SchemaNameKey;
@@ -39,7 +38,7 @@ use crate::error::{
    self as catalog_err, ListCatalogsSnafu, ListSchemasSnafu, Result as CatalogResult,
    TableMetadataManagerSnafu,
};
-use crate::information_schema::{InformationSchemaProvider, COLUMNS, TABLES};
+use crate::information_schema::InformationSchemaProvider;
use crate::CatalogManager;

/// Access all existing catalog, schema and tables.
@@ -55,7 +54,6 @@ pub struct KvBackendCatalogManager {
    cache_invalidator: CacheInvalidatorRef,
    partition_manager: PartitionRuleManagerRef,
    table_metadata_manager: TableMetadataManagerRef,
-   datanode_manager: DatanodeManagerRef,
    /// A sub-CatalogManager that handles system tables
    system_catalog: SystemCatalog,
}
@@ -76,18 +74,18 @@ impl CacheInvalidator for KvBackendCatalogManager {
    }

impl KvBackendCatalogManager {
-   pub fn new(
-       backend: KvBackendRef,
-       cache_invalidator: CacheInvalidatorRef,
-       datanode_manager: DatanodeManagerRef,
-   ) -> Arc<Self> {
+   pub fn new(backend: KvBackendRef, cache_invalidator: CacheInvalidatorRef) -> Arc<Self> {
        Arc::new_cyclic(|me| Self {
            partition_manager: Arc::new(PartitionRuleManager::new(backend.clone())),
            table_metadata_manager: Arc::new(TableMetadataManager::new(backend)),
            cache_invalidator,
-           datanode_manager,
            system_catalog: SystemCatalog {
                catalog_manager: me.clone(),
+               information_schema_provider: Arc::new(InformationSchemaProvider::new(
+                   // The catalog name is not used in system_catalog, so let it empty
+                   "".to_string(),
+                   me.clone(),
+               )),
            },
        })
    }
@@ -99,10 +97,6 @@ impl KvBackendCatalogManager {
    pub fn table_metadata_manager_ref(&self) -> &TableMetadataManagerRef {
        &self.table_metadata_manager
    }
-
-   pub fn datanode_manager(&self) -> DatanodeManagerRef {
-       self.datanode_manager.clone()
-   }
}

#[async_trait::async_trait]
@@ -133,13 +127,11 @@ impl CatalogManager for KvBackendCatalogManager {
            .try_collect::<BTreeSet<_>>()
            .await
            .map_err(BoxedError::new)
-           .context(ListSchemasSnafu { catalog })?
-           .into_iter()
-           .collect::<Vec<_>>();
+           .context(ListSchemasSnafu { catalog })?;

-       keys.extend_from_slice(&self.system_catalog.schema_names());
+       keys.extend(self.system_catalog.schema_names());

-       Ok(keys)
+       Ok(keys.into_iter().collect())
    }

    async fn table_names(&self, catalog: &str, schema: &str) -> CatalogResult<Vec<String>> {
@@ -242,11 +234,11 @@ impl CatalogManager for KvBackendCatalogManager {
// a new catalog is created.
/// Existing system tables:
/// - public.numbers
-/// - information_schema.tables
-/// - information_schema.columns
+/// - information_schema.{tables}
#[derive(Clone)]
struct SystemCatalog {
    catalog_manager: Weak<KvBackendCatalogManager>,
+   information_schema_provider: Arc<InformationSchemaProvider>,
}

impl SystemCatalog {
@@ -256,7 +248,7 @@ impl SystemCatalog {

    fn table_names(&self, schema: &str) -> Vec<String> {
        if schema == INFORMATION_SCHEMA_NAME {
-           vec![TABLES.to_string(), COLUMNS.to_string()]
+           self.information_schema_provider.table_names()
        } else if schema == DEFAULT_SCHEMA_NAME {
            vec![NUMBERS_TABLE_NAME.to_string()]
        } else {
@@ -270,7 +262,7 @@ impl SystemCatalog {

    fn table_exist(&self, schema: &str, table: &str) -> bool {
        if schema == INFORMATION_SCHEMA_NAME {
-           table == TABLES || table == COLUMNS
+           self.information_schema_provider.table(table).is_some()
        } else if schema == DEFAULT_SCHEMA_NAME {
            table == NUMBERS_TABLE_NAME
        } else {
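Aside (not part of the diff, names are placeholders): with the datanode manager no longer threaded through the catalog manager, construction shrinks to two arguments:

    // `kv_backend` and `cache_invalidator` are assumed to be built elsewhere.
    let catalog_manager = KvBackendCatalogManager::new(kv_backend, cache_invalidator);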
@@ -12,7 +12,6 @@
// See the License for the specific language governing permissions and
// limitations under the License.

-#![feature(trait_upcasting)]
#![feature(assert_matches)]
#![feature(try_blocks)]
|||||||
@@ -17,8 +17,10 @@ use std::collections::hash_map::Entry;
|
|||||||
use std::collections::HashMap;
|
use std::collections::HashMap;
|
||||||
use std::sync::{Arc, RwLock, Weak};
|
use std::sync::{Arc, RwLock, Weak};
|
||||||
|
|
||||||
use common_catalog::consts::{DEFAULT_CATALOG_NAME, DEFAULT_SCHEMA_NAME, INFORMATION_SCHEMA_NAME};
|
use common_catalog::build_db_string;
|
||||||
use metrics::{decrement_gauge, increment_gauge};
|
use common_catalog::consts::{
|
||||||
|
DEFAULT_CATALOG_NAME, DEFAULT_PRIVATE_SCHEMA_NAME, DEFAULT_SCHEMA_NAME, INFORMATION_SCHEMA_NAME,
|
||||||
|
};
|
||||||
use snafu::OptionExt;
|
use snafu::OptionExt;
|
||||||
use table::TableRef;
|
use table::TableRef;
|
||||||
|
|
||||||
@@ -135,6 +137,18 @@ impl MemoryCatalogManager {
|
|||||||
schema: DEFAULT_SCHEMA_NAME.to_string(),
|
schema: DEFAULT_SCHEMA_NAME.to_string(),
|
||||||
})
|
})
|
||||||
.unwrap();
|
.unwrap();
|
||||||
|
manager
|
||||||
|
.register_schema_sync(RegisterSchemaRequest {
|
||||||
|
catalog: DEFAULT_CATALOG_NAME.to_string(),
|
||||||
|
schema: DEFAULT_PRIVATE_SCHEMA_NAME.to_string(),
|
||||||
|
})
|
||||||
|
.unwrap();
|
||||||
|
manager
|
||||||
|
.register_schema_sync(RegisterSchemaRequest {
|
||||||
|
catalog: DEFAULT_CATALOG_NAME.to_string(),
|
||||||
|
schema: INFORMATION_SCHEMA_NAME.to_string(),
|
||||||
|
})
|
||||||
|
.unwrap();
|
||||||
|
|
||||||
manager
|
manager
|
||||||
}
|
}
|
||||||
@@ -166,7 +180,7 @@ impl MemoryCatalogManager {
|
|||||||
let arc_self = Arc::new(self.clone());
|
let arc_self = Arc::new(self.clone());
|
||||||
let catalog = arc_self.create_catalog_entry(name);
|
let catalog = arc_self.create_catalog_entry(name);
|
||||||
e.insert(catalog);
|
e.insert(catalog);
|
||||||
increment_gauge!(crate::metrics::METRIC_CATALOG_MANAGER_CATALOG_COUNT, 1.0);
|
crate::metrics::METRIC_CATALOG_MANAGER_CATALOG_COUNT.inc();
|
||||||
Ok(true)
|
Ok(true)
|
||||||
}
|
}
|
||||||
Entry::Occupied(_) => Ok(false),
|
Entry::Occupied(_) => Ok(false),
|
||||||
@@ -187,11 +201,9 @@ impl MemoryCatalogManager {
|
|||||||
})?;
|
})?;
|
||||||
let result = schema.remove(&request.table_name);
|
let result = schema.remove(&request.table_name);
|
||||||
if result.is_some() {
|
if result.is_some() {
|
||||||
decrement_gauge!(
|
crate::metrics::METRIC_CATALOG_MANAGER_TABLE_COUNT
|
||||||
crate::metrics::METRIC_CATALOG_MANAGER_TABLE_COUNT,
|
.with_label_values(&[build_db_string(&request.catalog, &request.schema).as_str()])
|
||||||
1.0,
|
.dec();
|
||||||
&[crate::metrics::db_label(&request.catalog, &request.schema)],
|
|
||||||
);
|
|
||||||
}
|
}
|
||||||
Ok(())
|
Ok(())
|
||||||
}
|
}
|
||||||
@@ -210,7 +222,7 @@ impl MemoryCatalogManager {
|
|||||||
match catalog.entry(request.schema) {
|
match catalog.entry(request.schema) {
|
||||||
Entry::Vacant(e) => {
|
Entry::Vacant(e) => {
|
||||||
e.insert(HashMap::new());
|
e.insert(HashMap::new());
|
||||||
increment_gauge!(crate::metrics::METRIC_CATALOG_MANAGER_SCHEMA_COUNT, 1.0);
|
crate::metrics::METRIC_CATALOG_MANAGER_SCHEMA_COUNT.inc();
|
||||||
Ok(true)
|
Ok(true)
|
||||||
}
|
}
|
||||||
Entry::Occupied(_) => Ok(false),
|
Entry::Occupied(_) => Ok(false),
|
||||||
@@ -238,19 +250,19 @@ impl MemoryCatalogManager {
|
|||||||
.fail();
|
.fail();
|
||||||
}
|
}
|
||||||
schema.insert(request.table_name, request.table);
|
schema.insert(request.table_name, request.table);
|
||||||
increment_gauge!(
|
crate::metrics::METRIC_CATALOG_MANAGER_TABLE_COUNT
|
||||||
crate::metrics::METRIC_CATALOG_MANAGER_TABLE_COUNT,
|
.with_label_values(&[build_db_string(&request.catalog, &request.schema).as_str()])
|
||||||
1.0,
|
.inc();
|
||||||
&[crate::metrics::db_label(&request.catalog, &request.schema)],
|
|
||||||
);
|
|
||||||
Ok(true)
|
Ok(true)
|
||||||
}
|
}
|
||||||
|
|
||||||
fn create_catalog_entry(self: &Arc<Self>, catalog: String) -> SchemaEntries {
|
fn create_catalog_entry(self: &Arc<Self>, catalog: String) -> SchemaEntries {
|
||||||
let information_schema = InformationSchemaProvider::build(
|
let information_schema_provider = InformationSchemaProvider::new(
|
||||||
catalog,
|
catalog,
|
||||||
Arc::downgrade(self) as Weak<dyn CatalogManager>,
|
Arc::downgrade(self) as Weak<dyn CatalogManager>,
|
||||||
);
|
);
|
||||||
|
let information_schema = information_schema_provider.tables().clone();
|
||||||
|
|
||||||
let mut catalog = HashMap::new();
|
let mut catalog = HashMap::new();
|
||||||
catalog.insert(INFORMATION_SCHEMA_NAME.to_string(), information_schema);
|
catalog.insert(INFORMATION_SCHEMA_NAME.to_string(), information_schema);
|
||||||
catalog
|
catalog
|
||||||
|
|||||||
@@ -12,18 +12,24 @@
// See the License for the specific language governing permissions and
// limitations under the License.

-use common_catalog::build_db_string;
-
pub(crate) const METRIC_DB_LABEL: &str = "db";

-pub(crate) const METRIC_CATALOG_MANAGER_CATALOG_COUNT: &str = "catalog.catalog_count";
-pub(crate) const METRIC_CATALOG_MANAGER_SCHEMA_COUNT: &str = "catalog.schema_count";
-pub(crate) const METRIC_CATALOG_MANAGER_TABLE_COUNT: &str = "catalog.table_count";
-
-pub(crate) const METRIC_CATALOG_KV_REMOTE_GET: &str = "catalog.kv.get.remote";
-pub(crate) const METRIC_CATALOG_KV_GET: &str = "catalog.kv.get";
-
-#[inline]
-pub(crate) fn db_label(catalog: &str, schema: &str) -> (&'static str, String) {
-    (METRIC_DB_LABEL, build_db_string(catalog, schema))
-}
+use lazy_static::lazy_static;
+use prometheus::*;
+
+lazy_static! {
+    pub static ref METRIC_CATALOG_MANAGER_CATALOG_COUNT: IntGauge =
+        register_int_gauge!("catalog_catalog_count", "catalog catalog count").unwrap();
+    pub static ref METRIC_CATALOG_MANAGER_SCHEMA_COUNT: IntGauge =
+        register_int_gauge!("catalog_schema_count", "catalog schema count").unwrap();
+    pub static ref METRIC_CATALOG_MANAGER_TABLE_COUNT: IntGaugeVec = register_int_gauge_vec!(
+        "catalog_table_count",
+        "catalog table count",
+        &[METRIC_DB_LABEL]
+    )
+    .unwrap();
+    pub static ref METRIC_CATALOG_KV_REMOTE_GET: Histogram =
+        register_histogram!("catalog_kv_get_remote", "catalog kv get remote").unwrap();
+    pub static ref METRIC_CATALOG_KV_GET: Histogram =
+        register_histogram!("catalog_kv_get", "catalog kv get").unwrap();
+}
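Aside (not part of the diff): the metrics here move from string constants consumed by the `metrics` crate macros to concrete `prometheus` types registered once via `lazy_static`. The call-site pattern that goes with it looks roughly like this:

    // Gauges are bumped directly.
    crate::metrics::METRIC_CATALOG_MANAGER_CATALOG_COUNT.inc();

    // Histograms are observed through a scoped timer guard: the elapsed time is
    // recorded when `_timer` is dropped at the end of the enclosing block.
    let _timer = crate::metrics::METRIC_CATALOG_KV_GET.start_timer();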
@@ -8,44 +8,45 @@ license.workspace = true
testing = []

[dependencies]
-api = { workspace = true }
+api.workspace = true
arrow-flight.workspace = true
async-stream.workspace = true
async-trait.workspace = true
-common-base = { workspace = true }
-common-catalog = { workspace = true }
-common-error = { workspace = true }
-common-grpc = { workspace = true }
-common-macro = { workspace = true }
-common-meta = { workspace = true }
-common-query = { workspace = true }
-common-recordbatch = { workspace = true }
-common-telemetry = { workspace = true }
-common-time = { workspace = true }
+common-base.workspace = true
+common-catalog.workspace = true
+common-error.workspace = true
+common-grpc.workspace = true
+common-macro.workspace = true
+common-meta.workspace = true
+common-query.workspace = true
+common-recordbatch.workspace = true
+common-telemetry.workspace = true
+common-time.workspace = true
datafusion.workspace = true
-datatypes = { workspace = true }
+datatypes.workspace = true
derive_builder.workspace = true
enum_dispatch = "0.3"
futures-util.workspace = true
+lazy_static.workspace = true
moka = { workspace = true, features = ["future"] }
parking_lot = "0.12"
+prometheus.workspace = true
prost.workspace = true
rand.workspace = true
-session = { workspace = true }
+session.workspace = true
snafu.workspace = true
-tokio-stream = { version = "0.1", features = ["net"] }
+tokio-stream = { workspace = true, features = ["net"] }
tokio.workspace = true
tonic.workspace = true

[dev-dependencies]
-common-grpc-expr = { workspace = true }
-datanode = { workspace = true }
+common-grpc-expr.workspace = true
+datanode.workspace = true
derive-new = "0.5"
-prost.workspace = true
-substrait = { workspace = true }
+substrait.workspace = true
tracing = "0.1"
tracing-subscriber = { version = "0.3", features = ["env-filter"] }

[dev-dependencies.substrait_proto]
package = "substrait"
-version = "0.7"
+version = "0.17"
@@ -46,6 +46,7 @@ async fn run() {
                default_constraint: vec![],
                semantic_type: SemanticType::Timestamp as i32,
                comment: String::new(),
+               ..Default::default()
            },
            ColumnDef {
                name: "key".to_string(),
@@ -54,6 +55,7 @@ async fn run() {
                default_constraint: vec![],
                semantic_type: SemanticType::Tag as i32,
                comment: String::new(),
+               ..Default::default()
            },
            ColumnDef {
                name: "value".to_string(),
@@ -62,6 +64,7 @@ async fn run() {
                default_constraint: vec![],
                semantic_type: SemanticType::Field as i32,
                comment: String::new(),
+               ..Default::default()
            },
        ],
        time_index: "timestamp".to_string(),
@@ -78,7 +81,7 @@ async fn run() {

    let logical = mock_logical_plan();
    event!(Level::INFO, "plan size: {:#?}", logical.len());
-   let result = db.logical_plan(logical, 0).await.unwrap();
+   let result = db.logical_plan(logical).await.unwrap();

    event!(Level::INFO, "result: {:#?}", result);
}
@@ -139,11 +139,19 @@ impl Client {
    }

    fn max_grpc_recv_message_size(&self) -> usize {
-       self.inner.channel_manager.config().max_recv_message_size
+       self.inner
+           .channel_manager
+           .config()
+           .max_recv_message_size
+           .as_bytes() as usize
    }

    fn max_grpc_send_message_size(&self) -> usize {
-       self.inner.channel_manager.config().max_send_message_size
+       self.inner
+           .channel_manager
+           .config()
+           .max_send_message_size
+           .as_bytes() as usize
    }

    pub(crate) fn make_flight_client(&self) -> Result<FlightClient> {
@@ -27,8 +27,9 @@ use common_error::ext::{BoxedError, ErrorExt};
use common_grpc::flight::{FlightDecoder, FlightMessage};
use common_query::Output;
use common_recordbatch::error::ExternalSnafu;
-use common_recordbatch::RecordBatchStreamAdaptor;
-use common_telemetry::{logging, timer};
+use common_recordbatch::RecordBatchStreamWrapper;
+use common_telemetry::logging;
+use common_telemetry::tracing_context::W3cTrace;
use futures_util::StreamExt;
use prost::Message;
use snafu::{ensure, ResultExt};
@@ -111,12 +112,12 @@ impl Database {
    }

    pub async fn insert(&self, requests: InsertRequests) -> Result<u32> {
-       let _timer = timer!(metrics::METRIC_GRPC_INSERT);
+       let _timer = metrics::METRIC_GRPC_INSERT.start_timer();
        self.handle(Request::Inserts(requests)).await
    }

    pub async fn row_insert(&self, requests: RowInsertRequests) -> Result<u32> {
-       let _timer = timer!(metrics::METRIC_GRPC_INSERT);
+       let _timer = metrics::METRIC_GRPC_INSERT.start_timer();
        self.handle(Request::RowInserts(requests)).await
    }

@@ -141,51 +142,48 @@ impl Database {
    }

    pub async fn delete(&self, request: DeleteRequests) -> Result<u32> {
-       let _timer = timer!(metrics::METRIC_GRPC_DELETE);
+       let _timer = metrics::METRIC_GRPC_DELETE.start_timer();
        self.handle(Request::Deletes(request)).await
    }

    async fn handle(&self, request: Request) -> Result<u32> {
        let mut client = self.client.make_database_client()?.inner;
-       let request = self.to_rpc_request(request, 0);
+       let request = self.to_rpc_request(request);
        let response = client.handle(request).await?.into_inner();
        from_grpc_response(response)
    }

    #[inline]
-   fn to_rpc_request(&self, request: Request, trace_id: u64) -> GreptimeRequest {
+   fn to_rpc_request(&self, request: Request) -> GreptimeRequest {
        GreptimeRequest {
            header: Some(RequestHeader {
                catalog: self.catalog.clone(),
                schema: self.schema.clone(),
                authorization: self.ctx.auth_header.clone(),
                dbname: self.dbname.clone(),
-               trace_id,
-               span_id: 0,
+               // TODO(Taylor-lagrange): add client grpc tracing
+               tracing_context: W3cTrace::new(),
            }),
            request: Some(request),
        }
    }

-   pub async fn sql(&self, sql: &str) -> Result<Output> {
-       let _timer = timer!(metrics::METRIC_GRPC_SQL);
-       self.do_get(
-           Request::Query(QueryRequest {
-               query: Some(Query::Sql(sql.to_string())),
-           }),
-           0,
-       )
+   pub async fn sql<S>(&self, sql: S) -> Result<Output>
+   where
+       S: AsRef<str>,
+   {
+       let _timer = metrics::METRIC_GRPC_SQL.start_timer();
+       self.do_get(Request::Query(QueryRequest {
+           query: Some(Query::Sql(sql.as_ref().to_string())),
+       }))
        .await
    }

-   pub async fn logical_plan(&self, logical_plan: Vec<u8>, trace_id: u64) -> Result<Output> {
-       let _timer = timer!(metrics::METRIC_GRPC_LOGICAL_PLAN);
-       self.do_get(
-           Request::Query(QueryRequest {
-               query: Some(Query::LogicalPlan(logical_plan)),
-           }),
-           trace_id,
-       )
+   pub async fn logical_plan(&self, logical_plan: Vec<u8>) -> Result<Output> {
+       let _timer = metrics::METRIC_GRPC_LOGICAL_PLAN.start_timer();
+       self.do_get(Request::Query(QueryRequest {
+           query: Some(Query::LogicalPlan(logical_plan)),
+       }))
        .await
    }

@@ -196,69 +194,54 @@ impl Database {
        end: &str,
        step: &str,
    ) -> Result<Output> {
-       let _timer = timer!(metrics::METRIC_GRPC_PROMQL_RANGE_QUERY);
-       self.do_get(
-           Request::Query(QueryRequest {
-               query: Some(Query::PromRangeQuery(PromRangeQuery {
-                   query: promql.to_string(),
-                   start: start.to_string(),
-                   end: end.to_string(),
-                   step: step.to_string(),
-               })),
-           }),
-           0,
-       )
+       let _timer = metrics::METRIC_GRPC_PROMQL_RANGE_QUERY.start_timer();
+       self.do_get(Request::Query(QueryRequest {
+           query: Some(Query::PromRangeQuery(PromRangeQuery {
+               query: promql.to_string(),
+               start: start.to_string(),
+               end: end.to_string(),
+               step: step.to_string(),
+           })),
+       }))
        .await
    }

    pub async fn create(&self, expr: CreateTableExpr) -> Result<Output> {
-       let _timer = timer!(metrics::METRIC_GRPC_CREATE_TABLE);
-       self.do_get(
-           Request::Ddl(DdlRequest {
-               expr: Some(DdlExpr::CreateTable(expr)),
-           }),
-           0,
-       )
+       let _timer = metrics::METRIC_GRPC_CREATE_TABLE.start_timer();
+       self.do_get(Request::Ddl(DdlRequest {
+           expr: Some(DdlExpr::CreateTable(expr)),
+       }))
        .await
    }

    pub async fn alter(&self, expr: AlterExpr) -> Result<Output> {
-       let _timer = timer!(metrics::METRIC_GRPC_ALTER);
-       self.do_get(
-           Request::Ddl(DdlRequest {
-               expr: Some(DdlExpr::Alter(expr)),
-           }),
-           0,
-       )
+       let _timer = metrics::METRIC_GRPC_ALTER.start_timer();
+       self.do_get(Request::Ddl(DdlRequest {
+           expr: Some(DdlExpr::Alter(expr)),
+       }))
        .await
    }

    pub async fn drop_table(&self, expr: DropTableExpr) -> Result<Output> {
-       let _timer = timer!(metrics::METRIC_GRPC_DROP_TABLE);
-       self.do_get(
-           Request::Ddl(DdlRequest {
-               expr: Some(DdlExpr::DropTable(expr)),
-           }),
-           0,
-       )
+       let _timer = metrics::METRIC_GRPC_DROP_TABLE.start_timer();
+       self.do_get(Request::Ddl(DdlRequest {
+           expr: Some(DdlExpr::DropTable(expr)),
+       }))
        .await
    }

    pub async fn truncate_table(&self, expr: TruncateTableExpr) -> Result<Output> {
-       let _timer = timer!(metrics::METRIC_GRPC_TRUNCATE_TABLE);
-       self.do_get(
-           Request::Ddl(DdlRequest {
-               expr: Some(DdlExpr::TruncateTable(expr)),
-           }),
-           0,
-       )
+       let _timer = metrics::METRIC_GRPC_TRUNCATE_TABLE.start_timer();
+       self.do_get(Request::Ddl(DdlRequest {
+           expr: Some(DdlExpr::TruncateTable(expr)),
+       }))
        .await
    }

-   async fn do_get(&self, request: Request, trace_id: u64) -> Result<Output> {
+   async fn do_get(&self, request: Request) -> Result<Output> {
        // FIXME(paomian): should be added some labels for metrics
-       let _timer = timer!(metrics::METRIC_GRPC_DO_GET);
-       let request = self.to_rpc_request(request, trace_id);
+       let _timer = metrics::METRIC_GRPC_DO_GET.start_timer();
+       let request = self.to_rpc_request(request);
        let request = Ticket {
            ticket: request.encode_to_vec().into(),
        };
@@ -332,7 +315,7 @@ impl Database {
            yield Ok(record_batch);
        }
    }));
-   let record_batch_stream = RecordBatchStreamAdaptor {
+   let record_batch_stream = RecordBatchStreamWrapper {
        schema,
        stream,
        output_ordering: None,
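Aside (not part of the diff, `db` and `encoded_plan` are placeholders): after these signature changes the caller no longer threads a trace id by hand, and `sql` accepts anything string-like:

    // `sql` is generic over AsRef<str>, so &str, String, etc. all work.
    let output = db.sql("SELECT * FROM monitor LIMIT 10").await?;

    // `logical_plan` drops the explicit trace_id argument; the tracing context is
    // filled in by `to_rpc_request` via W3cTrace.
    let _ = db.logical_plan(encoded_plan).await?;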
@@ -131,3 +131,15 @@ impl From<Status> for Error {
        Self::Server { code, msg }
    }
}
+
+impl Error {
+    pub fn should_retry(&self) -> bool {
+        !matches!(
+            self,
+            Self::RegionServer {
+                code: Code::InvalidArgument,
+                ..
+            }
+        )
+    }
+}
|
|||||||
use api::v1::{AffectedRows, GreptimeResponse};
|
use api::v1::{AffectedRows, GreptimeResponse};
|
||||||
pub use common_catalog::consts::{DEFAULT_CATALOG_NAME, DEFAULT_SCHEMA_NAME};
|
pub use common_catalog::consts::{DEFAULT_CATALOG_NAME, DEFAULT_SCHEMA_NAME};
|
||||||
use common_error::status_code::StatusCode;
|
use common_error::status_code::StatusCode;
|
||||||
|
pub use common_query::Output;
|
||||||
|
pub use common_recordbatch::{RecordBatches, SendableRecordBatchStream};
|
||||||
use snafu::OptionExt;
|
use snafu::OptionExt;
|
||||||
|
|
||||||
pub use self::client::Client;
|
pub use self::client::Client;
|
||||||
|
|||||||
@@ -12,15 +12,34 @@
|
|||||||
// See the License for the specific language governing permissions and
|
// See the License for the specific language governing permissions and
|
||||||
// limitations under the License.
|
// limitations under the License.
|
||||||
|
|
||||||
//! client metrics
|
use lazy_static::lazy_static;
|
||||||
pub const METRIC_GRPC_CREATE_TABLE: &str = "grpc.create_table";
|
use prometheus::*;
|
||||||
pub const METRIC_GRPC_PROMQL_RANGE_QUERY: &str = "grpc.promql.range_query";
|
|
||||||
pub const METRIC_GRPC_INSERT: &str = "grpc.insert";
|
lazy_static! {
|
||||||
pub const METRIC_GRPC_DELETE: &str = "grpc.delete";
|
pub static ref METRIC_GRPC_CREATE_TABLE: Histogram =
|
||||||
pub const METRIC_GRPC_SQL: &str = "grpc.sql";
|
register_histogram!("grpc_create_table", "grpc create table").unwrap();
|
||||||
pub const METRIC_GRPC_LOGICAL_PLAN: &str = "grpc.logical_plan";
|
pub static ref METRIC_GRPC_PROMQL_RANGE_QUERY: Histogram =
|
||||||
pub const METRIC_GRPC_ALTER: &str = "grpc.alter";
|
register_histogram!("grpc_promql_range_query", "grpc promql range query").unwrap();
|
||||||
pub const METRIC_GRPC_DROP_TABLE: &str = "grpc.drop_table";
|
pub static ref METRIC_GRPC_INSERT: Histogram =
|
||||||
pub const METRIC_GRPC_TRUNCATE_TABLE: &str = "grpc.truncate_table";
|
register_histogram!("grpc_insert", "grpc insert").unwrap();
|
||||||
pub const METRIC_GRPC_DO_GET: &str = "grpc.do_get";
|
pub static ref METRIC_GRPC_DELETE: Histogram =
|
||||||
pub(crate) const METRIC_REGION_REQUEST_GRPC: &str = "grpc.region_request";
|
register_histogram!("grpc_delete", "grpc delete").unwrap();
|
||||||
|
pub static ref METRIC_GRPC_SQL: Histogram =
|
||||||
|
register_histogram!("grpc_sql", "grpc sql").unwrap();
|
||||||
|
pub static ref METRIC_GRPC_LOGICAL_PLAN: Histogram =
|
||||||
|
register_histogram!("grpc_logical_plan", "grpc logical plan").unwrap();
|
||||||
|
pub static ref METRIC_GRPC_ALTER: Histogram =
|
||||||
|
register_histogram!("grpc_alter", "grpc alter").unwrap();
|
||||||
|
pub static ref METRIC_GRPC_DROP_TABLE: Histogram =
|
||||||
|
register_histogram!("grpc_drop_table", "grpc drop table").unwrap();
|
||||||
|
pub static ref METRIC_GRPC_TRUNCATE_TABLE: Histogram =
|
||||||
|
register_histogram!("grpc_truncate_table", "grpc truncate table").unwrap();
|
||||||
|
pub static ref METRIC_GRPC_DO_GET: Histogram =
|
||||||
|
register_histogram!("grpc_do_get", "grpc do get").unwrap();
|
||||||
|
pub static ref METRIC_REGION_REQUEST_GRPC: HistogramVec = register_histogram_vec!(
|
||||||
|
"grpc_region_request",
|
||||||
|
"grpc region request",
|
||||||
|
&["request_type"]
|
||||||
|
)
|
||||||
|
.unwrap();
|
||||||
|
}
|
||||||
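A minimal sketch (not from the diff) of the pattern these prometheus metrics replace the old string-keyed timer! macro with: a lazily registered Histogram or HistogramVec hands out a timer guard that observes the elapsed time when it is dropped. The metric name, help text and function below are illustrative.

    use lazy_static::lazy_static;
    use prometheus::{register_histogram_vec, HistogramVec};

    lazy_static! {
        static ref REQUEST_ELAPSED: HistogramVec = register_histogram_vec!(
            "example_request_elapsed",
            "illustrative request histogram",
            &["request_type"]
        )
        .unwrap();
    }

    fn handle(request_type: &str) {
        // Observes elapsed seconds when `_timer` is dropped, mirroring
        // METRIC_REGION_REQUEST_GRPC.with_label_values(...).start_timer() above.
        let _timer = REQUEST_ELAPSED
            .with_label_values(&[request_type])
            .start_timer();
        // ... do the work ...
    }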
@@ -23,13 +23,12 @@ use common_grpc::flight::{FlightDecoder, FlightMessage};
use common_meta::datanode_manager::{AffectedRows, Datanode};
use common_meta::error::{self as meta_error, Result as MetaResult};
use common_recordbatch::error::ExternalSnafu;
-use common_recordbatch::{RecordBatchStreamAdaptor, SendableRecordBatchStream};
-use common_telemetry::{error, timer};
+use common_recordbatch::{RecordBatchStreamWrapper, SendableRecordBatchStream};
+use common_telemetry::error;
use prost::Message;
use snafu::{location, Location, OptionExt, ResultExt};
use tokio_stream::StreamExt;

-use crate::error::Error::RegionServer;
use crate::error::{
    self, ConvertFlightDataSnafu, IllegalDatabaseResponseSnafu, IllegalFlightMessagesSnafu,
    MissingFieldSnafu, Result, ServerSnafu,
@@ -45,7 +44,7 @@ pub struct RegionRequester {
impl Datanode for RegionRequester {
    async fn handle(&self, request: RegionRequest) -> MetaResult<AffectedRows> {
        self.handle_inner(request).await.map_err(|err| {
-            if matches!(err, RegionServer { .. }) {
+            if err.should_retry() {
                meta_error::Error::RetryLater {
                    source: BoxedError::new(err),
                }
@@ -137,7 +136,7 @@ impl RegionRequester {
                yield Ok(record_batch);
            }
        }));
-        let record_batch_stream = RecordBatchStreamAdaptor {
+        let record_batch_stream = RecordBatchStreamWrapper {
            schema,
            stream,
            output_ordering: None,
@@ -152,11 +151,9 @@ impl RegionRequester {
            .with_context(|| MissingFieldSnafu { field: "body" })?
            .as_ref()
            .to_string();
-        let _timer = timer!(
-            metrics::METRIC_REGION_REQUEST_GRPC,
-            &[("request_type", request_type)]
-        );
+        let _timer = metrics::METRIC_REGION_REQUEST_GRPC
+            .with_label_values(&[request_type.as_str()])
+            .start_timer();

        let mut client = self.client.raw_region_client()?;

@@ -10,71 +10,72 @@ name = "greptime"
path = "src/bin/greptime.rs"

[features]
-default = ["metrics-process"]
tokio-console = ["common-telemetry/tokio-console"]
-metrics-process = ["servers/metrics-process"]

[dependencies]
anymap = "1.0.0-beta.2"
async-trait.workspace = true
auth.workspace = true
-catalog = { workspace = true }
+catalog.workspace = true
chrono.workspace = true
-clap = { version = "3.1", features = ["derive"] }
+clap = { version = "4.4", features = ["derive"] }
-client = { workspace = true }
+client.workspace = true
-common-base = { workspace = true }
+common-base.workspace = true
-common-catalog = { workspace = true }
+common-catalog.workspace = true
-common-config = { workspace = true }
+common-config.workspace = true
-common-error = { workspace = true }
+common-error.workspace = true
-common-macro = { workspace = true }
+common-macro.workspace = true
-common-meta = { workspace = true }
+common-meta.workspace = true
-common-procedure = { workspace = true }
+common-procedure.workspace = true
-common-query = { workspace = true }
+common-query.workspace = true
-common-recordbatch = { workspace = true }
+common-recordbatch.workspace = true
common-telemetry = { workspace = true, features = [
    "deadlock_detection",
] }
+common-time.workspace = true
config = "0.13"
-datanode = { workspace = true }
+datanode.workspace = true
-datatypes = { workspace = true }
+datatypes.workspace = true
either = "1.8"
etcd-client.workspace = true
-file-engine = { workspace = true }
+file-engine.workspace = true
-frontend = { workspace = true }
+frontend.workspace = true
futures.workspace = true
+human-panic = "1.2.2"
lazy_static.workspace = true
-meta-client = { workspace = true }
+meta-client.workspace = true
-meta-srv = { workspace = true }
+meta-srv.workspace = true
-metrics.workspace = true
-mito2 = { workspace = true }
+mito2.workspace = true
nu-ansi-term = "0.46"
-partition = { workspace = true }
+partition.workspace = true
plugins.workspace = true
+prometheus.workspace = true
prost.workspace = true
-query = { workspace = true }
+query.workspace = true
rand.workspace = true
regex.workspace = true
rustyline = "10.1"
serde.workspace = true
serde_json.workspace = true
-servers = { workspace = true }
+servers.workspace = true
-session = { workspace = true }
+session.workspace = true
snafu.workspace = true
-substrait = { workspace = true }
-table = { workspace = true }
+store-api.workspace = true
+substrait.workspace = true
+table.workspace = true
tokio.workspace = true
+toml.workspace = true

[target.'cfg(not(windows))'.dependencies]
tikv-jemallocator = "0.5"

[dev-dependencies]
-common-test-util = { workspace = true }
+common-test-util.workspace = true
serde.workspace = true
temp-env = "0.3"
-toml.workspace = true

[target.'cfg(not(windows))'.dev-dependencies]
rexpect = "0.5"

[build-dependencies]
-common-version = { workspace = true }
+common-version.workspace = true
@@ -16,75 +16,12 @@

use std::fmt;

-use clap::Parser;
+use clap::{FromArgMatches, Parser, Subcommand};
use cmd::error::Result;
-use cmd::options::{Options, TopLevelOptions};
-use cmd::{cli, datanode, frontend, metasrv, standalone};
-use common_telemetry::logging::{error, info, TracingOptions};
-use metrics::gauge;
+use cmd::options::{CliOptions, Options};
+use cmd::{
+    cli, datanode, frontend, greptimedb_cli, log_versions, metasrv, standalone, start_app, App,
+};

-#[derive(Parser)]
-#[clap(name = "greptimedb", version = print_version())]
-struct Command {
-    #[clap(long)]
-    log_dir: Option<String>,
-    #[clap(long)]
-    log_level: Option<String>,
-    #[clap(subcommand)]
-    subcmd: SubCommand,
-
-    #[cfg(feature = "tokio-console")]
-    #[clap(long)]
-    tokio_console_addr: Option<String>,
-}
-
-pub enum Application {
-    Datanode(datanode::Instance),
-    Frontend(frontend::Instance),
-    Metasrv(metasrv::Instance),
-    Standalone(standalone::Instance),
-    Cli(cli::Instance),
-}
-
-impl Application {
-    async fn start(&mut self) -> Result<()> {
-        match self {
-            Application::Datanode(instance) => instance.start().await,
-            Application::Frontend(instance) => instance.start().await,
-            Application::Metasrv(instance) => instance.start().await,
-            Application::Standalone(instance) => instance.start().await,
-            Application::Cli(instance) => instance.start().await,
-        }
-    }
-
-    async fn stop(&self) -> Result<()> {
-        match self {
-            Application::Datanode(instance) => instance.stop().await,
-            Application::Frontend(instance) => instance.stop().await,
-            Application::Metasrv(instance) => instance.stop().await,
-            Application::Standalone(instance) => instance.stop().await,
-            Application::Cli(instance) => instance.stop().await,
-        }
-    }
-}
-
-impl Command {
-    async fn build(self, opts: Options) -> Result<Application> {
-        self.subcmd.build(opts).await
-    }
-
-    fn load_options(&self) -> Result<Options> {
-        let top_level_opts = self.top_level_options();
-        self.subcmd.load_options(top_level_opts)
-    }
-
-    fn top_level_options(&self) -> TopLevelOptions {
-        TopLevelOptions {
-            log_dir: self.log_dir.clone(),
-            log_level: self.log_level.clone(),
-        }
-    }
-}
-
#[derive(Parser)]
enum SubCommand {
@@ -101,40 +38,41 @@ enum SubCommand {
}

impl SubCommand {
-    async fn build(self, opts: Options) -> Result<Application> {
-        match (self, opts) {
+    async fn build(self, opts: Options) -> Result<Box<dyn App>> {
+        let app: Box<dyn App> = match (self, opts) {
            (SubCommand::Datanode(cmd), Options::Datanode(dn_opts)) => {
                let app = cmd.build(*dn_opts).await?;
-                Ok(Application::Datanode(app))
+                Box::new(app) as _
            }
            (SubCommand::Frontend(cmd), Options::Frontend(fe_opts)) => {
                let app = cmd.build(*fe_opts).await?;
-                Ok(Application::Frontend(app))
+                Box::new(app) as _
            }
            (SubCommand::Metasrv(cmd), Options::Metasrv(meta_opts)) => {
                let app = cmd.build(*meta_opts).await?;
-                Ok(Application::Metasrv(app))
+                Box::new(app) as _
            }
            (SubCommand::Standalone(cmd), Options::Standalone(opts)) => {
                let app = cmd.build(*opts).await?;
-                Ok(Application::Standalone(app))
+                Box::new(app) as _
            }
            (SubCommand::Cli(cmd), Options::Cli(_)) => {
                let app = cmd.build().await?;
-                Ok(Application::Cli(app))
+                Box::new(app) as _
            }

            _ => unreachable!(),
-        }
+        };
+        Ok(app)
    }

-    fn load_options(&self, top_level_opts: TopLevelOptions) -> Result<Options> {
+    fn load_options(&self, cli_options: &CliOptions) -> Result<Options> {
        match self {
-            SubCommand::Datanode(cmd) => cmd.load_options(top_level_opts),
-            SubCommand::Frontend(cmd) => cmd.load_options(top_level_opts),
-            SubCommand::Metasrv(cmd) => cmd.load_options(top_level_opts),
-            SubCommand::Standalone(cmd) => cmd.load_options(top_level_opts),
-            SubCommand::Cli(cmd) => cmd.load_options(top_level_opts),
+            SubCommand::Datanode(cmd) => cmd.load_options(cli_options),
+            SubCommand::Frontend(cmd) => cmd.load_options(cli_options),
+            SubCommand::Metasrv(cmd) => cmd.load_options(cli_options),
+            SubCommand::Standalone(cmd) => cmd.load_options(cli_options),
+            SubCommand::Cli(cmd) => cmd.load_options(cli_options),
        }
    }
}
@@ -151,88 +89,49 @@ impl fmt::Display for SubCommand {
    }
}

-fn print_version() -> &'static str {
-    concat!(
-        "\nbranch: ",
-        env!("GIT_BRANCH"),
-        "\ncommit: ",
-        env!("GIT_COMMIT"),
-        "\ndirty: ",
-        env!("GIT_DIRTY"),
-        "\nversion: ",
-        env!("CARGO_PKG_VERSION")
-    )
-}
-
-fn short_version() -> &'static str {
-    env!("CARGO_PKG_VERSION")
-}
-
-// {app_name}-{branch_name}-{commit_short}
-// The branch name (tag) of a release build should already contain the short
-// version so the full version doesn't concat the short version explicitly.
-fn full_version() -> &'static str {
-    concat!(
-        "greptimedb-",
-        env!("GIT_BRANCH"),
-        "-",
-        env!("GIT_COMMIT_SHORT")
-    )
-}
-
-fn log_env_flags() {
-    info!("command line arguments");
-    for argument in std::env::args() {
-        info!("argument: {}", argument);
-    }
-}
-
#[cfg(not(windows))]
#[global_allocator]
static ALLOC: tikv_jemallocator::Jemalloc = tikv_jemallocator::Jemalloc;

#[tokio::main]
async fn main() -> Result<()> {
-    let cmd = Command::parse();
-    let app_name = &cmd.subcmd.to_string();
-    let opts = cmd.load_options()?;
-    let logging_opts = opts.logging_options();
-    let tracing_opts = TracingOptions {
-        #[cfg(feature = "tokio-console")]
-        tokio_console_addr: cmd.tokio_console_addr.clone(),
+    let metadata = human_panic::Metadata {
+        version: env!("CARGO_PKG_VERSION").into(),
+        name: "GreptimeDB".into(),
+        authors: Default::default(),
+        homepage: "https://github.com/GreptimeTeam/greptimedb/discussions".into(),
    };
+    human_panic::setup_panic!(metadata);

    common_telemetry::set_panic_hook();
-    common_telemetry::init_default_metrics_recorder();
-    let _guard = common_telemetry::init_global_logging(app_name, logging_opts, tracing_opts);

-    // Report app version as gauge.
-    gauge!("app_version", 1.0, "short_version" => short_version(), "version" => full_version());
+    let cli = greptimedb_cli();

-    // Log version and argument flags.
-    info!(
-        "short_version: {}, full_version: {}",
-        short_version(),
-        full_version()
+    let cli = SubCommand::augment_subcommands(cli);
+    let args = cli.get_matches();
+    let subcmd = match SubCommand::from_arg_matches(&args) {
+        Ok(subcmd) => subcmd,
+        Err(e) => e.exit(),
+    };

+    let app_name = subcmd.to_string();

+    let cli_options = CliOptions::new(&args);

+    let opts = subcmd.load_options(&cli_options)?;

+    let _guard = common_telemetry::init_global_logging(
+        &app_name,
+        opts.logging_options(),
+        cli_options.tracing_options(),
+        opts.node_id(),
    );
-    log_env_flags();

-    let mut app = cmd.build(opts).await?;
+    log_versions();

-    tokio::select! {
-        result = app.start() => {
-            if let Err(err) = result {
-                error!(err; "Fatal error occurs!");
-            }
-        }
-        _ = tokio::signal::ctrl_c() => {
-            if let Err(err) = app.stop().await {
-                error!(err; "Fatal error occurs!");
-            }
-            info!("Goodbye!");
-        }
-    }
+    let app = subcmd.build(opts).await?;

-    Ok(())
+    start_app(app).await
}
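A compact sketch (assumptions marked) of the clap v4 pattern the new main() relies on: a builder-style clap::Command is created elsewhere (greptimedb_cli() in the diff), the derive-generated subcommands are grafted onto it with augment_subcommands, and the typed value is recovered with from_arg_matches. The Demo/StartDemo names below are made up for illustration.

    use clap::{ArgMatches, Command, FromArgMatches, Parser, Subcommand};

    #[derive(Parser)]
    struct StartDemo {
        #[clap(long)]
        addr: Option<String>,
    }

    // Stand-in for the real SubCommand enum; only the wiring pattern matters here.
    #[derive(Subcommand)]
    enum Demo {
        Start(StartDemo),
    }

    fn main() {
        // Build a plain Command, then graft the derived subcommands onto it.
        let cli = Command::new("demo").subcommand_required(true);
        let cli = Demo::augment_subcommands(cli);
        let args: ArgMatches = cli.get_matches();
        // Recover the typed enum, exiting with a usage error on failure.
        let subcmd = match Demo::from_arg_matches(&args) {
            Ok(subcmd) => subcmd,
            Err(e) => e.exit(),
        };
        match subcmd {
            Demo::Start(opts) => println!("start with addr {:?}", opts.addr),
        }
    }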
@@ -13,8 +13,15 @@
// limitations under the License.

mod bench;

+// Wait for https://github.com/GreptimeTeam/greptimedb/issues/2373
+#[allow(unused)]
mod cmd;
+mod export;
mod helper;

+// Wait for https://github.com/GreptimeTeam/greptimedb/issues/2373
+#[allow(unused)]
mod repl;
// TODO(weny): Removes it
#[allow(deprecated)]
@@ -27,28 +34,37 @@ use common_telemetry::logging::LoggingOptions;
pub use repl::Repl;
use upgrade::UpgradeCommand;

+use self::export::ExportCommand;
use crate::error::Result;
-use crate::options::{Options, TopLevelOptions};
+use crate::options::{CliOptions, Options};
+use crate::App;

#[async_trait]
-pub trait Tool {
+pub trait Tool: Send + Sync {
    async fn do_work(&self) -> Result<()>;
}

-pub enum Instance {
-    Repl(Repl),
-    Tool(Box<dyn Tool>),
+pub struct Instance {
+    tool: Box<dyn Tool>,
}

impl Instance {
-    pub async fn start(&mut self) -> Result<()> {
-        match self {
-            Instance::Repl(repl) => repl.run().await,
-            Instance::Tool(tool) => tool.do_work().await,
-        }
+    fn new(tool: Box<dyn Tool>) -> Self {
+        Self { tool }
+    }
+}
+
+#[async_trait]
+impl App for Instance {
+    fn name(&self) -> &str {
+        "greptime-cli"
    }

-    pub async fn stop(&self) -> Result<()> {
+    async fn start(&mut self) -> Result<()> {
+        self.tool.do_work().await
+    }
+
+    async fn stop(&self) -> Result<()> {
        Ok(())
    }
}
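Not part of the diff: a hypothetical Tool implementation showing how a new CLI tool plugs into the refactored Instance/Tool shape above. PrintVersion is an invented name; Tool, Result and Instance come from the module in the diff.

    use async_trait::async_trait;

    struct PrintVersion;

    #[async_trait]
    impl Tool for PrintVersion {
        // The whole CLI run is just this tool's do_work(), driven by App::start().
        async fn do_work(&self) -> Result<()> {
            println!("{}", env!("CARGO_PKG_VERSION"));
            Ok(())
        }
    }

    // A command's build() would then wrap it the same way UpgradeCommand and
    // ExportCommand do below: Ok(Instance::new(Box::new(PrintVersion)))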
@@ -64,31 +80,34 @@ impl Command {
        self.cmd.build().await
    }

-    pub fn load_options(&self, top_level_opts: TopLevelOptions) -> Result<Options> {
+    pub fn load_options(&self, cli_options: &CliOptions) -> Result<Options> {
        let mut logging_opts = LoggingOptions::default();
-        if let Some(dir) = top_level_opts.log_dir {
-            logging_opts.dir = dir;
-        }
-        if top_level_opts.log_level.is_some() {
-            logging_opts.level = top_level_opts.log_level;
+        if let Some(dir) = &cli_options.log_dir {
+            logging_opts.dir = dir.clone();
        }

+        logging_opts.level = cli_options.log_level.clone();

        Ok(Options::Cli(Box::new(logging_opts)))
    }
}

#[derive(Parser)]
enum SubCommand {
-    Attach(AttachCommand),
+    // Attach(AttachCommand),
    Upgrade(UpgradeCommand),
    Bench(BenchTableMetadataCommand),
+    Export(ExportCommand),
}

impl SubCommand {
    async fn build(self) -> Result<Instance> {
        match self {
-            SubCommand::Attach(cmd) => cmd.build().await,
+            // SubCommand::Attach(cmd) => cmd.build().await,
            SubCommand::Upgrade(cmd) => cmd.build().await,
            SubCommand::Bench(cmd) => cmd.build().await,
+            SubCommand::Export(cmd) => cmd.build().await,
        }
    }
}
@@ -104,51 +123,8 @@ pub(crate) struct AttachCommand {
}

impl AttachCommand {
+    #[allow(dead_code)]
    async fn build(self) -> Result<Instance> {
-        let repl = Repl::try_new(&self).await?;
-        Ok(Instance::Repl(repl))
-    }
-}
-
-#[cfg(test)]
-mod tests {
-    use super::*;
-
-    #[test]
-    fn test_load_options() {
-        let cmd = Command {
-            cmd: SubCommand::Attach(AttachCommand {
-                grpc_addr: String::from(""),
-                meta_addr: None,
-                disable_helper: false,
-            }),
-        };
-
-        let opts = cmd.load_options(TopLevelOptions::default()).unwrap();
-        let logging_opts = opts.logging_options();
-        assert_eq!("/tmp/greptimedb/logs", logging_opts.dir);
-        assert!(logging_opts.level.is_none());
-        assert!(!logging_opts.enable_jaeger_tracing);
-    }
-
-    #[test]
-    fn test_top_level_options() {
-        let cmd = Command {
-            cmd: SubCommand::Attach(AttachCommand {
-                grpc_addr: String::from(""),
-                meta_addr: None,
-                disable_helper: false,
-            }),
-        };
-
-        let opts = cmd
-            .load_options(TopLevelOptions {
-                log_dir: Some("/tmp/greptimedb/test/logs".to_string()),
-                log_level: Some("debug".to_string()),
-            })
-            .unwrap();
-        let logging_opts = opts.logging_options();
-        assert_eq!("/tmp/greptimedb/test/logs", logging_opts.dir);
-        assert_eq!("debug", logging_opts.level.as_ref().unwrap());
+        unimplemented!("Wait for https://github.com/GreptimeTeam/greptimedb/issues/2373")
    }
}
@@ -12,7 +12,7 @@
// See the License for the specific language governing permissions and
// limitations under the License.

-use std::collections::BTreeMap;
+use std::collections::{BTreeMap, HashMap};
use std::future::Future;
use std::sync::Arc;
use std::time::Duration;
@@ -20,15 +20,15 @@ use std::time::Duration;
use async_trait::async_trait;
use clap::Parser;
use common_meta::key::{TableMetadataManager, TableMetadataManagerRef};
+use common_meta::kv_backend::etcd::EtcdStore;
use common_meta::peer::Peer;
use common_meta::rpc::router::{Region, RegionRoute};
use common_meta::table_name::TableName;
use common_telemetry::info;
use datatypes::data_type::ConcreteDataType;
use datatypes::schema::{ColumnSchema, RawSchema};
-use meta_srv::service::store::etcd::EtcdStore;
-use meta_srv::service::store::kv::KvBackendAdapter;
use rand::Rng;
+use store_api::storage::RegionNumber;
use table::metadata::{RawTableInfo, RawTableMeta, TableId, TableIdent, TableType};

use self::metadata::TableMetadataBencher;
@@ -64,15 +64,13 @@ impl BenchTableMetadataCommand {
    pub async fn build(&self) -> Result<Instance> {
        let etcd_store = EtcdStore::with_endpoints([&self.etcd_addr]).await.unwrap();

-        let table_metadata_manager = Arc::new(TableMetadataManager::new(KvBackendAdapter::wrap(
-            etcd_store,
-        )));
+        let table_metadata_manager = Arc::new(TableMetadataManager::new(etcd_store));

        let tool = BenchTableMetadata {
            table_metadata_manager,
            count: self.count,
        };
-        Ok(Instance::Tool(Box::new(tool)))
+        Ok(Instance::new(Box::new(tool)))
    }
}

@@ -140,12 +138,12 @@ fn create_table_info(table_id: TableId, table_name: TableName) -> RawTableInfo {
    }
}

-fn create_region_routes() -> Vec<RegionRoute> {
-    let mut regions = Vec::with_capacity(100);
+fn create_region_routes(regions: Vec<RegionNumber>) -> Vec<RegionRoute> {
+    let mut region_routes = Vec::with_capacity(100);
    let mut rng = rand::thread_rng();

-    for region_id in 0..64u64 {
-        regions.push(RegionRoute {
+    for region_id in regions.into_iter().map(u64::from) {
+        region_routes.push(RegionRoute {
            region: Region {
                id: region_id.into(),
                name: String::new(),
@@ -157,8 +155,15 @@ fn create_region_routes() -> Vec<RegionRoute> {
                addr: String::new(),
            }),
            follower_peers: vec![],
+            leader_status: None,
        });
    }

-    regions
+    region_routes
+}
+
+fn create_region_wal_options(regions: Vec<RegionNumber>) -> HashMap<RegionNumber, String> {
+    // TODO(niebayes): construct region wal options for benchmark.
+    let _ = regions;
+    HashMap::default()
}
@@ -14,10 +14,13 @@

use std::time::Instant;

+use common_meta::key::table_route::TableRouteValue;
use common_meta::key::TableMetadataManagerRef;
use common_meta::table_name::TableName;

-use super::{bench_self_recorded, create_region_routes, create_table_info};
+use crate::cli::bench::{
+    bench_self_recorded, create_region_routes, create_region_wal_options, create_table_info,
+};

pub struct TableMetadataBencher {
    table_metadata_manager: TableMetadataManagerRef,
@@ -43,12 +46,19 @@ impl TableMetadataBencher {
        let table_name = format!("bench_table_name_{}", i);
        let table_name = TableName::new("bench_catalog", "bench_schema", table_name);
        let table_info = create_table_info(i, table_name);
-        let region_routes = create_region_routes();
+        let regions: Vec<_> = (0..64).collect();
+        let region_routes = create_region_routes(regions.clone());
+        let region_wal_options = create_region_wal_options(regions);

        let start = Instant::now();

        self.table_metadata_manager
-            .create_table_metadata(table_info, region_routes)
+            .create_table_metadata(
+                table_info,
+                TableRouteValue::physical(region_routes),
+                region_wal_options,
+            )
            .await
            .unwrap();
src/cmd/src/cli/export.rs (new file, 420 lines)
@@ -0,0 +1,420 @@
// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

use std::path::Path;
use std::sync::Arc;

use async_trait::async_trait;
use clap::{Parser, ValueEnum};
use client::api::v1::auth_header::AuthScheme;
use client::api::v1::Basic;
use client::{Client, Database, DEFAULT_SCHEMA_NAME};
use common_query::Output;
use common_recordbatch::util::collect;
use common_telemetry::{debug, error, info, warn};
use datatypes::scalars::ScalarVector;
use datatypes::vectors::{StringVector, Vector};
use snafu::{OptionExt, ResultExt};
use tokio::fs::File;
use tokio::io::{AsyncWriteExt, BufWriter};
use tokio::sync::Semaphore;

use crate::cli::{Instance, Tool};
use crate::error::{
    CollectRecordBatchesSnafu, ConnectServerSnafu, EmptyResultSnafu, Error, FileIoSnafu,
    IllegalConfigSnafu, InvalidDatabaseNameSnafu, NotDataFromOutputSnafu, RequestDatabaseSnafu,
    Result,
};

type TableReference = (String, String, String);

#[derive(Debug, Default, Clone, ValueEnum)]
enum ExportTarget {
    /// Corresponding to `SHOW CREATE TABLE`
    #[default]
    CreateTable,
    /// Corresponding to `EXPORT TABLE`
    TableData,
}

#[derive(Debug, Default, Parser)]
pub struct ExportCommand {
    /// Server address to connect
    #[clap(long)]
    addr: String,

    /// Directory to put the exported data. E.g.: /tmp/greptimedb-export
    #[clap(long)]
    output_dir: String,

    /// The name of the catalog to export. Default to "greptime-*"".
    #[clap(long, default_value = "")]
    database: String,

    /// Parallelism of the export.
    #[clap(long, short = 'j', default_value = "1")]
    export_jobs: usize,

    /// Max retry times for each job.
    #[clap(long, default_value = "3")]
    max_retry: usize,

    /// Things to export
    #[clap(long, short = 't', value_enum)]
    target: ExportTarget,

    /// basic authentication for connecting to the server
    #[clap(long)]
    auth_basic: Option<String>,
}

impl ExportCommand {
    pub async fn build(&self) -> Result<Instance> {
        let client = Client::with_urls([self.addr.clone()]);
        client
            .health_check()
            .await
            .with_context(|_| ConnectServerSnafu {
                addr: self.addr.clone(),
            })?;
        let (catalog, schema) = split_database(&self.database)?;
        let mut database_client = Database::new(
            catalog.clone(),
            schema.clone().unwrap_or(DEFAULT_SCHEMA_NAME.to_string()),
            client,
        );

        if let Some(auth_basic) = &self.auth_basic {
            let (username, password) = auth_basic.split_once(':').context(IllegalConfigSnafu {
                msg: "auth_basic cannot be split by ':'".to_string(),
            })?;
            database_client.set_auth(AuthScheme::Basic(Basic {
                username: username.to_string(),
                password: password.to_string(),
            }));
        }

        Ok(Instance::new(Box::new(Export {
            client: database_client,
            catalog,
            schema,
            output_dir: self.output_dir.clone(),
            parallelism: self.export_jobs,
            target: self.target.clone(),
        })))
    }
}

pub struct Export {
    client: Database,
    catalog: String,
    schema: Option<String>,
    output_dir: String,
    parallelism: usize,
    target: ExportTarget,
}

impl Export {
    /// Iterate over all db names.
    ///
    /// Newbie: `db_name` is catalog + schema.
    async fn iter_db_names(&self) -> Result<Vec<(String, String)>> {
        if let Some(schema) = &self.schema {
            Ok(vec![(self.catalog.clone(), schema.clone())])
        } else {
            let mut client = self.client.clone();
            client.set_catalog(self.catalog.clone());
            let result =
                client
                    .sql("show databases")
                    .await
                    .with_context(|_| RequestDatabaseSnafu {
                        sql: "show databases".to_string(),
                    })?;
            let Output::Stream(stream) = result else {
                NotDataFromOutputSnafu.fail()?
            };
            let record_batch = collect(stream)
                .await
                .context(CollectRecordBatchesSnafu)?
                .pop()
                .context(EmptyResultSnafu)?;
            let schemas = record_batch
                .column(0)
                .as_any()
                .downcast_ref::<StringVector>()
                .unwrap();
            let mut result = Vec::with_capacity(schemas.len());
            for i in 0..schemas.len() {
                let schema = schemas.get_data(i).unwrap().to_owned();
                if schema == common_catalog::consts::INFORMATION_SCHEMA_NAME {
                    continue;
                }
                result.push((self.catalog.clone(), schema));
            }
            Ok(result)
        }
    }

    /// Return a list of [`TableReference`] to be exported.
    /// Includes all tables under the given `catalog` and `schema`
    async fn get_table_list(&self, catalog: &str, schema: &str) -> Result<Vec<TableReference>> {
        // TODO: SQL injection hurts
        let sql = format!(
            "select table_catalog, table_schema, table_name from \
            information_schema.tables where table_type = \'BASE TABLE\'\
            and table_catalog = \'{catalog}\' and table_schema = \'{schema}\'",
        );
        let mut client = self.client.clone();
        client.set_catalog(catalog);
        client.set_schema(schema);
        let result = client
            .sql(&sql)
            .await
            .with_context(|_| RequestDatabaseSnafu { sql })?;
        let Output::Stream(stream) = result else {
            NotDataFromOutputSnafu.fail()?
        };
        let Some(record_batch) = collect(stream)
            .await
            .context(CollectRecordBatchesSnafu)?
            .pop()
        else {
            return Ok(vec![]);
        };

        debug!("Fetched table list: {}", record_batch.pretty_print());

        if record_batch.num_rows() == 0 {
            return Ok(vec![]);
        }

        let mut result = Vec::with_capacity(record_batch.num_rows());
        let catalog_column = record_batch
            .column(0)
            .as_any()
            .downcast_ref::<StringVector>()
            .unwrap();
        let schema_column = record_batch
            .column(1)
            .as_any()
            .downcast_ref::<StringVector>()
            .unwrap();
        let table_column = record_batch
            .column(2)
            .as_any()
            .downcast_ref::<StringVector>()
            .unwrap();
        for i in 0..record_batch.num_rows() {
            let catalog = catalog_column.get_data(i).unwrap().to_owned();
            let schema = schema_column.get_data(i).unwrap().to_owned();
            let table = table_column.get_data(i).unwrap().to_owned();
            result.push((catalog, schema, table));
        }

        Ok(result)
    }

    async fn show_create_table(&self, catalog: &str, schema: &str, table: &str) -> Result<String> {
        let sql = format!("show create table {}.{}.{}", catalog, schema, table);
        let mut client = self.client.clone();
        client.set_catalog(catalog);
        client.set_schema(schema);
        let result = client
            .sql(&sql)
            .await
            .with_context(|_| RequestDatabaseSnafu { sql })?;
        let Output::Stream(stream) = result else {
            NotDataFromOutputSnafu.fail()?
        };
        let record_batch = collect(stream)
            .await
            .context(CollectRecordBatchesSnafu)?
            .pop()
            .context(EmptyResultSnafu)?;
        let create_table = record_batch
            .column(1)
            .as_any()
            .downcast_ref::<StringVector>()
            .unwrap()
            .get_data(0)
            .unwrap();

        Ok(format!("{create_table};\n"))
    }

    async fn export_create_table(&self) -> Result<()> {
        let semaphore = Arc::new(Semaphore::new(self.parallelism));
        let db_names = self.iter_db_names().await?;
        let db_count = db_names.len();
        let mut tasks = Vec::with_capacity(db_names.len());
        for (catalog, schema) in db_names {
            let semaphore_moved = semaphore.clone();
            tasks.push(async move {
                let _permit = semaphore_moved.acquire().await.unwrap();
                let table_list = self.get_table_list(&catalog, &schema).await?;
                let table_count = table_list.len();
                tokio::fs::create_dir_all(&self.output_dir)
                    .await
                    .context(FileIoSnafu)?;
                let output_file =
                    Path::new(&self.output_dir).join(format!("{catalog}-{schema}.sql"));
                let mut file = File::create(output_file).await.context(FileIoSnafu)?;
                for (c, s, t) in table_list {
                    match self.show_create_table(&c, &s, &t).await {
                        Err(e) => {
                            error!(e; "Failed to export table {}.{}.{}", c, s, t)
                        }
                        Ok(create_table) => {
                            file.write_all(create_table.as_bytes())
                                .await
                                .context(FileIoSnafu)?;
                        }
                    }
                }
                info!("finished exporting {catalog}.{schema} with {table_count} tables",);
                Ok::<(), Error>(())
            });
        }

        let success = futures::future::join_all(tasks)
            .await
            .into_iter()
            .filter(|r| match r {
                Ok(_) => true,
                Err(e) => {
                    error!(e; "export job failed");
                    false
                }
            })
            .count();

        info!("success {success}/{db_count} jobs");

        Ok(())
    }

    async fn export_table_data(&self) -> Result<()> {
        let semaphore = Arc::new(Semaphore::new(self.parallelism));
        let db_names = self.iter_db_names().await?;
        let db_count = db_names.len();
        let mut tasks = Vec::with_capacity(db_names.len());
        for (catalog, schema) in db_names {
            let semaphore_moved = semaphore.clone();
            tasks.push(async move {
                let _permit = semaphore_moved.acquire().await.unwrap();
                tokio::fs::create_dir_all(&self.output_dir)
                    .await
                    .context(FileIoSnafu)?;
                let output_dir = Path::new(&self.output_dir).join(format!("{catalog}-{schema}/"));

                let mut client = self.client.clone();
                client.set_catalog(catalog.clone());
                client.set_schema(schema.clone());

                // copy database to
                let sql = format!(
                    "copy database {} to '{}' with (format='parquet');",
                    schema,
                    output_dir.to_str().unwrap()
                );
                client
                    .sql(sql.clone())
                    .await
                    .context(RequestDatabaseSnafu { sql })?;
                info!("finished exporting {catalog}.{schema} data");

                // export copy from sql
                let dir_filenames = match output_dir.read_dir() {
                    Ok(dir) => dir,
                    Err(_) => {
                        warn!("empty database {catalog}.{schema}");
                        return Ok(());
                    }
                };

                let copy_from_file =
                    Path::new(&self.output_dir).join(format!("{catalog}-{schema}_copy_from.sql"));
                let mut writer =
                    BufWriter::new(File::create(copy_from_file).await.context(FileIoSnafu)?);

                for table_file in dir_filenames {
                    let table_file = table_file.unwrap();
                    let table_name = table_file
                        .file_name()
                        .into_string()
                        .unwrap()
                        .replace(".parquet", "");

                    writer
                        .write(
                            format!(
                                "copy {} from '{}' with (format='parquet');\n",
                                table_name,
                                table_file.path().to_str().unwrap()
                            )
                            .as_bytes(),
                        )
                        .await
                        .context(FileIoSnafu)?;
                }
                writer.flush().await.context(FileIoSnafu)?;

                info!("finished exporting {catalog}.{schema} copy_from.sql");

                Ok::<(), Error>(())
            });
        }

        let success = futures::future::join_all(tasks)
            .await
            .into_iter()
            .filter(|r| match r {
                Ok(_) => true,
                Err(e) => {
                    error!(e; "export job failed");
                    false
                }
            })
            .count();

        info!("success {success}/{db_count} jobs");

        Ok(())
    }
}

#[async_trait]
impl Tool for Export {
    async fn do_work(&self) -> Result<()> {
        match self.target {
            ExportTarget::CreateTable => self.export_create_table().await,
            ExportTarget::TableData => self.export_table_data().await,
        }
    }
}

/// Split at `-`.
fn split_database(database: &str) -> Result<(String, Option<String>)> {
    let (catalog, schema) = database
        .split_once('-')
        .with_context(|| InvalidDatabaseNameSnafu {
            database: database.to_string(),
        })?;
    if schema == "*" {
        Ok((catalog.to_string(), None))
    } else {
        Ok((catalog.to_string(), Some(schema.to_string())))
    }
}
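A small usage sketch (not in the diff) of the split_database helper above; the input strings are made up, the expected results follow directly from the code shown.

    assert_eq!(
        split_database("greptime-public").unwrap(),
        ("greptime".to_string(), Some("public".to_string()))
    );
    // A trailing "*" selects every schema in the catalog, so the schema part is None.
    assert_eq!(split_database("greptime-*").unwrap(), ("greptime".to_string(), None));
    // A name without '-' fails with InvalidDatabaseName.
    assert!(split_database("greptime").is_err());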
@@ -17,7 +17,6 @@ use std::sync::Arc;
use std::time::Instant;

use catalog::kvbackend::{CachedMetaKvBackend, KvBackendCatalogManager};
-use client::client_manager::DatanodeClients;
use client::{Client, Database, DEFAULT_CATALOG_NAME, DEFAULT_SCHEMA_NAME};
use common_base::Plugins;
use common_error::ext::ErrorExt;
@@ -176,7 +175,7 @@ impl Repl {
            .encode(&plan)
            .context(SubstraitEncodeLogicalPlanSnafu)?;

-            self.database.logical_plan(plan.to_vec(), 0).await
+            self.database.logical_plan(plan.to_vec()).await
        } else {
            self.database.sql(&sql).await
        }
@@ -250,13 +249,8 @@ async fn create_query_engine(meta_addr: &str) -> Result<DatafusionQueryEngine> {

    let cached_meta_backend = Arc::new(CachedMetaKvBackend::new(meta_client.clone()));

-    let datanode_clients = Arc::new(DatanodeClients::default());
-    let catalog_list = KvBackendCatalogManager::new(
-        cached_meta_backend.clone(),
-        cached_meta_backend.clone(),
-        datanode_clients,
-    );
+    let catalog_list =
+        KvBackendCatalogManager::new(cached_meta_backend.clone(), cached_meta_backend);
    let plugins: Plugins = Default::default();
    let state = Arc::new(QueryEngineState::new(
        catalog_list,
@@ -12,6 +12,7 @@
// See the License for the specific language governing permissions and
// limitations under the License.

+use std::collections::HashMap;
use std::sync::Arc;

use async_trait::async_trait;
@@ -26,7 +27,9 @@ use common_meta::key::table_info::{TableInfoKey, TableInfoValue};
use common_meta::key::table_name::{TableNameKey, TableNameValue};
use common_meta::key::table_region::{TableRegionKey, TableRegionValue};
use common_meta::key::table_route::{TableRouteKey, TableRouteValue as NextTableRouteValue};
-use common_meta::key::{RegionDistribution, TableMetaKey};
+use common_meta::key::{RegionDistribution, TableMetaKey, TableMetaValue};
+use common_meta::kv_backend::etcd::EtcdStore;
+use common_meta::kv_backend::KvBackendRef;
use common_meta::range_stream::PaginationStream;
use common_meta::rpc::router::TableRoute;
use common_meta::rpc::store::{BatchDeleteRequest, BatchPutRequest, PutRequest, RangeRequest};
@@ -35,8 +38,6 @@ use common_meta::util::get_prefix_end_key;
use common_telemetry::info;
use etcd_client::Client;
use futures::TryStreamExt;
-use meta_srv::service::store::etcd::EtcdStore;
-use meta_srv::service::store::kv::{KvBackendAdapter, KvStoreRef};
use prost::Message;
use snafu::ResultExt;
use v1_helper::{CatalogKey as v1CatalogKey, SchemaKey as v1SchemaKey, TableGlobalValue};
@@ -76,12 +77,12 @@ impl UpgradeCommand {
            skip_schema_keys: self.skip_schema_keys,
            skip_table_route_keys: self.skip_table_route_keys,
        };
-        Ok(Instance::Tool(Box::new(tool)))
+        Ok(Instance::new(Box::new(tool)))
    }
}

struct MigrateTableMetadata {
-    etcd_store: KvStoreRef,
+    etcd_store: KvBackendRef,
    dryrun: bool,

    skip_table_global_keys: bool,
@@ -123,7 +124,7 @@ impl MigrateTableMetadata {
        info!("Start scanning key from: {}", String::from_utf8_lossy(&key));

        let mut stream = PaginationStream::new(
-            KvBackendAdapter::wrap(self.etcd_store.clone()),
+            self.etcd_store.clone(),
            RangeRequest::new().with_range(key, range_end),
            PAGE_SIZE,
            Arc::new(|kv: KeyValue| {
@@ -152,7 +153,7 @@ impl MigrateTableMetadata {
        )
        .unwrap();

-        let new_table_value = NextTableRouteValue::new(table_route.region_routes);
+        let new_table_value = NextTableRouteValue::physical(table_route.region_routes);

        let table_id = table_route.table.id as u32;
        let new_key = TableRouteKey::new(table_id);
@@ -182,7 +183,7 @@ impl MigrateTableMetadata {
        let mut keys = Vec::new();
        info!("Start scanning key from: {}", String::from_utf8_lossy(&key));
        let mut stream = PaginationStream::new(
-            KvBackendAdapter::wrap(self.etcd_store.clone()),
+            self.etcd_store.clone(),
            RangeRequest::new().with_range(key, range_end),
            PAGE_SIZE,
            Arc::new(|kv: KeyValue| {
@@ -234,7 +235,7 @@ impl MigrateTableMetadata {
        let mut keys = Vec::new();
        info!("Start scanning key from: {}", String::from_utf8_lossy(&key));
        let mut stream = PaginationStream::new(
-            KvBackendAdapter::wrap(self.etcd_store.clone()),
+            self.etcd_store.clone(),
            RangeRequest::new().with_range(key, range_end),
            PAGE_SIZE,
            Arc::new(|kv: KeyValue| {
@@ -284,7 +285,7 @@ impl MigrateTableMetadata {

        info!("Start scanning key from: {}", String::from_utf8_lossy(&key));
        let mut stream = PaginationStream::new(
-            KvBackendAdapter::wrap(self.etcd_store.clone()),
+            self.etcd_store.clone(),
            RangeRequest::new().with_range(key, range_end.clone()),
            PAGE_SIZE,
            Arc::new(|kv: KeyValue| {
@@ -395,6 +396,9 @@ impl MigrateTableMetadata {
        let region_distribution: RegionDistribution =
            value.regions_id_map.clone().into_iter().collect();

+        // TODO(niebayes): properly fetch or construct wal options.
+        let region_wal_options = HashMap::default();

        let datanode_table_kvs = region_distribution
            .into_iter()
            .map(|(datanode_id, regions)| {
@@ -409,6 +413,7 @@ impl MigrateTableMetadata {
                        engine: engine.to_string(),
                        region_storage_path: region_storage_path.clone(),
                        region_options: (&value.table_info.meta.options).into(),
+                        region_wal_options: region_wal_options.clone(),
                    },
                ),
            )
@@ -12,25 +12,41 @@
 // See the License for the specific language governing permissions and
 // limitations under the License.

+use std::sync::Arc;
 use std::time::Duration;

+use async_trait::async_trait;
+use catalog::kvbackend::MetaKvBackend;
 use clap::Parser;
-use common_telemetry::logging;
+use common_config::WalConfig;
+use common_telemetry::{info, logging};
 use datanode::config::DatanodeOptions;
 use datanode::datanode::{Datanode, DatanodeBuilder};
 use meta_client::MetaClientOptions;
 use servers::Mode;
-use snafu::ResultExt;
+use snafu::{OptionExt, ResultExt};

 use crate::error::{MissingConfigSnafu, Result, ShutdownDatanodeSnafu, StartDatanodeSnafu};
-use crate::options::{Options, TopLevelOptions};
+use crate::options::{CliOptions, Options};
+use crate::App;

 pub struct Instance {
     datanode: Datanode,
 }

 impl Instance {
-    pub async fn start(&mut self) -> Result<()> {
+    fn new(datanode: Datanode) -> Self {
+        Self { datanode }
+    }
+}
+
+#[async_trait]
+impl App for Instance {
+    fn name(&self) -> &str {
+        "greptime-datanode"
+    }
+
+    async fn start(&mut self) -> Result<()> {
         plugins::start_datanode_plugins(self.datanode.plugins())
             .await
             .context(StartDatanodeSnafu)?;
@@ -38,7 +54,7 @@ impl Instance {
         self.datanode.start().await.context(StartDatanodeSnafu)
     }

-    pub async fn stop(&self) -> Result<()> {
+    async fn stop(&self) -> Result<()> {
         self.datanode
             .shutdown()
             .await
@@ -57,8 +73,8 @@ impl Command {
         self.subcmd.build(opts).await
     }

-    pub fn load_options(&self, top_level_opts: TopLevelOptions) -> Result<Options> {
-        self.subcmd.load_options(top_level_opts)
+    pub fn load_options(&self, cli_options: &CliOptions) -> Result<Options> {
+        self.subcmd.load_options(cli_options)
     }
 }

@@ -74,9 +90,9 @@ impl SubCommand {
         }
     }

-    fn load_options(&self, top_level_opts: TopLevelOptions) -> Result<Options> {
+    fn load_options(&self, cli_options: &CliOptions) -> Result<Options> {
         match self {
-            SubCommand::Start(cmd) => cmd.load_options(top_level_opts),
+            SubCommand::Start(cmd) => cmd.load_options(cli_options),
         }
     }
 }
@@ -89,13 +105,15 @@ struct StartCommand {
     rpc_addr: Option<String>,
     #[clap(long)]
     rpc_hostname: Option<String>,
-    #[clap(long, multiple = true, value_delimiter = ',')]
+    #[clap(long, value_delimiter = ',', num_args = 1..)]
     metasrv_addr: Option<Vec<String>>,
     #[clap(short, long)]
     config_file: Option<String>,
     #[clap(long)]
     data_home: Option<String>,
     #[clap(long)]
+    wal_dir: Option<String>,
+    #[clap(long)]
     http_addr: Option<String>,
     #[clap(long)]
     http_timeout: Option<u64>,
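The `#[clap(...)]` change above is the clap 4 way of accepting a repeatable, comma-separated flag (`multiple = true` no longer exists there). A minimal sketch of just that flag, assuming clap 4 with the `derive` feature and mirroring the attribute spelling used in the diff; the `Demo` struct is illustrative, not the actual `StartCommand`:

```rust
use clap::Parser;

#[derive(Parser, Debug)]
struct Demo {
    /// Accepts both `--metasrv-addr a,b` and `--metasrv-addr a --metasrv-addr b`.
    #[clap(long, value_delimiter = ',', num_args = 1..)]
    metasrv_addr: Option<Vec<String>>,
}

fn main() {
    // Parse a fixed argv instead of the process arguments, for the example.
    let demo = Demo::parse_from(["demo", "--metasrv-addr", "127.0.0.1:3002,127.0.0.1:3003"]);
    assert_eq!(demo.metasrv_addr.as_deref().map(|v| v.len()), Some(2));
    println!("{demo:?}");
}
```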
@@ -104,19 +122,19 @@ struct StartCommand {
 }

 impl StartCommand {
-    fn load_options(&self, top_level_opts: TopLevelOptions) -> Result<Options> {
+    fn load_options(&self, cli_options: &CliOptions) -> Result<Options> {
         let mut opts: DatanodeOptions = Options::load_layered_options(
             self.config_file.as_deref(),
             self.env_prefix.as_ref(),
             DatanodeOptions::env_list_keys(),
         )?;

-        if let Some(dir) = top_level_opts.log_dir {
-            opts.logging.dir = dir;
+        if let Some(dir) = &cli_options.log_dir {
+            opts.logging.dir = dir.clone();
         }

-        if top_level_opts.log_level.is_some() {
-            opts.logging.level = top_level_opts.log_level;
+        if cli_options.log_level.is_some() {
+            opts.logging.level = cli_options.log_level.clone();
         }

         if let Some(addr) = &self.rpc_addr {
@@ -149,6 +167,20 @@ impl StartCommand {
             opts.storage.data_home = data_home.clone();
         }

+        // `wal_dir` only affects raft-engine config.
+        if let Some(wal_dir) = &self.wal_dir
+            && let WalConfig::RaftEngine(raft_engine_config) = &mut opts.wal
+        {
+            if raft_engine_config
+                .dir
+                .as_ref()
+                .is_some_and(|original_dir| original_dir != wal_dir)
+            {
+                info!("The wal dir of raft-engine is altered to {wal_dir}");
+            }
+            raft_engine_config.dir.replace(wal_dir.clone());
+        }
+
         if let Some(http_addr) = &self.http_addr {
             opts.http.addr = http_addr.clone();
         }
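The new block above applies `--wal-dir` only when the configured WAL provider is raft-engine, and logs when the flag overrides a value that came from the configuration file (it also relies on let-chains). The same override logic, written against throwaway stand-in types rather than the real `WalConfig`/`RaftEngineConfig`, looks roughly like this:

```rust
// Stand-in types for the sketch; the real ones live in common_config/datanode.
#[derive(Debug)]
#[allow(dead_code)]
enum WalConfig {
    RaftEngine(RaftEngineConfig),
    Kafka, // placeholder for other providers
}

#[derive(Debug, Default)]
struct RaftEngineConfig {
    dir: Option<String>,
}

/// Applies the CLI-provided wal dir only for the raft-engine provider and
/// reports when it replaces a value loaded from the config file.
fn override_wal_dir(wal: &mut WalConfig, wal_dir: Option<&str>) {
    if let (Some(wal_dir), WalConfig::RaftEngine(raft_engine_config)) = (wal_dir, wal) {
        if raft_engine_config
            .dir
            .as_deref()
            .is_some_and(|original| original != wal_dir)
        {
            println!("The wal dir of raft-engine is altered to {wal_dir}");
        }
        raft_engine_config.dir.replace(wal_dir.to_string());
    }
}

fn main() {
    let mut wal = WalConfig::RaftEngine(RaftEngineConfig {
        dir: Some("/wal".to_string()),
    });
    override_wal_dir(&mut wal, Some("/other/wal"));
    println!("{wal:?}");
}
```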
@@ -171,12 +203,32 @@ impl StartCommand {
         logging::info!("Datanode start command: {:#?}", self);
         logging::info!("Datanode options: {:#?}", opts);

-        let datanode = DatanodeBuilder::new(opts, None, plugins)
+        let node_id = opts
+            .node_id
+            .context(MissingConfigSnafu { msg: "'node_id'" })?;
+
+        let meta_config = opts.meta_client.as_ref().context(MissingConfigSnafu {
+            msg: "'meta_client_options'",
+        })?;
+
+        let meta_client = datanode::heartbeat::new_metasrv_client(node_id, meta_config)
+            .await
+            .context(StartDatanodeSnafu)?;
+
+        let meta_backend = Arc::new(MetaKvBackend {
+            client: Arc::new(meta_client.clone()),
+        });
+
+        let datanode = DatanodeBuilder::new(opts, plugins)
+            .with_meta_client(meta_client)
+            .with_kv_backend(meta_backend)
+            .enable_region_server_service()
+            .enable_http_service()
             .build()
             .await
             .context(StartDatanodeSnafu)?;

-        Ok(Instance { datanode })
+        Ok(Instance::new(datanode))
     }
 }

@@ -185,13 +237,13 @@ mod tests {
     use std::io::Write;
     use std::time::Duration;

-    use common_base::readable_size::ReadableSize;
     use common_test_util::temp_dir::create_named_temp_file;
-    use datanode::config::{CompactionConfig, FileConfig, ObjectStoreConfig, RegionManifestConfig};
+    use datanode::config::{FileConfig, GcsConfig, ObjectStoreConfig, S3Config};
+    use servers::heartbeat_options::HeartbeatOptions;
     use servers::Mode;

     use super::*;
-    use crate::options::ENV_VAR_SEP;
+    use crate::options::{CliOptions, ENV_VAR_SEP};

     #[test]
     fn test_read_from_config_file() {
@@ -204,14 +256,18 @@ mod tests {
            rpc_hostname = "127.0.0.1"
            rpc_runtime_size = 8

+           [heartbeat]
+           interval = "300ms"
+
            [meta_client]
            metasrv_addrs = ["127.0.0.1:3002"]
-           timeout_millis = 3000
-           connect_timeout_millis = 5000
-           ddl_timeout_millis= 10000
+           timeout = "3s"
+           connect_timeout = "5s"
+           ddl_timeout = "10s"
            tcp_nodelay = true

            [wal]
+           provider = "raft_engine"
            dir = "/other/wal"
            file_size = "1GB"
            purge_threshold = "50GB"
@@ -220,18 +276,17 @@ mod tests {
            sync_write = false

            [storage]
-           type = "File"
            data_home = "/tmp/greptimedb/"
+           type = "File"

-           [storage.compaction]
-           max_inflight_tasks = 3
-           max_files_in_level0 = 7
-           max_purge_tasks = 32
+           [[storage.providers]]
+           type = "Gcs"
+           bucket = "foo"
+           endpoint = "bar"

-           [storage.manifest]
-           checkpoint_margin = 9
-           gc_duration = '7s'
-           compress = true
+           [[storage.providers]]
+           type = "S3"
+           bucket = "foo"

            [logging]
            level = "debug"
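In the rewritten test fixture, `[[storage.providers]]` is an array of tables whose `type` key selects the object-store backend. A minimal sketch of how that shape deserializes with serde's internally tagged enums (assuming the `serde` and `toml` crates; the structs are trimmed stand-ins for `datanode::config`, and the `[storage]` nesting level is dropped for brevity):

```rust
use serde::Deserialize;

/// Trimmed stand-in for the datanode object store config.
#[derive(Debug, Deserialize)]
#[serde(tag = "type")]
enum ObjectStoreConfig {
    File {},
    Gcs { bucket: String, endpoint: String },
    S3 { bucket: String },
}

/// Only the fields needed for the example.
#[derive(Debug, Deserialize)]
struct StorageConfig {
    data_home: String,
    #[serde(default)]
    providers: Vec<ObjectStoreConfig>,
}

fn main() {
    let config = r#"
        data_home = "/tmp/greptimedb/"

        [[providers]]
        type = "Gcs"
        bucket = "foo"
        endpoint = "bar"

        [[providers]]
        type = "S3"
        bucket = "foo"
    "#;
    let storage: StorageConfig = toml::from_str(config).unwrap();
    assert_eq!(storage.providers.len(), 2);
    println!("{storage:?}");
}
```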
@@ -244,56 +299,60 @@ mod tests {
             ..Default::default()
         };

-        let Options::Datanode(options) = cmd.load_options(TopLevelOptions::default()).unwrap()
-        else {
+        let Options::Datanode(options) = cmd.load_options(&CliOptions::default()).unwrap() else {
             unreachable!()
         };

         assert_eq!("127.0.0.1:3001".to_string(), options.rpc_addr);
         assert_eq!(Some(42), options.node_id);

-        assert_eq!(Duration::from_secs(600), options.wal.purge_interval);
-        assert_eq!(1024 * 1024 * 1024, options.wal.file_size.0);
-        assert_eq!(1024 * 1024 * 1024 * 50, options.wal.purge_threshold.0);
-        assert!(!options.wal.sync_write);
+        let WalConfig::RaftEngine(raft_engine_config) = options.wal else {
+            unreachable!()
+        };
+        assert_eq!("/other/wal", raft_engine_config.dir.unwrap());
+        assert_eq!(Duration::from_secs(600), raft_engine_config.purge_interval);
+        assert_eq!(1024 * 1024 * 1024, raft_engine_config.file_size.0);
+        assert_eq!(
+            1024 * 1024 * 1024 * 50,
+            raft_engine_config.purge_threshold.0
+        );
+        assert!(!raft_engine_config.sync_write);
+
+        let HeartbeatOptions {
+            interval: heart_beat_interval,
+            ..
+        } = options.heartbeat;
+
+        assert_eq!(300, heart_beat_interval.as_millis());
+
         let MetaClientOptions {
             metasrv_addrs: metasrv_addr,
-            timeout_millis,
-            connect_timeout_millis,
+            timeout,
+            connect_timeout,
+            ddl_timeout,
             tcp_nodelay,
-            ddl_timeout_millis,
             ..
         } = options.meta_client.unwrap();

         assert_eq!(vec!["127.0.0.1:3002".to_string()], metasrv_addr);
-        assert_eq!(5000, connect_timeout_millis);
-        assert_eq!(10000, ddl_timeout_millis);
-        assert_eq!(3000, timeout_millis);
+        assert_eq!(5000, connect_timeout.as_millis());
+        assert_eq!(10000, ddl_timeout.as_millis());
+        assert_eq!(3000, timeout.as_millis());
         assert!(tcp_nodelay);
         assert_eq!("/tmp/greptimedb/", options.storage.data_home);
         assert!(matches!(
             &options.storage.store,
             ObjectStoreConfig::File(FileConfig { .. })
         ));
-        assert_eq!(
-            CompactionConfig {
-                max_inflight_tasks: 3,
-                max_files_in_level0: 7,
-                max_purge_tasks: 32,
-                sst_write_buffer_size: ReadableSize::mb(8),
-            },
-            options.storage.compaction,
-        );
-        assert_eq!(
-            RegionManifestConfig {
-                checkpoint_margin: Some(9),
-                gc_duration: Some(Duration::from_secs(7)),
-                compress: true
-            },
-            options.storage.manifest,
-        );
+        assert_eq!(options.storage.providers.len(), 2);
+        assert!(matches!(
+            options.storage.providers[0],
+            ObjectStoreConfig::Gcs(GcsConfig { .. })
+        ));
+        assert!(matches!(
+            options.storage.providers[1],
+            ObjectStoreConfig::S3(S3Config { .. })
+        ));

         assert_eq!("debug", options.logging.level.unwrap());
         assert_eq!("/tmp/greptimedb/test/logs".to_string(), options.logging.dir);
@@ -302,7 +361,7 @@ mod tests {
     #[test]
     fn test_try_from_cmd() {
         if let Options::Datanode(opt) = StartCommand::default()
-            .load_options(TopLevelOptions::default())
+            .load_options(&CliOptions::default())
             .unwrap()
         {
             assert_eq!(Mode::Standalone, opt.mode)
@@ -313,7 +372,7 @@ mod tests {
            metasrv_addr: Some(vec!["127.0.0.1:3002".to_string()]),
            ..Default::default()
        })
-       .load_options(TopLevelOptions::default())
+       .load_options(&CliOptions::default())
        .unwrap()
        {
            assert_eq!(Mode::Distributed, opt.mode)
@@ -323,7 +382,7 @@ mod tests {
            metasrv_addr: Some(vec!["127.0.0.1:3002".to_string()]),
            ..Default::default()
        })
-       .load_options(TopLevelOptions::default())
+       .load_options(&CliOptions::default())
        .is_err());

        // Providing node_id but leave metasrv_addr absent is ok since metasrv_addr has default value
@@ -331,18 +390,21 @@ mod tests {
            node_id: Some(42),
            ..Default::default()
        })
-       .load_options(TopLevelOptions::default())
+       .load_options(&CliOptions::default())
        .is_ok());
     }

     #[test]
-    fn test_top_level_options() {
+    fn test_load_log_options_from_cli() {
         let cmd = StartCommand::default();

         let options = cmd
-            .load_options(TopLevelOptions {
+            .load_options(&CliOptions {
                 log_dir: Some("/tmp/greptimedb/test/logs".to_string()),
                 log_level: Some("debug".to_string()),
+
+                #[cfg(feature = "tokio-console")]
+                tokio_console_addr: None,
             })
             .unwrap();

@@ -363,26 +425,21 @@ mod tests {
            rpc_runtime_size = 8

            [meta_client]
-           timeout_millis = 3000
-           connect_timeout_millis = 5000
+           timeout = "3s"
+           connect_timeout = "5s"
            tcp_nodelay = true

            [wal]
+           provider = "raft_engine"
            file_size = "1GB"
            purge_threshold = "50GB"
-           purge_interval = "10m"
-           read_batch_size = 128
+           purge_interval = "5m"
            sync_write = false

            [storage]
            type = "File"
            data_home = "/tmp/greptimedb/"

-           [storage.compaction]
-           max_inflight_tasks = 3
-           max_files_in_level0 = 7
-           max_purge_tasks = 32
-
            [logging]
            level = "debug"
            dir = "/tmp/greptimedb/test/logs"
@@ -393,26 +450,24 @@ mod tests {
        temp_env::with_vars(
            [
                (
-                   // storage.manifest.gc_duration = 9s
+                   // wal.purge_interval = 1m
                    [
                        env_prefix.to_string(),
-                       "storage".to_uppercase(),
-                       "manifest".to_uppercase(),
-                       "gc_duration".to_uppercase(),
+                       "wal".to_uppercase(),
+                       "purge_interval".to_uppercase(),
                    ]
                    .join(ENV_VAR_SEP),
-                   Some("9s"),
+                   Some("1m"),
                ),
                (
-                   // storage.compaction.max_purge_tasks = 99
+                   // wal.read_batch_size = 100
                    [
                        env_prefix.to_string(),
-                       "storage".to_uppercase(),
-                       "compaction".to_uppercase(),
-                       "max_purge_tasks".to_uppercase(),
+                       "wal".to_uppercase(),
+                       "read_batch_size".to_uppercase(),
                    ]
                    .join(ENV_VAR_SEP),
-                   Some("99"),
+                   Some("100"),
                ),
                (
                    // meta_client.metasrv_addrs = 127.0.0.1:3001,127.0.0.1:3002,127.0.0.1:3003
@@ -428,21 +483,21 @@ mod tests {
            || {
                let command = StartCommand {
                    config_file: Some(file.path().to_str().unwrap().to_string()),
+                   wal_dir: Some("/other/wal/dir".to_string()),
                    env_prefix: env_prefix.to_string(),
                    ..Default::default()
                };

-               let Options::Datanode(opts) =
-                   command.load_options(TopLevelOptions::default()).unwrap()
+               let Options::Datanode(opts) = command.load_options(&CliOptions::default()).unwrap()
                else {
                    unreachable!()
                };

                // Should be read from env, env > default values.
-               assert_eq!(
-                   opts.storage.manifest.gc_duration,
-                   Some(Duration::from_secs(9))
-               );
+               let WalConfig::RaftEngine(raft_engine_config) = opts.wal else {
+                   unreachable!()
+               };
+               assert_eq!(raft_engine_config.read_batch_size, 100);
                assert_eq!(
                    opts.meta_client.unwrap().metasrv_addrs,
                    vec![
@@ -453,16 +508,16 @@ mod tests {
                );

                // Should be read from config file, config file > env > default values.
-               assert_eq!(opts.storage.compaction.max_purge_tasks, 32);
+               assert_eq!(
+                   raft_engine_config.purge_interval,
+                   Duration::from_secs(60 * 5)
+               );
+
+               // Should be read from cli, cli > config file > env > default values.
+               assert_eq!(raft_engine_config.dir.unwrap(), "/other/wal/dir");

                // Should be default value.
-               assert_eq!(
-                   opts.storage.manifest.checkpoint_margin,
-                   DatanodeOptions::default()
-                       .storage
-                       .manifest
-                       .checkpoint_margin
-               );
+               assert_eq!(opts.http.addr, DatanodeOptions::default().http.addr);
            },
        );
    }

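The assertions in this test pin down the intended layering: CLI flag over config file over environment variable over built-in default. The toy resolver below just restates that ordering with the same example values; it is a hypothetical helper, not the actual `Options::load_layered_options` implementation, and its defaults are illustrative.

```rust
/// Hypothetical precedence helper: later sources only fill values that
/// earlier (higher-priority) sources left unset.
fn resolve<T>(cli: Option<T>, file: Option<T>, env: Option<T>, default: T) -> T {
    cli.or(file).or(env).unwrap_or(default)
}

fn main() {
    // wal dir: set on the CLI and in the config file, so the CLI wins.
    assert_eq!(
        resolve(Some("/other/wal/dir"), Some("/other/wal"), None, "wal"),
        "/other/wal/dir"
    );
    // purge_interval: set in the file ("5m") and the env ("1m"), so the file wins.
    assert_eq!(resolve(None, Some("5m"), Some("1m"), "10m"), "5m");
    // read_batch_size: only the env sets it, so it beats the (illustrative) default.
    assert_eq!(resolve(None, None, Some(100), 128), 100);
    println!("precedence checks passed");
}
```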
@@ -14,7 +14,7 @@

 use std::any::Any;

-use common_error::ext::ErrorExt;
+use common_error::ext::{BoxedError, ErrorExt};
 use common_error::status_code::StatusCode;
 use common_macro::stack_trace_debug;
 use config::ConfigError;
@@ -37,6 +37,36 @@ pub enum Error {
         source: common_meta::error::Error,
     },

+    #[snafu(display("Failed to init DDL manager"))]
+    InitDdlManager {
+        location: Location,
+        source: common_meta::error::Error,
+    },
+
+    #[snafu(display("Failed to init default timezone"))]
+    InitTimezone {
+        location: Location,
+        source: common_time::error::Error,
+    },
+
+    #[snafu(display("Failed to start procedure manager"))]
+    StartProcedureManager {
+        location: Location,
+        source: common_procedure::error::Error,
+    },
+
+    #[snafu(display("Failed to stop procedure manager"))]
+    StopProcedureManager {
+        location: Location,
+        source: common_procedure::error::Error,
+    },
+
+    #[snafu(display("Failed to start wal options allocator"))]
+    StartWalOptionsAllocator {
+        location: Location,
+        source: common_meta::error::Error,
+    },
+
     #[snafu(display("Failed to start datanode"))]
     StartDatanode {
         location: Location,
@@ -174,12 +204,51 @@ pub enum Error {
         location: Location,
     },

+    #[snafu(display("Failed to connect server at {addr}"))]
+    ConnectServer {
+        addr: String,
+        source: client::error::Error,
+        location: Location,
+    },
+
     #[snafu(display("Failed to serde json"))]
     SerdeJson {
         #[snafu(source)]
         error: serde_json::error::Error,
         location: Location,
     },
+
+    #[snafu(display("Expect data from output, but got another thing"))]
+    NotDataFromOutput { location: Location },
+
+    #[snafu(display("Empty result from output"))]
+    EmptyResult { location: Location },
+
+    #[snafu(display("Failed to manipulate file"))]
+    FileIo {
+        location: Location,
+        #[snafu(source)]
+        error: std::io::Error,
+    },
+
+    #[snafu(display("Invalid database name: {}", database))]
+    InvalidDatabaseName {
+        location: Location,
+        database: String,
+    },
+
+    #[snafu(display("Failed to create directory {}", dir))]
+    CreateDir {
+        dir: String,
+        #[snafu(source)]
+        error: std::io::Error,
+    },
+
+    #[snafu(display("Other error"))]
+    Other {
+        source: BoxedError,
+        location: Location,
+    },
 }

 pub type Result<T> = std::result::Result<T, Error>;
@@ -195,15 +264,26 @@ impl ErrorExt for Error {
             Error::ShutdownMetaServer { source, .. } => source.status_code(),
             Error::BuildMetaServer { source, .. } => source.status_code(),
             Error::UnsupportedSelectorType { source, .. } => source.status_code(),
-            Error::IterStream { source, .. } | Error::InitMetadata { source, .. } => {
-                source.status_code()
-            }
+            Error::IterStream { source, .. }
+            | Error::InitMetadata { source, .. }
+            | Error::InitDdlManager { source, .. } => source.status_code(),
+
+            Error::ConnectServer { source, .. } => source.status_code(),
             Error::MissingConfig { .. }
             | Error::LoadLayeredConfig { .. }
             | Error::IllegalConfig { .. }
             | Error::InvalidReplCommand { .. }
-            | Error::ConnectEtcd { .. } => StatusCode::InvalidArguments,
+            | Error::InitTimezone { .. }
+            | Error::ConnectEtcd { .. }
+            | Error::NotDataFromOutput { .. }
+            | Error::CreateDir { .. }
+            | Error::EmptyResult { .. }
+            | Error::InvalidDatabaseName { .. } => StatusCode::InvalidArguments,
+
+            Error::StartProcedureManager { source, .. }
+            | Error::StopProcedureManager { source, .. } => source.status_code(),
+            Error::StartWalOptionsAllocator { source, .. } => source.status_code(),
             Error::ReplCreation { .. } | Error::Readline { .. } => StatusCode::Internal,
             Error::RequestDatabase { source, .. } => source.status_code(),
             Error::CollectRecordBatches { source, .. }
@@ -215,7 +295,9 @@ impl ErrorExt for Error {
             Error::SubstraitEncodeLogicalPlan { source, .. } => source.status_code(),
             Error::StartCatalogManager { source, .. } => source.status_code(),

-            Error::SerdeJson { .. } => StatusCode::Unexpected,
+            Error::SerdeJson { .. } | Error::FileIo { .. } => StatusCode::Unexpected,
+
+            Error::Other { source, .. } => source.status_code(),
         }
     }
 }
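Each new variant pairs a display message with a snafu-generated context selector, so call sites attach context with `.context(...)`. A minimal sketch of how the added `CreateDir` variant would be produced, assuming a snafu 0.7-style derive; the enum below is a trimmed stand-in without the `Location` fields of the real cmd error type.

```rust
use snafu::{ResultExt, Snafu};

/// Trimmed stand-in for the cmd error enum above.
#[derive(Debug, Snafu)]
enum Error {
    #[snafu(display("Failed to create directory {}", dir))]
    CreateDir {
        dir: String,
        #[snafu(source)]
        error: std::io::Error,
    },
}

type Result<T> = std::result::Result<T, Error>;

/// Wraps the io::Error into `Error::CreateDir` via the generated
/// `CreateDirSnafu` context selector.
fn ensure_dir(dir: &str) -> Result<()> {
    std::fs::create_dir_all(dir).context(CreateDirSnafu { dir })?;
    Ok(())
}

fn main() {
    // Creating a directory under a regular file fails, exercising the error path.
    if let Err(err) = ensure_dir("/dev/null/subdir") {
        println!("{err}");
    }
}
```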
Some files were not shown because too many files have changed in this diff.