Mirror of https://github.com/GreptimeTeam/greptimedb.git, synced 2025-12-23 14:40:01 +00:00

Compare commits: 234 commits, create-vie ... avoid-quer

Commits (SHA1):

1bfba48755, 457998f0fe, b02c256157, 45fee948e9, ea49f8a5c4, 43afea1a9d, fcfcf86385, 26b112ab57,
24612f62dd, 85a231850d, f024054ed3, 05751084e7, 8b6596faa0, eab309ff7e, 7de336f087, 6e9a9dc333,
848bd7e553, f0effd2680, aafb468547, 4aa756c896, d3860671a8, 9dd6e033a7, 097f62f459, 048368fd87,
f9db5ff0d6, 20ce7d428d, 75bddc0bf5, c78043d526, 297105266b, 1de17aec74, 389ded93d1, af486ec0d0,
25d64255a3, 3790020d78, 5df3d4e5da, af670df515, a58256d4d3, 466f7c6448, 0101657649, a3a2c8d063,
dfc1acbb2a, 0d055b6ee6, 614643ef7b, b90b7adf6f, 418090b464, 090b59e8d6, 9e1af79637, 9800807fe5,
b86d79b906, e070ba3c32, 43bf7bffd0, 56aed6e6ff, 47785756e5, 0aa523cd8c, 7a8222dd97, 40c585890a,
da925e956e, d7ade3c854, 179c8c716c, 19543f9819, 533ada70ca, c50ff23194, d7f1150098, 82c3eca25e,
df13832a59, 7da92eb9eb, c71298d3d5, de594833ac, 6a9a92931d, 11ad5b3ed1, b8354bbb55, 258675b75e,
11a08cb272, e9b178b8b9, 3477fde0e5, 9baa431656, e2a1cb5840, f696f41a02, 0168d43d60, e372e25e30,
ca409a732f, 5c0a530ad1, 4b030456f6, f93b5b19f0, 669a6d84e9, a45017ad71, 0d9e71b653, 93f178f3ad,
9f4a6c6fe2, c915916b62, dff7ba7598, fe34ebf770, a1c51a5885, 63a8d293a1, 6c621b7fcf, 529e344450,
2a169f9364, 97eb196699, cfae276d37, 09129a911e, 15d7b9755e, 72897a20e3, c04d02460f, 4ca7ac7632,
a260ba3ee7, efd3f04b7c, f16ce3ca27, 6214180ecd, 00e21e2021, 494ce65729, e15294db41, be1eb4efb7,
9d12496aaf, 5d8084a32f, 60eb5de3f1, a0be7198f9, 6ab3aeb142, 590aedd466, 27e376e892, 36c41d129c,
89da42dbc1, 04852aa27e, d0820bb26d, fa6c371380, 9aa2182cb2, bca2e393bf, b1ef327bac, 115c74791d,
aec5cca2c7, 06e1c43743, 9d36c31209, c91132bd14, 25e9076f5b, 08945f128b, 5a0629eaa0, 89dbf6ddd2,
66aa08d815, b8a325d18c, ed95e99556, 5545a8b023, 5140d247e3, f995f6099f, 7de62ef5d0, 0e05f85a9d,
a6a702de4e, d99746385b, 9d8f72d611, c07a1babd5, cc8d6b1200, 5274806108, 6e1cc1df55, 65f80af9a9,
a68072cb21, 71c1c7ca24, 1b5862223c, c0be0c30de, 154f561da1, aa2934b422, 1b93a026c2, 530353785c,
573c19be32, f3b68253c2, 6e9e8fad26, 6e12e1b84b, 7d447c20c5, 9c3b9600ca, 73fe075049, 2748cec7e2,
65d47bab56, f6e2039eb8, 3b89b9ddd8, 695746193b, 573d369f77, e6eca8ca0c, e84b1eefdf, 777bc3b89d,
81f3007f6f, 863ee608ca, 20cbc039e6, d11b1fa389, a0f4881c6e, aba5e41799, 371d4cf9f5, 8e3515d396,
701aba9cdb, b493ea1b38, 336db38ce9, c387687262, 7ef18c0915, 1bbde15a15, 3dac7cbe37, 08263995f6,
c0b909330a, dadee99d69, f29aebf89f, e154dc5fd4, ed8b13689e, 3112ced9c0, e410192560, eb3d2ca759,
934c7e3fef, d8ea7c5585, 77fc1e6de0, 4eadd9f8a8, 1ec595134d, 9206f60b28, 2d0f493040, bba3108e0d,
9524ec83bc, e0b5f52c2a, 1272bc9afc, df01ac05a1, 659d34a170, 62037ee4c8, 8d229dda98, 42e7403fcc,
20a933e395, b619950c70, 4685b59ef1, 86a989517e, 0aaf7621bd, 924c52af7c, f5e5a89e44, 778e195f07,
f764fd5847, 19a9035f4b, 96c01a3bf0, bf21527f18, 9e1441e48b, eeb4e26c71, 7ca0fa52d4, 443722597b,
d4b814f698, d0b2a11f2b

@@ -24,3 +24,7 @@ GT_KAFKA_ENDPOINTS = localhost:9092

# Setting for fuzz tests
GT_MYSQL_ADDR = localhost:4002

# Setting for unstable fuzz tests
GT_FUZZ_BINARY_PATH=/path/to/
GT_FUZZ_INSTANCE_ROOT_DIR=/tmp/unstable_greptime

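A minimal sketch of how these variables are consumed when running the unstable fuzz target locally. The target name, cargo-fuzz flags, and the `--features=unstable` gate are taken from the fuzz-test action and develop.yml changes further down in this compare view; the binary path below is only a placeholder for your own build.

```bash
# Point the unstable fuzz harness at a locally built greptime binary.
export GT_MYSQL_ADDR=localhost:4002
export GT_FUZZ_BINARY_PATH=./target/debug/greptime      # placeholder path
export GT_FUZZ_INSTANCE_ROOT_DIR=/tmp/unstable_greptime

# Invocation mirrors .github/actions/fuzz-test with unstable=true.
cargo fuzz run unstable_fuzz_create_table_standalone \
  --fuzz-dir tests-fuzz -D -s none --features=unstable \
  -- -max_total_time=120
```
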
@@ -1,7 +1,7 @@
---
name: Bug report
description: Is something not working? Help us fix it!
labels: [ "bug" ]
labels: [ "C-bug" ]
body:
- type: markdown
attributes:

2 .github/ISSUE_TEMPLATE/config.yml vendored
@@ -4,5 +4,5 @@ contact_links:
url: https://greptime.com/slack
about: Get free help from the Greptime community
- name: Greptime Community Discussion
url: https://github.com/greptimeTeam/greptimedb/discussions
url: https://github.com/greptimeTeam/discussions
about: Get free help from the Greptime community

2 .github/ISSUE_TEMPLATE/enhancement.yml vendored
@@ -1,7 +1,7 @@
---
name: Enhancement
description: Suggest an enhancement to existing functionality
labels: [ "enhancement" ]
labels: [ "C-enhancement" ]
body:
- type: dropdown
id: type

@@ -1,7 +1,7 @@
---
name: Feature request
name: New Feature
description: Suggest a new feature for GreptimeDB
labels: [ "feature request" ]
labels: [ "C-feature" ]
body:
- type: markdown
id: info

18 .github/actions/build-and-push-ci-image/action.yml vendored Normal file
@@ -0,0 +1,18 @@
name: Build and push CI Docker image
description: Build and push CI Docker image to local registry
inputs:
binary_path:
default: "./bin"
description: "Binary path"
runs:
using: composite
steps:
- name: Build and push to local registry
uses: docker/build-push-action@v5
with:
context: .
file: ./docker/ci/ubuntu/Dockerfile.fuzztests
push: true
tags: localhost:5001/greptime/greptimedb:latest
build-args: |
BINARY_PATH=${{ inputs.binary_path }}

@@ -22,15 +22,15 @@ inputs:
build-dev-builder-ubuntu:
description: Build dev-builder-ubuntu image
required: false
default: 'true'
default: "true"
build-dev-builder-centos:
description: Build dev-builder-centos image
required: false
default: 'true'
default: "true"
build-dev-builder-android:
description: Build dev-builder-android image
required: false
default: 'true'
default: "true"
runs:
using: composite
steps:
@@ -47,7 +47,7 @@ runs:
run: |
make dev-builder \
BASE_IMAGE=ubuntu \
BUILDX_MULTI_PLATFORM_BUILD=true \
BUILDX_MULTI_PLATFORM_BUILD=all \
IMAGE_REGISTRY=${{ inputs.dockerhub-image-registry }} \
IMAGE_NAMESPACE=${{ inputs.dockerhub-image-namespace }} \
IMAGE_TAG=${{ inputs.version }}
@@ -58,7 +58,7 @@ runs:
run: |
make dev-builder \
BASE_IMAGE=centos \
BUILDX_MULTI_PLATFORM_BUILD=true \
BUILDX_MULTI_PLATFORM_BUILD=amd64 \
IMAGE_REGISTRY=${{ inputs.dockerhub-image-registry }} \
IMAGE_NAMESPACE=${{ inputs.dockerhub-image-namespace }} \
IMAGE_TAG=${{ inputs.version }}
@@ -72,5 +72,5 @@ runs:
IMAGE_REGISTRY=${{ inputs.dockerhub-image-registry }} \
IMAGE_NAMESPACE=${{ inputs.dockerhub-image-namespace }} \
IMAGE_TAG=${{ inputs.version }} && \

docker push ${{ inputs.dockerhub-image-registry }}/${{ inputs.dockerhub-image-namespace }}/dev-builder-android:${{ inputs.version }}

@@ -16,7 +16,7 @@ inputs:
dev-mode:
description: Enable dev mode, only build standard greptime
required: false
default: 'false'
default: "false"
working-dir:
description: Working directory to build the artifacts
required: false
@@ -68,7 +68,7 @@ runs:

- name: Build greptime on centos base image
uses: ./.github/actions/build-greptime-binary
if: ${{ inputs.arch == 'amd64' && inputs.dev-mode == 'false' }} # Only build centos7 base image for amd64.
if: ${{ inputs.arch == 'amd64' && inputs.dev-mode == 'false' }} # Builds greptime for centos if the host machine is amd64.
with:
base-image: centos
features: servers/dashboard
@@ -79,7 +79,7 @@ runs:

- name: Build greptime on android base image
uses: ./.github/actions/build-greptime-binary
if: ${{ inputs.arch == 'amd64' && inputs.dev-mode == 'false' }} # Only build android base image on amd64.
if: ${{ inputs.arch == 'amd64' && inputs.dev-mode == 'false' }} # Builds arm64 greptime binary for android if the host machine amd64.
with:
base-image: android
artifacts-dir: greptime-android-arm64-${{ inputs.version }}

@@ -26,8 +26,6 @@ runs:
using: composite
steps:
- uses: arduino/setup-protoc@v3
with:
repo-token: ${{ secrets.GITHUB_TOKEN }}

- name: Install rust toolchain
uses: dtolnay/rust-toolchain@master
@@ -61,6 +59,9 @@ runs:
if: ${{ inputs.disable-run-tests == 'false' }}
shell: pwsh
run: make test sqlness-test
env:
RUSTUP_WINDOWS_PATH_ADD_BIN: 1 # Workaround for https://github.com/nextest-rs/nextest/issues/1493
RUST_BACKTRACE: 1

- name: Upload sqlness logs
if: ${{ failure() }} # Only upload logs when the integration tests failed.

12 .github/actions/fuzz-test/action.yaml vendored
@@ -3,11 +3,17 @@ description: 'Fuzz test given setup and service'
inputs:
target:
description: "The fuzz target to test"
required: true
max-total-time:
description: "Max total time(secs)"
required: true
unstable:
default: 'false'
description: "Enable unstable feature"
runs:
using: composite
steps:
- name: Run Fuzz Test
shell: bash
run: cargo fuzz run ${{ inputs.target }} --fuzz-dir tests-fuzz -D -s none -- -max_total_time=120
env:
GT_MYSQL_ADDR: 127.0.0.1:4002
run: cargo fuzz run ${{ inputs.target }} --fuzz-dir tests-fuzz -D -s none ${{ inputs.unstable == 'true' && '--features=unstable' || '' }} -- -max_total_time=${{ inputs.max-total-time }}

16 .github/actions/setup-cyborg/action.yml vendored Normal file
@@ -0,0 +1,16 @@
name: Setup cyborg environment
description: Setup cyborg environment
runs:
using: composite
steps:
- uses: actions/setup-node@v4
with:
node-version: 22
- uses: pnpm/action-setup@v3
with:
package_json_file: 'cyborg/package.json'
run_install: true
- name: Describe the Environment
working-directory: cyborg
shell: bash
run: pnpm tsx -v

25 .github/actions/setup-etcd-cluster/action.yml vendored Normal file
@@ -0,0 +1,25 @@
name: Setup Etcd cluster
description: Deploy Etcd cluster on Kubernetes
inputs:
etcd-replicas:
default: 3
description: "Etcd replicas"
namespace:
default: "etcd-cluster"

runs:
using: composite
steps:
- name: Install Etcd cluster
shell: bash
run: |
helm upgrade \
--install etcd oci://registry-1.docker.io/bitnamicharts/etcd \
--set replicaCount=${{ inputs.etcd-replicas }} \
--set resources.requests.cpu=50m \
--set resources.requests.memory=128Mi \
--set auth.rbac.create=false \
--set auth.rbac.token.enabled=false \
--set persistence.size=2Gi \
--create-namespace \
-n ${{ inputs.namespace }}

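For reference, a rough local equivalent of this action: the chart and values are copied from the step above, and the readiness check mirrors the "Wait for etcd" step that the develop.yml workflow later in this compare view runs against the same labels.

```bash
# Deploy a 3-replica etcd cluster with the same Bitnami chart as the action,
# then wait for the pods to become Ready before using it.
helm upgrade --install etcd oci://registry-1.docker.io/bitnamicharts/etcd \
  --set replicaCount=3 \
  --set auth.rbac.create=false \
  --set auth.rbac.token.enabled=false \
  --create-namespace -n etcd-cluster

kubectl wait --for=condition=Ready pod \
  -l app.kubernetes.io/instance=etcd \
  --timeout=120s -n etcd-cluster
```
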
85 .github/actions/setup-greptimedb-cluster/action.yml vendored Normal file
@@ -0,0 +1,85 @@
name: Setup GreptimeDB cluster
description: Deploy GreptimeDB cluster on Kubernetes
inputs:
frontend-replicas:
default: 2
description: "Number of Frontend replicas"
datanode-replicas:
default: 2
description: "Number of Datanode replicas"
meta-replicas:
default: 3
description: "Number of Metasrv replicas"
image-registry:
default: "docker.io"
description: "Image registry"
image-repository:
default: "greptime/greptimedb"
description: "Image repository"
image-tag:
default: "latest"
description: 'Image tag'
etcd-endpoints:
default: "etcd.etcd-cluster.svc.cluster.local:2379"
description: "Etcd endpoints"

runs:
using: composite
steps:
- name: Install GreptimeDB operator
shell: bash
run: |
helm repo add greptime https://greptimeteam.github.io/helm-charts/
helm repo update
helm upgrade \
--install \
--create-namespace \
greptimedb-operator greptime/greptimedb-operator \
-n greptimedb-admin \
--wait \
--wait-for-jobs
- name: Install GreptimeDB cluster
shell: bash
run: |
helm upgrade \
--install my-greptimedb \
--set meta.etcdEndpoints=${{ inputs.etcd-endpoints }} \
--set image.registry=${{ inputs.image-registry }} \
--set image.repository=${{ inputs.image-repository }} \
--set image.tag=${{ inputs.image-tag }} \
--set base.podTemplate.main.resources.requests.cpu=50m \
--set base.podTemplate.main.resources.requests.memory=256Mi \
--set base.podTemplate.main.resources.limits.cpu=1000m \
--set base.podTemplate.main.resources.limits.memory=2Gi \
--set frontend.replicas=${{ inputs.frontend-replicas }} \
--set datanode.replicas=${{ inputs.datanode-replicas }} \
--set meta.replicas=${{ inputs.meta-replicas }} \
greptime/greptimedb-cluster \
--create-namespace \
-n my-greptimedb \
--wait \
--wait-for-jobs
- name: Wait for GreptimeDB
shell: bash
run: |
while true; do
PHASE=$(kubectl -n my-greptimedb get gtc my-greptimedb -o jsonpath='{.status.clusterPhase}')
if [ "$PHASE" == "Running" ]; then
echo "Cluster is ready"
break
else
echo "Cluster is not ready yet: Current phase: $PHASE"
kubectl get pods -n my-greptimedb
sleep 5 # wait for 5 seconds before check again.
fi
done
- name: Print GreptimeDB info
if: always()
shell: bash
run: |
kubectl get all --show-labels -n my-greptimedb
- name: Describe Nodes
if: always()
shell: bash
run: |
kubectl describe nodes

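Once the cluster reports the Running phase, a quick way to poke at it from the host is the same port-forward the develop.yml workflow uses further down; connecting with a MySQL client on port 4002 is an assumption based on the GT_MYSQL_ADDR settings used throughout this change set, not something the action itself does.

```bash
# Forward the frontend's MySQL port to the host, then connect to it.
kubectl port-forward service/my-greptimedb-frontend 4002:4002 -n my-greptimedb &
mysql -h 127.0.0.1 -P 4002    # assumes a MySQL client is installed locally
```
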
10 .github/actions/setup-kind/action.yml vendored Normal file
@@ -0,0 +1,10 @@
name: Setup Kind
description: Deploy Kind
runs:
using: composite
steps:
- uses: actions/checkout@v4
- name: Create kind cluster
shell: bash
run: |
./.github/scripts/kind-with-registry.sh

11 .github/actions/sqlness-test/action.yml vendored
@@ -57,3 +57,14 @@ runs:
AWS_SECRET_ACCESS_KEY: ${{ inputs.aws-secret-access-key }}
run: |
aws s3 rm s3://${{ inputs.aws-ci-test-bucket }}/${{ inputs.data-root }} --recursive
- name: Export kind logs
if: failure()
shell: bash
run: kind export logs -n greptimedb-operator-e2e /tmp/kind
- name: Upload logs
if: failure()
uses: actions/upload-artifact@v4
with:
name: kind-logs
path: /tmp/kind
retention-days: 3

4 .github/doc-label-config.yml vendored
@@ -1,4 +0,0 @@
Doc not needed:
- '- \[x\] This PR does not require documentation updates.'
Doc update required:
- '- \[ \] This PR does not require documentation updates.'

@@ -1,13 +0,0 @@
{
"LABEL": {
"name": "breaking change",
"color": "D93F0B"
},
"CHECKS": {
"regexp": "^(?:(?!!:).)*$",
"ignoreLabels": [
"ignore-title"
],
"alwaysPassCI": true
}
}

12 .github/pr-title-checker-config.json vendored
@@ -1,12 +0,0 @@
{
"LABEL": {
"name": "Invalid PR Title",
"color": "B60205"
},
"CHECKS": {
"regexp": "^(feat|fix|test|refactor|chore|style|docs|perf|build|ci|revert)(\\(.*\\))?\\!?:.*",
"ignoreLabels": [
"ignore-title"
]
}
}

6 .github/pull_request_template.md vendored
@@ -15,6 +15,6 @@ Please explain IN DETAIL what the changes are in this PR and why they are needed

## Checklist

- [ ] I have written the necessary rustdoc comments.
- [ ] I have added the necessary unit tests and integration tests.
- [x] This PR does not require documentation updates.
- [ ] I have written the necessary rustdoc comments.
- [ ] I have added the necessary unit tests and integration tests.
- [ ] This PR requires documentation updates.

66 .github/scripts/kind-with-registry.sh vendored Executable file
@@ -0,0 +1,66 @@
#!/usr/bin/env bash

set -e
set -o pipefail

# 1. Create registry container unless it already exists
reg_name='kind-registry'
reg_port='5001'
if [ "$(docker inspect -f '{{.State.Running}}' "${reg_name}" 2>/dev/null || true)" != 'true' ]; then
docker run \
-d --restart=always -p "127.0.0.1:${reg_port}:5000" --network bridge --name "${reg_name}" \
registry:2
fi

# 2. Create kind cluster with containerd registry config dir enabled
# TODO: kind will eventually enable this by default and this patch will
# be unnecessary.
#
# See:
# https://github.com/kubernetes-sigs/kind/issues/2875
# https://github.com/containerd/containerd/blob/main/docs/cri/config.md#registry-configuration
# See: https://github.com/containerd/containerd/blob/main/docs/hosts.md
cat <<EOF | kind create cluster --wait 2m --config=-
kind: Cluster
apiVersion: kind.x-k8s.io/v1alpha4
containerdConfigPatches:
- |-
[plugins."io.containerd.grpc.v1.cri".registry]
config_path = "/etc/containerd/certs.d"
EOF

# 3. Add the registry config to the nodes
#
# This is necessary because localhost resolves to loopback addresses that are
# network-namespace local.
# In other words: localhost in the container is not localhost on the host.
#
# We want a consistent name that works from both ends, so we tell containerd to
# alias localhost:${reg_port} to the registry container when pulling images
REGISTRY_DIR="/etc/containerd/certs.d/localhost:${reg_port}"
for node in $(kind get nodes); do
docker exec "${node}" mkdir -p "${REGISTRY_DIR}"
cat <<EOF | docker exec -i "${node}" cp /dev/stdin "${REGISTRY_DIR}/hosts.toml"
[host."http://${reg_name}:5000"]
EOF
done

# 4. Connect the registry to the cluster network if not already connected
# This allows kind to bootstrap the network but ensures they're on the same network
if [ "$(docker inspect -f='{{json .NetworkSettings.Networks.kind}}' "${reg_name}")" = 'null' ]; then
docker network connect "kind" "${reg_name}"
fi

# 5. Document the local registry
# https://github.com/kubernetes/enhancements/tree/master/keps/sig-cluster-lifecycle/generic/1755-communicating-a-local-registry
cat <<EOF | kubectl apply -f -
apiVersion: v1
kind: ConfigMap
metadata:
name: local-registry-hosting
namespace: kube-public
data:
localRegistryHosting.v1: |
host: "localhost:${reg_port}"
help: "https://kind.sigs.k8s.io/docs/user/local-registry/"
EOF

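A sketch of how the pieces above fit together locally: run the script to get a kind cluster plus a registry on localhost:5001, then build and push the fuzz-test image the way the build-and-push-ci-image action does. The Dockerfile path, build arg, and tag are taken from that action; the ./bin directory is assumed to hold a locally built greptime binary.

```bash
# Create the kind cluster and the local registry it can pull from.
./.github/scripts/kind-with-registry.sh

# Build the CI image and push it to the local registry (values mirror the
# build-and-push-ci-image action; ./bin must contain a greptime binary).
docker build -f ./docker/ci/ubuntu/Dockerfile.fuzztests \
  --build-arg BINARY_PATH=./bin \
  -t localhost:5001/greptime/greptimedb:latest .
docker push localhost:5001/greptime/greptimedb:latest
```
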
2 .github/workflows/apidoc.yml vendored
@@ -13,7 +13,7 @@ on:
name: Build API docs

env:
RUST_TOOLCHAIN: nightly-2024-04-18
RUST_TOOLCHAIN: nightly-2024-04-20

jobs:
apidoc:

20 .github/workflows/dev-build.yml vendored
@@ -82,6 +82,9 @@ env:
# The source code will check out in the following path: '${WORKING_DIR}/dev/greptime'.
CHECKOUT_GREPTIMEDB_PATH: dev/greptimedb

permissions:
issues: write

jobs:
allocate-runners:
name: Allocate runners
@@ -321,7 +324,7 @@ jobs:
github-token: ${{ secrets.GH_PERSONAL_ACCESS_TOKEN }}

notification:
if: ${{ always() }} # Not requiring successful dependent jobs, always run.
if: ${{ github.repository == 'GreptimeTeam/greptimedb' && always() }} # Not requiring successful dependent jobs, always run.
name: Send notification to Greptime team
needs: [
release-images-to-dockerhub
@@ -330,16 +333,25 @@ jobs:
env:
SLACK_WEBHOOK_URL: ${{ secrets.SLACK_WEBHOOK_URL_DEVELOP_CHANNEL }}
steps:
- name: Notifiy dev build successful result
- uses: actions/checkout@v4
- uses: ./.github/actions/setup-cyborg
- name: Report CI status
id: report-ci-status
working-directory: cyborg
run: pnpm tsx bin/report-ci-failure.ts
env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
CI_REPORT_STATUS: ${{ needs.release-images-to-dockerhub.outputs.build-result == 'success' }}
- name: Notify dev build successful result
uses: slackapi/slack-github-action@v1.23.0
if: ${{ needs.release-images-to-dockerhub.outputs.build-result == 'success' }}
with:
payload: |
{"text": "GreptimeDB's ${{ env.NEXT_RELEASE_VERSION }} build has completed successfully."}

- name: Notifiy dev build failed result
- name: Notify dev build failed result
uses: slackapi/slack-github-action@v1.23.0
if: ${{ needs.release-images-to-dockerhub.outputs.build-result != 'success' }}
with:
payload: |
{"text": "GreptimeDB's ${{ env.NEXT_RELEASE_VERSION }} build has failed, please check 'https://github.com/GreptimeTeam/greptimedb/actions/workflows/${{ env.NEXT_RELEASE_VERSION }}-build.yml'."}
{"text": "GreptimeDB's ${{ env.NEXT_RELEASE_VERSION }} build has failed, please check ${{ steps.report-ci-status.outputs.html_url }}."}

255 .github/workflows/develop.yml vendored
@@ -30,7 +30,7 @@ concurrency:
cancel-in-progress: true

env:
RUST_TOOLCHAIN: nightly-2024-04-18
RUST_TOOLCHAIN: nightly-2024-04-20

jobs:
check-typos-and-docs:
@@ -38,19 +38,26 @@ jobs:
runs-on: ubuntu-20.04
steps:
- uses: actions/checkout@v4
- uses: crate-ci/typos@v1.13.10
- uses: crate-ci/typos@master
- name: Check the config docs
run: |
make config-docs && \
git diff --name-only --exit-code ./config/config.md \
|| (echo "'config/config.md' is not up-to-date, please run 'make config-docs'." && exit 1)

license-header-check:
runs-on: ubuntu-20.04
name: Check License Header
steps:
- uses: actions/checkout@v4
- uses: korandoru/hawkeye@v5

check:
name: Check
runs-on: ${{ matrix.os }}
strategy:
matrix:
os: [ windows-latest, ubuntu-20.04 ]
os: [ windows-2022, ubuntu-20.04 ]
timeout-minutes: 60
steps:
- uses: actions/checkout@v4
@@ -107,9 +114,13 @@ jobs:
with:
# Shares across multiple jobs
shared-key: "build-binaries"
- name: Install cargo-gc-bin
shell: bash
run: cargo install cargo-gc-bin
- name: Build greptime binaries
shell: bash
run: cargo build --bin greptime --bin sqlness-runner
# `cargo gc` will invoke `cargo build` with specified args
run: cargo gc -- --bin greptime --bin sqlness-runner
- name: Pack greptime binaries
shell: bash
run: |
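As a side note, a minimal local sketch of the new build step, assuming `cargo-gc-bin` installs a `cargo gc` subcommand exactly as the workflow uses it, with everything after `--` forwarded to `cargo build`:

```bash
# Install the cargo-gc subcommand the CI job relies on, then build the same
# binaries the job builds; flags after `--` are passed through to cargo build.
cargo install cargo-gc-bin
cargo gc -- --bin greptime --bin sqlness-runner
```
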
@@ -130,7 +141,52 @@ jobs:
runs-on: ubuntu-latest
strategy:
matrix:
target: [ "fuzz_create_table", "fuzz_alter_table", "fuzz_create_database" ]
target: [ "fuzz_create_table", "fuzz_alter_table", "fuzz_create_database", "fuzz_create_logical_table", "fuzz_alter_logical_table", "fuzz_insert", "fuzz_insert_logical_table" ]
steps:
- uses: actions/checkout@v4
- uses: arduino/setup-protoc@v3
with:
repo-token: ${{ secrets.GITHUB_TOKEN }}
- uses: dtolnay/rust-toolchain@master
with:
toolchain: ${{ env.RUST_TOOLCHAIN }}
- name: Rust Cache
uses: Swatinem/rust-cache@v2
with:
# Shares across multiple jobs
shared-key: "fuzz-test-targets"
- name: Set Rust Fuzz
shell: bash
run: |
sudo apt-get install -y libfuzzer-14-dev
rustup install nightly
cargo +nightly install cargo-fuzz
- name: Download pre-built binaries
uses: actions/download-artifact@v4
with:
name: bins
path: .
- name: Unzip binaries
run: tar -xvf ./bins.tar.gz
- name: Run GreptimeDB
run: |
./bins/greptime standalone start&
- name: Fuzz Test
uses: ./.github/actions/fuzz-test
env:
CUSTOM_LIBFUZZER_PATH: /usr/lib/llvm-14/lib/libFuzzer.a
GT_MYSQL_ADDR: 127.0.0.1:4002
with:
target: ${{ matrix.target }}
max-total-time: 120

unstable-fuzztest:
name: Unstable Fuzz Test
needs: build
runs-on: ubuntu-latest
strategy:
matrix:
target: [ "unstable_fuzz_create_table_standalone" ]
steps:
- uses: actions/checkout@v4
- uses: arduino/setup-protoc@v3
@@ -156,15 +212,156 @@ jobs:
path: .
- name: Unzip binaries
run: tar -xvf ./bins.tar.gz
- name: Run GreptimeDB
- name: Build Fuzz Test
shell: bash
run: |
./bins/greptime standalone start&
cd tests-fuzz &
cargo install cargo-gc-bin &
cargo gc &
cd ..
- name: Run Fuzz Test
uses: ./.github/actions/fuzz-test
env:
CUSTOM_LIBFUZZER_PATH: /usr/lib/llvm-14/lib/libFuzzer.a
GT_MYSQL_ADDR: 127.0.0.1:4002
GT_FUZZ_BINARY_PATH: ./bins/greptime
GT_FUZZ_INSTANCE_ROOT_DIR: /tmp/unstable-greptime/
with:
target: ${{ matrix.target }}
max-total-time: 120
unstable: 'true'
- name: Upload unstable fuzz test logs
if: failure()
uses: actions/upload-artifact@v4
with:
name: unstable-fuzz-logs
path: /tmp/unstable-greptime/
retention-days: 3

build-greptime-ci:
name: Build GreptimeDB binary (profile-CI)
runs-on: ${{ matrix.os }}
strategy:
matrix:
os: [ ubuntu-20.04 ]
timeout-minutes: 60
steps:
- uses: actions/checkout@v4
- uses: arduino/setup-protoc@v3
with:
repo-token: ${{ secrets.GITHUB_TOKEN }}
- uses: dtolnay/rust-toolchain@master
with:
toolchain: ${{ env.RUST_TOOLCHAIN }}
- uses: Swatinem/rust-cache@v2
with:
# Shares across multiple jobs
shared-key: "build-greptime-ci"
- name: Install cargo-gc-bin
shell: bash
run: cargo install cargo-gc-bin
- name: Build greptime bianry
shell: bash
# `cargo gc` will invoke `cargo build` with specified args
run: cargo build --bin greptime --profile ci
- name: Pack greptime binary
shell: bash
run: |
mkdir bin && \
mv ./target/ci/greptime bin
- name: Print greptime binaries info
run: ls -lh bin
- name: Upload artifacts
uses: ./.github/actions/upload-artifacts
with:
artifacts-dir: bin
version: current

distributed-fuzztest:
name: Fuzz Test (Distributed, Disk)
runs-on: ubuntu-latest
needs: build-greptime-ci
strategy:
matrix:
target: [ "fuzz_create_table", "fuzz_alter_table", "fuzz_create_database", "fuzz_create_logical_table", "fuzz_alter_logical_table", "fuzz_insert", "fuzz_insert_logical_table" ]
steps:
- uses: actions/checkout@v4
- name: Setup Kind
uses: ./.github/actions/setup-kind
- name: Setup Etcd cluser
uses: ./.github/actions/setup-etcd-cluster
# Prepares for fuzz tests
- uses: arduino/setup-protoc@v3
with:
repo-token: ${{ secrets.GITHUB_TOKEN }}
- uses: dtolnay/rust-toolchain@master
with:
toolchain: ${{ env.RUST_TOOLCHAIN }}
- name: Rust Cache
uses: Swatinem/rust-cache@v2
with:
# Shares across multiple jobs
shared-key: "fuzz-test-targets"
- name: Set Rust Fuzz
shell: bash
run: |
sudo apt-get install -y libfuzzer-14-dev
rustup install nightly
cargo +nightly install cargo-fuzz
# Downloads ci image
- name: Download pre-built binariy
uses: actions/download-artifact@v4
with:
name: bin
path: .
- name: Unzip binary
run: tar -xvf ./bin.tar.gz
- name: Build and push GreptimeDB image
uses: ./.github/actions/build-and-push-ci-image
- name: Wait for etcd
run: |
kubectl wait \
--for=condition=Ready \
pod -l app.kubernetes.io/instance=etcd \
--timeout=120s \
-n etcd-cluster
- name: Print etcd info
shell: bash
run: kubectl get all --show-labels -n etcd-cluster
# Setup cluster for test
- name: Setup GreptimeDB cluster
uses: ./.github/actions/setup-greptimedb-cluster
with:
image-registry: localhost:5001
- name: Port forward (mysql)
run: |
kubectl port-forward service/my-greptimedb-frontend 4002:4002 -n my-greptimedb&
- name: Fuzz Test
uses: ./.github/actions/fuzz-test
env:
CUSTOM_LIBFUZZER_PATH: /usr/lib/llvm-14/lib/libFuzzer.a
GT_MYSQL_ADDR: 127.0.0.1:4002
with:
target: ${{ matrix.target }}
max-total-time: 120
- name: Describe Nodes
if: failure()
shell: bash
run: |
kubectl describe nodes
- name: Export kind logs
if: failure()
shell: bash
run: |
kind export logs /tmp/kind
- name: Upload logs
if: failure()
uses: actions/upload-artifact@v4
with:
name: fuzz-tests-kind-logs-${{ matrix.target }}
path: /tmp/kind
retention-days: 3

sqlness:
name: Sqlness Test
@@ -184,13 +381,13 @@ jobs:
- name: Unzip binaries
run: tar -xvf ./bins.tar.gz
- name: Run sqlness
run: RUST_BACKTRACE=1 ./bins/sqlness-runner -c ./tests/cases --bins-dir ./bins
run: RUST_BACKTRACE=1 ./bins/sqlness-runner -c ./tests/cases --bins-dir ./bins --preserve-state
- name: Upload sqlness logs
if: always()
uses: actions/upload-artifact@v4
with:
name: sqlness-logs
path: /tmp/greptime-*.log
path: /tmp/sqlness*
retention-days: 3

sqlness-kafka-wal:
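For completeness, the same invocation can be reproduced locally; with --preserve-state the runner keeps its working directories under /tmp/sqlness* (the path the upload step now collects) instead of cleaning them up. This is only a sketch assuming the pre-built binaries sit in ./bins as in the workflow.

```bash
# Run the sqlness suite against locally built binaries and keep the state
# directories for post-mortem inspection, as the updated CI step does.
RUST_BACKTRACE=1 ./bins/sqlness-runner -c ./tests/cases --bins-dir ./bins --preserve-state
ls -d /tmp/sqlness*
```
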
@@ -214,13 +411,13 @@ jobs:
working-directory: tests-integration/fixtures/kafka
run: docker compose -f docker-compose-standalone.yml up -d --wait
- name: Run sqlness
run: RUST_BACKTRACE=1 ./bins/sqlness-runner -w kafka -k 127.0.0.1:9092 -c ./tests/cases --bins-dir ./bins
run: RUST_BACKTRACE=1 ./bins/sqlness-runner -w kafka -k 127.0.0.1:9092 -c ./tests/cases --bins-dir ./bins --preserve-state
- name: Upload sqlness logs
if: always()
uses: actions/upload-artifact@v4
with:
name: sqlness-logs-with-kafka-wal
path: /tmp/greptime-*.log
path: /tmp/sqlness*
retention-days: 3

fmt:
@@ -264,7 +461,7 @@ jobs:
# Shares with `Check` job
shared-key: "check-lint"
- name: Run cargo clippy
run: cargo clippy --workspace --all-targets -- -D warnings
run: make clippy

coverage:
if: github.event.pull_request.draft == false
@@ -330,20 +527,20 @@ jobs:
fail_ci_if_error: false
verbose: true

compat:
name: Compatibility Test
needs: build
runs-on: ubuntu-20.04
timeout-minutes: 60
steps:
- uses: actions/checkout@v4
- name: Download pre-built binaries
uses: actions/download-artifact@v4
with:
name: bins
path: .
- name: Unzip binaries
run: |
mkdir -p ./bins/current
tar -xvf ./bins.tar.gz --strip-components=1 -C ./bins/current
- run: ./tests/compat/test-compat.sh 0.6.0
# compat:
#   name: Compatibility Test
#   needs: build
#   runs-on: ubuntu-20.04
#   timeout-minutes: 60
#   steps:
#     - uses: actions/checkout@v4
#     - name: Download pre-built binaries
#       uses: actions/download-artifact@v4
#       with:
#         name: bins
#         path: .
#     - name: Unzip binaries
#       run: |
#         mkdir -p ./bins/current
#         tar -xvf ./bins.tar.gz --strip-components=1 -C ./bins/current
#     - run: ./tests/compat/test-compat.sh 0.6.0

39 .github/workflows/doc-issue.yml vendored
@@ -1,39 +0,0 @@
name: Create Issue in downstream repos

on:
issues:
types:
- labeled
pull_request_target:
types:
- labeled

jobs:
doc_issue:
if: github.event.label.name == 'doc update required'
runs-on: ubuntu-20.04
steps:
- name: create an issue in doc repo
uses: dacbd/create-issue-action@v1.2.1
with:
owner: GreptimeTeam
repo: docs
token: ${{ secrets.DOCS_REPO_TOKEN }}
title: Update docs for ${{ github.event.issue.title || github.event.pull_request.title }}
body: |
A document change request is generated from
${{ github.event.issue.html_url || github.event.pull_request.html_url }}
cloud_issue:
if: github.event.label.name == 'cloud followup required'
runs-on: ubuntu-20.04
steps:
- name: create an issue in cloud repo
uses: dacbd/create-issue-action@v1.2.1
with:
owner: GreptimeTeam
repo: greptimedb-cloud
token: ${{ secrets.DOCS_REPO_TOKEN }}
title: Followup changes in ${{ github.event.issue.title || github.event.pull_request.title }}
body: |
A followup request is generated from
${{ github.event.issue.html_url || github.event.pull_request.html_url }}

36 .github/workflows/doc-label.yml vendored
@@ -1,36 +0,0 @@
name: "PR Doc Labeler"
on:
pull_request_target:
types: [opened, edited, synchronize, ready_for_review, auto_merge_enabled, labeled, unlabeled]

permissions:
pull-requests: write
contents: read

jobs:
triage:
if: ${{ github.repository == 'GreptimeTeam/greptimedb' }}
runs-on: ubuntu-latest
steps:
- uses: github/issue-labeler@v3.4
with:
configuration-path: .github/doc-label-config.yml
enable-versioned-regex: false
repo-token: ${{ secrets.GITHUB_TOKEN }}
sync-labels: 1
- name: create an issue in doc repo
uses: dacbd/create-issue-action@v1.2.1
if: ${{ github.event.action == 'opened' && contains(github.event.pull_request.body, '- [ ] This PR does not require documentation updates.') }}
with:
owner: GreptimeTeam
repo: docs
token: ${{ secrets.DOCS_REPO_TOKEN }}
title: Update docs for ${{ github.event.issue.title || github.event.pull_request.title }}
body: |
A document change request is generated from
${{ github.event.issue.html_url || github.event.pull_request.html_url }}
- name: Check doc labels
uses: docker://agilepathway/pull-request-label-checker:latest
with:
one_of: Doc update required,Doc not needed
repo_token: ${{ secrets.GITHUB_TOKEN }}

22 .github/workflows/docbot.yml vendored Normal file
@@ -0,0 +1,22 @@
name: Follow Up Docs
on:
pull_request_target:
types: [opened, edited]

permissions:
pull-requests: write
contents: read

jobs:
docbot:
runs-on: ubuntu-20.04
timeout-minutes: 10
steps:
- uses: actions/checkout@v4
- uses: ./.github/actions/setup-cyborg
- name: Maybe Follow Up Docs Issue
working-directory: cyborg
run: pnpm tsx bin/follow-up-docs-issue.ts
env:
DOCS_REPO_TOKEN: ${{ secrets.DOCS_REPO_TOKEN }}
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}

9 .github/workflows/docs.yml vendored
@@ -34,7 +34,14 @@ jobs:
runs-on: ubuntu-20.04
steps:
- uses: actions/checkout@v4
- uses: crate-ci/typos@v1.13.10
- uses: crate-ci/typos@master

license-header-check:
runs-on: ubuntu-20.04
name: Check License Header
steps:
- uses: actions/checkout@v4
- uses: korandoru/hawkeye@v5

check:
name: Check

16 .github/workflows/license.yaml vendored
@@ -1,16 +0,0 @@
name: License checker

on:
push:
branches:
- main
pull_request:
types: [opened, synchronize, reopened, ready_for_review]
jobs:
license-header-check:
runs-on: ubuntu-20.04
name: license-header-check
steps:
- uses: actions/checkout@v4
- name: Check License Header
uses: korandoru/hawkeye@v5

27 .github/workflows/nightly-build.yml vendored
@@ -66,6 +66,13 @@ env:

NIGHTLY_RELEASE_PREFIX: nightly

# Use the different image name to avoid conflict with the release images.
# The DockerHub image will be greptime/greptimedb-nightly.
IMAGE_NAME: greptimedb-nightly

permissions:
issues: write

jobs:
allocate-runners:
name: Allocate runners
@@ -188,6 +195,7 @@ jobs:
with:
image-registry: docker.io
image-namespace: ${{ vars.IMAGE_NAMESPACE }}
image-name: ${{ env.IMAGE_NAME }}
image-registry-username: ${{ secrets.DOCKERHUB_USERNAME }}
image-registry-password: ${{ secrets.DOCKERHUB_TOKEN }}
version: ${{ needs.allocate-runners.outputs.version }}
@@ -220,7 +228,7 @@ jobs:
with:
src-image-registry: docker.io
src-image-namespace: ${{ vars.IMAGE_NAMESPACE }}
src-image-name: greptimedb
src-image-name: ${{ env.IMAGE_NAME }}
dst-image-registry-username: ${{ secrets.ALICLOUD_USERNAME }}
dst-image-registry-password: ${{ secrets.ALICLOUD_PASSWORD }}
dst-image-registry: ${{ vars.ACR_IMAGE_REGISTRY }}
@@ -285,7 +293,7 @@ jobs:
github-token: ${{ secrets.GH_PERSONAL_ACCESS_TOKEN }}

notification:
if: ${{ always() }} # Not requiring successful dependent jobs, always run.
if: ${{ github.repository == 'GreptimeTeam/greptimedb' && always() }} # Not requiring successful dependent jobs, always run.
name: Send notification to Greptime team
needs: [
release-images-to-dockerhub
@@ -294,16 +302,25 @@ jobs:
env:
SLACK_WEBHOOK_URL: ${{ secrets.SLACK_WEBHOOK_URL_DEVELOP_CHANNEL }}
steps:
- name: Notifiy nightly build successful result
- uses: actions/checkout@v4
- uses: ./.github/actions/setup-cyborg
- name: Report CI status
id: report-ci-status
working-directory: cyborg
run: pnpm tsx bin/report-ci-failure.ts
env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
CI_REPORT_STATUS: ${{ needs.release-images-to-dockerhub.outputs.nightly-build-result == 'success' }}
- name: Notify nightly build successful result
uses: slackapi/slack-github-action@v1.23.0
if: ${{ needs.release-images-to-dockerhub.outputs.nightly-build-result == 'success' }}
with:
payload: |
{"text": "GreptimeDB's ${{ env.NEXT_RELEASE_VERSION }} build has completed successfully."}

- name: Notifiy nightly build failed result
- name: Notify nightly build failed result
uses: slackapi/slack-github-action@v1.23.0
if: ${{ needs.release-images-to-dockerhub.outputs.nightly-build-result != 'success' }}
with:
payload: |
{"text": "GreptimeDB's ${{ env.NEXT_RELEASE_VERSION }} build has failed, please check 'https://github.com/GreptimeTeam/greptimedb/actions/workflows/${{ env.NEXT_RELEASE_VERSION }}-build.yml'."}
{"text": "GreptimeDB's ${{ env.NEXT_RELEASE_VERSION }} build has failed, please check ${{ steps.report-ci-status.outputs.html_url }}."}

108 .github/workflows/nightly-ci.yml vendored
@@ -1,8 +1,6 @@
# Nightly CI: runs tests every night for our second tier plaforms (Windows)

on:
schedule:
- cron: '0 23 * * 1-5'
- cron: "0 23 * * 1-5"
workflow_dispatch:

name: Nightly CI
@@ -12,19 +10,38 @@ concurrency:
cancel-in-progress: true

env:
RUST_TOOLCHAIN: nightly-2024-04-18
RUST_TOOLCHAIN: nightly-2024-04-20

permissions:
issues: write

jobs:
sqlness:
name: Sqlness Test
sqlness-test:
name: Run sqlness test
if: ${{ github.repository == 'GreptimeTeam/greptimedb' }}
runs-on: ${{ matrix.os }}
strategy:
matrix:
os: [ windows-latest-8-cores ]
runs-on: ubuntu-22.04
steps:
- name: Checkout
uses: actions/checkout@v4
with:
fetch-depth: 0
- name: Run sqlness test
uses: ./.github/actions/sqlness-test
with:
data-root: sqlness-test
aws-ci-test-bucket: ${{ vars.AWS_CI_TEST_BUCKET }}
aws-region: ${{ vars.AWS_CI_TEST_BUCKET_REGION }}
aws-access-key-id: ${{ secrets.AWS_CI_TEST_ACCESS_KEY_ID }}
aws-secret-access-key: ${{ secrets.AWS_CI_TEST_SECRET_ACCESS_KEY }}

sqlness-windows:
name: Sqlness tests on Windows
if: ${{ github.repository == 'GreptimeTeam/greptimedb' }}
runs-on: windows-2022-8-cores
timeout-minutes: 60
steps:
- uses: actions/checkout@v4
- uses: ./.github/actions/setup-cyborg
- uses: arduino/setup-protoc@v3
with:
repo-token: ${{ secrets.GITHUB_TOKEN }}
@@ -35,14 +52,6 @@ jobs:
uses: Swatinem/rust-cache@v2
- name: Run sqlness
run: cargo sqlness
- name: Notify slack if failed
if: failure()
uses: slackapi/slack-github-action@v1.23.0
env:
SLACK_WEBHOOK_URL: ${{ secrets.SLACK_WEBHOOK_URL_DEVELOP_CHANNEL }}
with:
payload: |
{"text": "Nightly CI failed for sqlness tests"}
- name: Upload sqlness logs
if: always()
uses: actions/upload-artifact@v4
@@ -52,15 +61,20 @@ jobs:
retention-days: 3

test-on-windows:
name: Run tests on Windows
if: ${{ github.repository == 'GreptimeTeam/greptimedb' }}
runs-on: windows-latest-8-cores
runs-on: windows-2022-8-cores
timeout-minutes: 60
steps:
- run: git config --global core.autocrlf false
- uses: actions/checkout@v4
- uses: ./.github/actions/setup-cyborg
- uses: arduino/setup-protoc@v3
with:
repo-token: ${{ secrets.GITHUB_TOKEN }}
- uses: KyleMayes/install-llvm-action@v1
with:
version: "14.0"
- name: Install Rust toolchain
uses: dtolnay/rust-toolchain@master
with:
@@ -73,7 +87,7 @@ jobs:
- name: Install Python
uses: actions/setup-python@v5
with:
python-version: '3.10'
python-version: "3.10"
- name: Install PyArrow Package
run: pip install pyarrow
- name: Install WSL distribution
@@ -83,18 +97,62 @@ jobs:
- name: Running tests
run: cargo nextest run -F pyo3_backend,dashboard
env:
CARGO_BUILD_RUSTFLAGS: "-C linker=lld-link"
RUST_BACKTRACE: 1
CARGO_INCREMENTAL: 0
RUSTUP_WINDOWS_PATH_ADD_BIN: 1 # Workaround for https://github.com/nextest-rs/nextest/issues/1493
GT_S3_BUCKET: ${{ vars.AWS_CI_TEST_BUCKET }}
GT_S3_ACCESS_KEY_ID: ${{ secrets.AWS_CI_TEST_ACCESS_KEY_ID }}
GT_S3_ACCESS_KEY: ${{ secrets.AWS_CI_TEST_SECRET_ACCESS_KEY }}
GT_S3_REGION: ${{ vars.AWS_CI_TEST_BUCKET_REGION }}
UNITTEST_LOG_DIR: "__unittest_logs"
- name: Notify slack if failed
if: failure()
uses: slackapi/slack-github-action@v1.23.0

check-status:
name: Check status
needs: [
sqlness-test,
sqlness-windows,
test-on-windows,
]
if: ${{ github.repository == 'GreptimeTeam/greptimedb' }}
runs-on: ubuntu-20.04
outputs:
check-result: ${{ steps.set-check-result.outputs.check-result }}
steps:
- name: Set check result
id: set-check-result
run: |
echo "check-result=success" >> $GITHUB_OUTPUT

notification:
if: ${{ github.repository == 'GreptimeTeam/greptimedb' && always() }} # Not requiring successful dependent jobs, always run.
name: Send notification to Greptime team
needs: [
check-status
]
runs-on: ubuntu-20.04
env:
SLACK_WEBHOOK_URL: ${{ secrets.SLACK_WEBHOOK_URL_DEVELOP_CHANNEL }}
steps:
- uses: actions/checkout@v4
- uses: ./.github/actions/setup-cyborg
- name: Report CI status
id: report-ci-status
working-directory: cyborg
run: pnpm tsx bin/report-ci-failure.ts
env:
SLACK_WEBHOOK_URL: ${{ secrets.SLACK_WEBHOOK_URL_DEVELOP_CHANNEL }}
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
CI_REPORT_STATUS: ${{ needs.check-status.outputs.check-result == 'success' }}
- name: Notify dev build successful result
uses: slackapi/slack-github-action@v1.23.0
if: ${{ needs.check-status.outputs.check-result == 'success' }}
with:
payload: |
{"text": "Nightly CI failed for cargo test"}
{"text": "Nightly CI has completed successfully."}

- name: Notify dev build failed result
uses: slackapi/slack-github-action@v1.23.0
if: ${{ needs.check-status.outputs.check-result != 'success' }}
with:
payload: |
{"text": "Nightly CI failed has failed, please check ${{ steps.report-ci-status.outputs.html_url }}."}

27 .github/workflows/nightly-funtional-tests.yml vendored
@@ -1,27 +0,0 @@
name: Nightly functional tests

on:
schedule:
# At 00:00 on Tuesday.
- cron: '0 0 * * 2'
workflow_dispatch:

jobs:
sqlness-test:
name: Run sqlness test
if: ${{ github.repository == 'GreptimeTeam/greptimedb' }}
runs-on: ubuntu-22.04
steps:
- name: Checkout
uses: actions/checkout@v4
with:
fetch-depth: 0

- name: Run sqlness test
uses: ./.github/actions/sqlness-test
with:
data-root: sqlness-test
aws-ci-test-bucket: ${{ vars.AWS_CI_TEST_BUCKET }}
aws-region: ${{ vars.AWS_CI_TEST_BUCKET_REGION }}
aws-access-key-id: ${{ secrets.AWS_CI_TEST_ACCESS_KEY_ID }}
aws-secret-access-key: ${{ secrets.AWS_CI_TEST_SECRET_ACCESS_KEY }}

29 .github/workflows/pr-title-checker.yml vendored
@@ -1,29 +0,0 @@
name: "PR Title Checker"
on:
pull_request_target:
types:
- opened
- edited
- synchronize
- labeled
- unlabeled

jobs:
check:
runs-on: ubuntu-20.04
timeout-minutes: 10
steps:
- uses: thehanimo/pr-title-checker@v1.4.2
with:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
pass_on_octokit_error: false
configuration_path: ".github/pr-title-checker-config.json"
breaking:
runs-on: ubuntu-20.04
timeout-minutes: 10
steps:
- uses: thehanimo/pr-title-checker@v1.4.2
with:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
pass_on_octokit_error: false
configuration_path: ".github/pr-title-breaking-change-label-config.json"

32 .github/workflows/release.yml vendored
@@ -82,7 +82,7 @@ on:
# Use env variables to control all the release process.
env:
# The arguments of building greptime.
RUST_TOOLCHAIN: nightly-2024-04-18
RUST_TOOLCHAIN: nightly-2024-04-20
CARGO_PROFILE: nightly

# Controls whether to run tests, include unit-test, integration-test and sqlness.
@@ -91,7 +91,12 @@ env:
# The scheduled version is '${{ env.NEXT_RELEASE_VERSION }}-nightly-YYYYMMDD', like v0.2.0-nigthly-20230313;
NIGHTLY_RELEASE_PREFIX: nightly
# Note: The NEXT_RELEASE_VERSION should be modified manually by every formal release.
NEXT_RELEASE_VERSION: v0.8.0
NEXT_RELEASE_VERSION: v0.9.0

# Permission reference: https://docs.github.com/en/actions/using-jobs/assigning-permissions-to-jobs
permissions:
issues: write # Allows the action to create issues for cyborg.
contents: write # Allows the action to create a release.

jobs:
allocate-runners:
@@ -102,7 +107,7 @@ jobs:
linux-amd64-runner: ${{ steps.start-linux-amd64-runner.outputs.label }}
linux-arm64-runner: ${{ steps.start-linux-arm64-runner.outputs.label }}
macos-runner: ${{ inputs.macos_runner || vars.DEFAULT_MACOS_RUNNER }}
windows-runner: windows-latest-8-cores
windows-runner: windows-2022-8-cores

# The following EC2 resource id will be used for resource releasing.
linux-amd64-ec2-runner-label: ${{ steps.start-linux-amd64-runner.outputs.label }}
@@ -245,7 +250,7 @@ jobs:
- name: Set build macos result
id: set-build-macos-result
run: |
echo "build-macos-result=success" >> $GITHUB_OUTPUT
echo "build-macos-result=success" >> $GITHUB_OUTPUT

build-windows-artifacts:
name: Build Windows artifacts
@@ -318,7 +323,7 @@ jobs:
- name: Set build image result
id: set-build-image-result
run: |
echo "build-image-result=success" >> $GITHUB_OUTPUT
echo "build-image-result=success" >> $GITHUB_OUTPUT

release-cn-artifacts:
name: Release artifacts to CN region
@@ -436,7 +441,7 @@ jobs:
github-token: ${{ secrets.GH_PERSONAL_ACCESS_TOKEN }}

notification:
if: ${{ always() }} # Not requiring successful dependent jobs, always run.
if: ${{ github.repository == 'GreptimeTeam/greptimedb' && (github.event_name == 'push' || github.event_name == 'schedule') && always() }}
name: Send notification to Greptime team
needs: [
release-images-to-dockerhub,
@@ -447,16 +452,25 @@ jobs:
env:
SLACK_WEBHOOK_URL: ${{ secrets.SLACK_WEBHOOK_URL_DEVELOP_CHANNEL }}
steps:
- name: Notifiy release successful result
- uses: actions/checkout@v4
- uses: ./.github/actions/setup-cyborg
- name: Report CI status
id: report-ci-status
working-directory: cyborg
run: pnpm tsx bin/report-ci-failure.ts
env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
CI_REPORT_STATUS: ${{ needs.release-images-to-dockerhub.outputs.build-image-result == 'success' && needs.build-windows-artifacts.outputs.build-windows-result == 'success' && needs.build-macos-artifacts.outputs.build-macos-result == 'success' }}
- name: Notify release successful result
uses: slackapi/slack-github-action@v1.25.0
if: ${{ needs.release-images-to-dockerhub.outputs.build-image-result == 'success' && needs.build-windows-artifacts.outputs.build-windows-result == 'success' && needs.build-macos-artifacts.outputs.build-macos-result == 'success' }}
with:
payload: |
{"text": "GreptimeDB's release version has completed successfully."}

- name: Notifiy release failed result
- name: Notify release failed result
uses: slackapi/slack-github-action@v1.25.0
if: ${{ needs.release-images-to-dockerhub.outputs.build-image-result != 'success' || needs.build-windows-artifacts.outputs.build-windows-result != 'success' || needs.build-macos-artifacts.outputs.build-macos-result != 'success' }}
with:
payload: |
{"text": "GreptimeDB's release version has failed, please check 'https://github.com/GreptimeTeam/greptimedb/actions/workflows/release.yml'."}
{"text": "GreptimeDB's release version has failed, please check ${{ steps.report-ci-status.outputs.html_url }}."}

24 .github/workflows/schedule.yml vendored Normal file
@@ -0,0 +1,24 @@
name: Schedule Management
on:
schedule:
- cron: '4 2 * * *'
workflow_dispatch:

permissions:
contents: read
issues: write
pull-requests: write

jobs:
maintenance:
name: Periodic Maintenance
runs-on: ubuntu-latest
if: ${{ github.repository == 'GreptimeTeam/greptimedb' }}
steps:
- uses: actions/checkout@v4
- uses: ./.github/actions/setup-cyborg
- name: Do Maintenance
working-directory: cyborg
run: pnpm tsx bin/schedule.ts
env:
GITHUB_TOKEN: ${{ secrets.GH_PERSONAL_ACCESS_TOKEN }}

21 .github/workflows/semantic-pull-request.yml vendored Normal file
@@ -0,0 +1,21 @@
name: "Semantic Pull Request"

on:
pull_request_target:
types:
- opened
- reopened
- edited

jobs:
check:
runs-on: ubuntu-20.04
timeout-minutes: 10
steps:
- uses: actions/checkout@v4
- uses: ./.github/actions/setup-cyborg
- name: Check Pull Request
working-directory: cyborg
run: pnpm tsx bin/check-pull-request.ts
env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}

21 .github/workflows/unassign.yml vendored
@@ -1,21 +0,0 @@
name: Auto Unassign
on:
schedule:
- cron: '4 2 * * *'
workflow_dispatch:

permissions:
contents: read
issues: write
pull-requests: write

jobs:
auto-unassign:
name: Auto Unassign
runs-on: ubuntu-latest
steps:
- name: Auto Unassign
uses: tisonspieces/auto-unassign@main
with:
token: ${{ secrets.GH_PERSONAL_ACCESS_TOKEN }}
repository: ${{ github.repository }}
@@ -50,7 +50,7 @@ GreptimeDB uses the [Apache 2.0 license](https://github.com/GreptimeTeam/greptim

- To ensure that community is free and confident in its ability to use your contributions, please sign the Contributor License Agreement (CLA) which will be incorporated in the pull request process.
- Make sure all files have proper license header (running `docker run --rm -v $(pwd):/github/workspace ghcr.io/korandoru/hawkeye-native:v3 format` from the project root).
- Make sure all your codes are formatted and follow the [coding style](https://pingcap.github.io/style-guide/rust/) and [style guide](http://github.com/greptimeTeam/docs/style-guide.md).
- Make sure all your codes are formatted and follow the [coding style](https://pingcap.github.io/style-guide/rust/) and [style guide](docs/style-guide.md).
- Make sure all unit tests are passed (using `cargo test --workspace` or [nextest](https://nexte.st/index.html) `cargo nextest run`).
- Make sure all clippy warnings are fixed (you can check it locally by running `cargo clippy --workspace --all-targets -- -D warnings`).
1861 Cargo.lock generated
File diff suppressed because it is too large
30 Cargo.toml
@@ -4,6 +4,7 @@ members = [
"src/api",
"src/auth",
"src/catalog",
"src/cache",
"src/client",
"src/cmd",
"src/common/base",
@@ -11,6 +12,7 @@ members = [
"src/common/config",
"src/common/datasource",
"src/common/error",
"src/common/frontend",
"src/common/function",
"src/common/macro",
"src/common/greptimedb-telemetry",
@@ -62,13 +64,14 @@ members = [
resolver = "2"

[workspace.package]
version = "0.7.2"
version = "0.8.1"
edition = "2021"
license = "Apache-2.0"

[workspace.lints]
clippy.print_stdout = "warn"
clippy.print_stderr = "warn"
clippy.dbg_macro = "warn"
clippy.implicit_clone = "warn"
clippy.readonly_write_lock = "allow"
rust.unknown_lints = "deny"
@@ -98,6 +101,8 @@ bytemuck = "1.12"
bytes = { version = "1.5", features = ["serde"] }
chrono = { version = "0.4", features = ["serde"] }
clap = { version = "4.4", features = ["derive"] }
config = "0.13.0"
crossbeam-utils = "0.8"
dashmap = "5.4"
datafusion = { git = "https://github.com/apache/arrow-datafusion.git", rev = "34eda15b73a9e278af8844b30ed2f1c21c10359c" }
datafusion-common = { git = "https://github.com/apache/arrow-datafusion.git", rev = "34eda15b73a9e278af8844b30ed2f1c21c10359c" }
@@ -105,6 +110,7 @@ datafusion-expr = { git = "https://github.com/apache/arrow-datafusion.git", rev
datafusion-functions = { git = "https://github.com/apache/arrow-datafusion.git", rev = "34eda15b73a9e278af8844b30ed2f1c21c10359c" }
datafusion-optimizer = { git = "https://github.com/apache/arrow-datafusion.git", rev = "34eda15b73a9e278af8844b30ed2f1c21c10359c" }
datafusion-physical-expr = { git = "https://github.com/apache/arrow-datafusion.git", rev = "34eda15b73a9e278af8844b30ed2f1c21c10359c" }
datafusion-physical-plan = { git = "https://github.com/apache/arrow-datafusion.git", rev = "34eda15b73a9e278af8844b30ed2f1c21c10359c" }
datafusion-sql = { git = "https://github.com/apache/arrow-datafusion.git", rev = "34eda15b73a9e278af8844b30ed2f1c21c10359c" }
datafusion-substrait = { git = "https://github.com/apache/arrow-datafusion.git", rev = "34eda15b73a9e278af8844b30ed2f1c21c10359c" }
derive_builder = "0.12"
@@ -114,7 +120,7 @@ etcd-client = { git = "https://github.com/MichaelScofield/etcd-client.git", rev
fst = "0.4.7"
futures = "0.3"
futures-util = "0.3"
greptime-proto = { git = "https://github.com/GreptimeTeam/greptime-proto.git", rev = "73ac0207ab71dfea48f30259ffdb611501b5ecb8" }
greptime-proto = { git = "https://github.com/GreptimeTeam/greptime-proto.git", rev = "ae26136accd82fbdf8be540cd502f2e94951077e" }
humantime = "2.1"
humantime-serde = "1.1"
itertools = "0.10"
@@ -134,6 +140,7 @@ parquet = { version = "51.0.0", default-features = false, features = ["arrow", "
paste = "1.0"
pin-project = "1.0"
prometheus = { version = "0.13.3", features = ["process"] }
promql-parser = { version = "0.4" }
prost = "0.12"
raft-engine = { version = "0.4.1", default-features = false }
rand = "0.8"
@@ -152,10 +159,10 @@ serde = { version = "1.0", features = ["derive"] }
serde_json = { version = "1.0", features = ["float_roundtrip"] }
serde_with = "3"
smallvec = { version = "1", features = ["serde"] }
snafu = "0.7"
snafu = "0.8"
sysinfo = "0.30"
# on branch v0.44.x
sqlparser = { git = "https://github.com/GreptimeTeam/sqlparser-rs.git", rev = "c919990bf62ad38d2b0c0a3bc90b26ad919d51b0", features = [
sqlparser = { git = "https://github.com/GreptimeTeam/sqlparser-rs.git", rev = "e4e496b8d62416ad50ce70a1b460c7313610cf5d", features = [
"visitor",
] }
strum = { version = "0.25", features = ["derive"] }
@@ -164,13 +171,15 @@ tokio = { version = "1.36", features = ["full"] }
tokio-stream = { version = "0.1" }
tokio-util = { version = "0.7", features = ["io-util", "compat"] }
toml = "0.8.8"
tonic = { version = "0.11", features = ["tls"] }
tonic = { version = "0.11", features = ["tls", "gzip", "zstd"] }
tower = { version = "0.4" }
uuid = { version = "1.7", features = ["serde", "v4", "fast-rng"] }
zstd = "0.13"

## workspaces members
api = { path = "src/api" }
auth = { path = "src/auth" }
cache = { path = "src/cache" }
catalog = { path = "src/catalog" }
client = { path = "src/client" }
cmd = { path = "src/cmd" }
@@ -180,6 +189,7 @@ common-config = { path = "src/common/config" }
common-datasource = { path = "src/common/datasource" }
common-decimal = { path = "src/common/decimal" }
common-error = { path = "src/common/error" }
common-frontend = { path = "src/common/frontend" }
common-function = { path = "src/common/function" }
common-greptimedb-telemetry = { path = "src/common/greptimedb-telemetry" }
common-grpc = { path = "src/common/grpc" }
@@ -201,6 +211,7 @@ common-wal = { path = "src/common/wal" }
datanode = { path = "src/datanode" }
datatypes = { path = "src/datatypes" }
file-engine = { path = "src/file-engine" }
flow = { path = "src/flow" }
frontend = { path = "src/frontend" }
index = { path = "src/index" }
log-store = { path = "src/log-store" }
@@ -236,3 +247,12 @@ strip = true
lto = "thin"
debug = false
incremental = false

[profile.ci]
inherits = "dev"
debug = false
strip = true

[profile.dev.package.sqlness-runner]
debug = false
strip = true
6 Makefile
@@ -54,8 +54,10 @@ ifneq ($(strip $(RELEASE)),)
CARGO_BUILD_OPTS += --release
endif

ifeq ($(BUILDX_MULTI_PLATFORM_BUILD), true)
ifeq ($(BUILDX_MULTI_PLATFORM_BUILD), all)
BUILDX_MULTI_PLATFORM_BUILD_OPTS := --platform linux/amd64,linux/arm64 --push
else ifeq ($(BUILDX_MULTI_PLATFORM_BUILD), amd64)
BUILDX_MULTI_PLATFORM_BUILD_OPTS := --platform linux/amd64 --push
else
BUILDX_MULTI_PLATFORM_BUILD_OPTS := -o type=docker
endif
@@ -197,7 +199,7 @@ config-docs: ## Generate configuration documentation from toml files.
docker run --rm \
-v ${PWD}:/greptimedb \
-w /greptimedb/config \
toml2docs/toml2docs:latest \
toml2docs/toml2docs:v0.1.1 \
-p '##' \
-t ./config-docs-template.md \
-o ./config.md
@@ -12,7 +12,7 @@ api.workspace = true
arrow.workspace = true
chrono.workspace = true
clap.workspace = true
client.workspace = true
client = { workspace = true, features = ["testing"] }
common-base.workspace = true
common-telemetry.workspace = true
common-wal.workspace = true
@@ -1,513 +0,0 @@
|
||||
// Copyright 2023 Greptime Team
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
//! Use the taxi trip records from New York City dataset to bench. You can download the dataset from
|
||||
//! [here](https://www1.nyc.gov/site/tlc/about/tlc-trip-record-data.page).
|
||||
|
||||
#![allow(clippy::print_stdout)]
|
||||
|
||||
use std::collections::HashMap;
|
||||
use std::path::{Path, PathBuf};
|
||||
use std::time::Instant;
|
||||
|
||||
use arrow::array::{ArrayRef, PrimitiveArray, StringArray, TimestampMicrosecondArray};
|
||||
use arrow::datatypes::{DataType, Float64Type, Int64Type};
|
||||
use arrow::record_batch::RecordBatch;
|
||||
use clap::Parser;
|
||||
use client::api::v1::column::Values;
|
||||
use client::api::v1::{
|
||||
Column, ColumnDataType, ColumnDef, CreateTableExpr, InsertRequest, InsertRequests, SemanticType,
|
||||
};
|
||||
use client::{Client, Database, OutputData, DEFAULT_CATALOG_NAME, DEFAULT_SCHEMA_NAME};
|
||||
use futures_util::TryStreamExt;
|
||||
use indicatif::{MultiProgress, ProgressBar, ProgressStyle};
|
||||
use parquet::arrow::arrow_reader::ParquetRecordBatchReaderBuilder;
|
||||
use tokio::task::JoinSet;
|
||||
|
||||
const CATALOG_NAME: &str = "greptime";
|
||||
const SCHEMA_NAME: &str = "public";
|
||||
|
||||
#[derive(Parser)]
|
||||
#[command(name = "NYC benchmark runner")]
|
||||
struct Args {
|
||||
/// Path to the dataset
|
||||
#[arg(short, long)]
|
||||
path: Option<String>,
|
||||
|
||||
/// Batch size of insert request.
|
||||
#[arg(short = 's', long = "batch-size", default_value_t = 4096)]
|
||||
batch_size: usize,
|
||||
|
||||
/// Number of client threads on write (parallel on file level)
|
||||
#[arg(short = 't', long = "thread-num", default_value_t = 4)]
|
||||
thread_num: usize,
|
||||
|
||||
/// Number of query iteration
|
||||
#[arg(short = 'i', long = "iter-num", default_value_t = 3)]
|
||||
iter_num: usize,
|
||||
|
||||
#[arg(long = "skip-write")]
|
||||
skip_write: bool,
|
||||
|
||||
#[arg(long = "skip-read")]
|
||||
skip_read: bool,
|
||||
|
||||
#[arg(short, long, default_value_t = String::from("127.0.0.1:4001"))]
|
||||
endpoint: String,
|
||||
}
|
||||
|
||||
fn get_file_list<P: AsRef<Path>>(path: P) -> Vec<PathBuf> {
|
||||
std::fs::read_dir(path)
|
||||
.unwrap()
|
||||
.map(|dir| dir.unwrap().path().canonicalize().unwrap())
|
||||
.collect()
|
||||
}
|
||||
|
||||
fn new_table_name() -> String {
|
||||
format!("nyc_taxi_{}", chrono::Utc::now().timestamp())
|
||||
}
|
||||
|
||||
async fn write_data(
|
||||
table_name: &str,
|
||||
batch_size: usize,
|
||||
db: &Database,
|
||||
path: PathBuf,
|
||||
mpb: MultiProgress,
|
||||
pb_style: ProgressStyle,
|
||||
) -> u128 {
|
||||
let file = std::fs::File::open(&path).unwrap();
|
||||
let record_batch_reader_builder = ParquetRecordBatchReaderBuilder::try_new(file).unwrap();
|
||||
let row_num = record_batch_reader_builder
|
||||
.metadata()
|
||||
.file_metadata()
|
||||
.num_rows();
|
||||
let record_batch_reader = record_batch_reader_builder
|
||||
.with_batch_size(batch_size)
|
||||
.build()
|
||||
.unwrap();
|
||||
let progress_bar = mpb.add(ProgressBar::new(row_num as _));
|
||||
progress_bar.set_style(pb_style);
|
||||
progress_bar.set_message(format!("{path:?}"));
|
||||
|
||||
let mut total_rpc_elapsed_ms = 0;
|
||||
|
||||
for record_batch in record_batch_reader {
|
||||
let record_batch = record_batch.unwrap();
|
||||
if !is_record_batch_full(&record_batch) {
|
||||
continue;
|
||||
}
|
||||
let (columns, row_count) = convert_record_batch(record_batch);
|
||||
let request = InsertRequest {
|
||||
table_name: table_name.to_string(),
|
||||
columns,
|
||||
row_count,
|
||||
};
|
||||
let requests = InsertRequests {
|
||||
inserts: vec![request],
|
||||
};
|
||||
|
||||
let now = Instant::now();
|
||||
db.insert(requests).await.unwrap();
|
||||
let elapsed = now.elapsed();
|
||||
total_rpc_elapsed_ms += elapsed.as_millis();
|
||||
progress_bar.inc(row_count as _);
|
||||
}
|
||||
|
||||
progress_bar.finish_with_message(format!("file {path:?} done in {total_rpc_elapsed_ms}ms",));
|
||||
total_rpc_elapsed_ms
|
||||
}
|
||||
|
||||
fn convert_record_batch(record_batch: RecordBatch) -> (Vec<Column>, u32) {
|
||||
let schema = record_batch.schema();
|
||||
let fields = schema.fields();
|
||||
let row_count = record_batch.num_rows();
|
||||
let mut columns = vec![];
|
||||
|
||||
for (array, field) in record_batch.columns().iter().zip(fields.iter()) {
|
||||
let (values, datatype) = build_values(array);
|
||||
let semantic_type = match field.name().as_str() {
|
||||
"VendorID" => SemanticType::Tag,
|
||||
"tpep_pickup_datetime" => SemanticType::Timestamp,
|
||||
_ => SemanticType::Field,
|
||||
};
|
||||
|
||||
let column = Column {
|
||||
column_name: field.name().clone(),
|
||||
values: Some(values),
|
||||
null_mask: array
|
||||
.to_data()
|
||||
.nulls()
|
||||
.map(|bitmap| bitmap.buffer().as_slice().to_vec())
|
||||
.unwrap_or_default(),
|
||||
datatype: datatype.into(),
|
||||
semantic_type: semantic_type as i32,
|
||||
..Default::default()
|
||||
};
|
||||
columns.push(column);
|
||||
}
|
||||
|
||||
(columns, row_count as _)
|
||||
}
|
||||
|
||||
fn build_values(column: &ArrayRef) -> (Values, ColumnDataType) {
|
||||
match column.data_type() {
|
||||
DataType::Int64 => {
|
||||
let array = column
|
||||
.as_any()
|
||||
.downcast_ref::<PrimitiveArray<Int64Type>>()
|
||||
.unwrap();
|
||||
let values = array.values();
|
||||
(
|
||||
Values {
|
||||
i64_values: values.to_vec(),
|
||||
..Default::default()
|
||||
},
|
||||
ColumnDataType::Int64,
|
||||
)
|
||||
}
|
||||
DataType::Float64 => {
|
||||
let array = column
|
||||
.as_any()
|
||||
.downcast_ref::<PrimitiveArray<Float64Type>>()
|
||||
.unwrap();
|
||||
let values = array.values();
|
||||
(
|
||||
Values {
|
||||
f64_values: values.to_vec(),
|
||||
..Default::default()
|
||||
},
|
||||
ColumnDataType::Float64,
|
||||
)
|
||||
}
|
||||
DataType::Timestamp(_, _) => {
|
||||
let array = column
|
||||
.as_any()
|
||||
.downcast_ref::<TimestampMicrosecondArray>()
|
||||
.unwrap();
|
||||
let values = array.values();
|
||||
(
|
||||
Values {
|
||||
timestamp_microsecond_values: values.to_vec(),
|
||||
..Default::default()
|
||||
},
|
||||
ColumnDataType::TimestampMicrosecond,
|
||||
)
|
||||
}
|
||||
DataType::Utf8 => {
|
||||
let array = column.as_any().downcast_ref::<StringArray>().unwrap();
|
||||
let values = array.iter().filter_map(|s| s.map(String::from)).collect();
|
||||
(
|
||||
Values {
|
||||
string_values: values,
|
||||
..Default::default()
|
||||
},
|
||||
ColumnDataType::String,
|
||||
)
|
||||
}
|
||||
_ => unimplemented!(),
|
||||
}
|
||||
}
|
||||
|
||||
fn is_record_batch_full(batch: &RecordBatch) -> bool {
|
||||
batch.columns().iter().all(|col| col.null_count() == 0)
|
||||
}
|
||||
|
||||
fn create_table_expr(table_name: &str) -> CreateTableExpr {
|
||||
CreateTableExpr {
|
||||
catalog_name: CATALOG_NAME.to_string(),
|
||||
schema_name: SCHEMA_NAME.to_string(),
|
||||
table_name: table_name.to_string(),
|
||||
desc: String::default(),
|
||||
column_defs: vec![
|
||||
ColumnDef {
|
||||
name: "VendorID".to_string(),
|
||||
data_type: ColumnDataType::Int64 as i32,
|
||||
is_nullable: true,
|
||||
default_constraint: vec![],
|
||||
semantic_type: SemanticType::Tag as i32,
|
||||
comment: String::new(),
|
||||
..Default::default()
|
||||
},
|
||||
ColumnDef {
|
||||
name: "tpep_pickup_datetime".to_string(),
|
||||
data_type: ColumnDataType::TimestampMicrosecond as i32,
|
||||
is_nullable: false,
|
||||
default_constraint: vec![],
|
||||
semantic_type: SemanticType::Timestamp as i32,
|
||||
comment: String::new(),
|
||||
..Default::default()
|
||||
},
|
||||
ColumnDef {
|
||||
name: "tpep_dropoff_datetime".to_string(),
|
||||
data_type: ColumnDataType::TimestampMicrosecond as i32,
|
||||
is_nullable: true,
|
||||
default_constraint: vec![],
|
||||
semantic_type: SemanticType::Field as i32,
|
||||
comment: String::new(),
|
||||
..Default::default()
|
||||
},
|
||||
ColumnDef {
|
||||
name: "passenger_count".to_string(),
|
||||
data_type: ColumnDataType::Float64 as i32,
|
||||
is_nullable: true,
|
||||
default_constraint: vec![],
|
||||
semantic_type: SemanticType::Field as i32,
|
||||
comment: String::new(),
|
||||
..Default::default()
|
||||
},
|
||||
ColumnDef {
|
||||
name: "trip_distance".to_string(),
|
||||
data_type: ColumnDataType::Float64 as i32,
|
||||
is_nullable: true,
|
||||
default_constraint: vec![],
|
||||
semantic_type: SemanticType::Field as i32,
|
||||
comment: String::new(),
|
||||
..Default::default()
|
||||
},
|
||||
ColumnDef {
|
||||
name: "RatecodeID".to_string(),
|
||||
data_type: ColumnDataType::Float64 as i32,
|
||||
is_nullable: true,
|
||||
default_constraint: vec![],
|
||||
semantic_type: SemanticType::Field as i32,
|
||||
comment: String::new(),
|
||||
..Default::default()
|
||||
},
|
||||
ColumnDef {
|
||||
name: "store_and_fwd_flag".to_string(),
|
||||
data_type: ColumnDataType::String as i32,
|
||||
is_nullable: true,
|
||||
default_constraint: vec![],
|
||||
semantic_type: SemanticType::Field as i32,
|
||||
comment: String::new(),
|
||||
..Default::default()
|
||||
},
|
||||
ColumnDef {
|
||||
name: "PULocationID".to_string(),
|
||||
data_type: ColumnDataType::Int64 as i32,
|
||||
is_nullable: true,
|
||||
default_constraint: vec![],
|
||||
semantic_type: SemanticType::Field as i32,
|
||||
comment: String::new(),
|
||||
..Default::default()
|
||||
},
|
||||
ColumnDef {
|
||||
name: "DOLocationID".to_string(),
|
||||
data_type: ColumnDataType::Int64 as i32,
|
||||
is_nullable: true,
|
||||
default_constraint: vec![],
|
||||
semantic_type: SemanticType::Field as i32,
|
||||
comment: String::new(),
|
||||
..Default::default()
|
||||
},
|
||||
ColumnDef {
|
||||
name: "payment_type".to_string(),
|
||||
data_type: ColumnDataType::Int64 as i32,
|
||||
is_nullable: true,
|
||||
default_constraint: vec![],
|
||||
semantic_type: SemanticType::Field as i32,
|
||||
comment: String::new(),
|
||||
..Default::default()
|
||||
},
|
||||
ColumnDef {
|
||||
name: "fare_amount".to_string(),
|
||||
data_type: ColumnDataType::Float64 as i32,
|
||||
is_nullable: true,
|
||||
default_constraint: vec![],
|
||||
semantic_type: SemanticType::Field as i32,
|
||||
comment: String::new(),
|
||||
..Default::default()
|
||||
},
|
||||
ColumnDef {
|
||||
name: "extra".to_string(),
|
||||
data_type: ColumnDataType::Float64 as i32,
|
||||
is_nullable: true,
|
||||
default_constraint: vec![],
|
||||
semantic_type: SemanticType::Field as i32,
|
||||
comment: String::new(),
|
||||
..Default::default()
|
||||
},
|
||||
ColumnDef {
|
||||
name: "mta_tax".to_string(),
|
||||
data_type: ColumnDataType::Float64 as i32,
|
||||
is_nullable: true,
|
||||
default_constraint: vec![],
|
||||
semantic_type: SemanticType::Field as i32,
|
||||
comment: String::new(),
|
||||
..Default::default()
|
||||
},
|
||||
ColumnDef {
|
||||
name: "tip_amount".to_string(),
|
||||
data_type: ColumnDataType::Float64 as i32,
|
||||
is_nullable: true,
|
||||
default_constraint: vec![],
|
||||
semantic_type: SemanticType::Field as i32,
|
||||
comment: String::new(),
|
||||
..Default::default()
|
||||
},
|
||||
ColumnDef {
|
||||
name: "tolls_amount".to_string(),
|
||||
data_type: ColumnDataType::Float64 as i32,
|
||||
is_nullable: true,
|
||||
default_constraint: vec![],
|
||||
semantic_type: SemanticType::Field as i32,
|
||||
comment: String::new(),
|
||||
..Default::default()
|
||||
},
|
||||
ColumnDef {
|
||||
name: "improvement_surcharge".to_string(),
|
||||
data_type: ColumnDataType::Float64 as i32,
|
||||
is_nullable: true,
|
||||
default_constraint: vec![],
|
||||
semantic_type: SemanticType::Field as i32,
|
||||
comment: String::new(),
|
||||
..Default::default()
|
||||
},
|
||||
ColumnDef {
|
||||
name: "total_amount".to_string(),
|
||||
data_type: ColumnDataType::Float64 as i32,
|
||||
is_nullable: true,
|
||||
default_constraint: vec![],
|
||||
semantic_type: SemanticType::Field as i32,
|
||||
comment: String::new(),
|
||||
..Default::default()
|
||||
},
|
||||
ColumnDef {
|
||||
name: "congestion_surcharge".to_string(),
|
||||
data_type: ColumnDataType::Float64 as i32,
|
||||
is_nullable: true,
|
||||
default_constraint: vec![],
|
||||
semantic_type: SemanticType::Field as i32,
|
||||
comment: String::new(),
|
||||
..Default::default()
|
||||
},
|
||||
ColumnDef {
|
||||
name: "airport_fee".to_string(),
|
||||
data_type: ColumnDataType::Float64 as i32,
|
||||
is_nullable: true,
|
||||
default_constraint: vec![],
|
||||
semantic_type: SemanticType::Field as i32,
|
||||
comment: String::new(),
|
||||
..Default::default()
|
||||
},
|
||||
],
|
||||
time_index: "tpep_pickup_datetime".to_string(),
|
||||
primary_keys: vec!["VendorID".to_string()],
|
||||
create_if_not_exists: true,
|
||||
table_options: Default::default(),
|
||||
table_id: None,
|
||||
engine: "mito".to_string(),
|
||||
}
|
||||
}
|
||||
|
||||
fn query_set(table_name: &str) -> HashMap<String, String> {
|
||||
HashMap::from([
|
||||
(
|
||||
"count_all".to_string(),
|
||||
format!("SELECT COUNT(*) FROM {table_name};"),
|
||||
),
|
||||
(
|
||||
"fare_amt_by_passenger".to_string(),
|
||||
format!("SELECT passenger_count, MIN(fare_amount), MAX(fare_amount), SUM(fare_amount) FROM {table_name} GROUP BY passenger_count"),
|
||||
)
|
||||
])
|
||||
}
|
||||
|
||||
async fn do_write(args: &Args, db: &Database, table_name: &str) {
|
||||
let mut file_list = get_file_list(args.path.clone().expect("Specify data path in argument"));
|
||||
let mut write_jobs = JoinSet::new();
|
||||
|
||||
let create_table_result = db.create(create_table_expr(table_name)).await;
|
||||
println!("Create table result: {create_table_result:?}");
|
||||
|
||||
let progress_bar_style = ProgressStyle::with_template(
|
||||
"[{elapsed_precise}] {bar:60.cyan/blue} {pos:>7}/{len:7} {msg}",
|
||||
)
|
||||
.unwrap()
|
||||
.progress_chars("##-");
|
||||
let multi_progress_bar = MultiProgress::new();
|
||||
let file_progress = multi_progress_bar.add(ProgressBar::new(file_list.len() as _));
|
||||
file_progress.inc(0);
|
||||
|
||||
let batch_size = args.batch_size;
|
||||
for _ in 0..args.thread_num {
|
||||
if let Some(path) = file_list.pop() {
|
||||
let db = db.clone();
|
||||
let mpb = multi_progress_bar.clone();
|
||||
let pb_style = progress_bar_style.clone();
|
||||
let table_name = table_name.to_string();
|
||||
let _ = write_jobs.spawn(async move {
|
||||
write_data(&table_name, batch_size, &db, path, mpb, pb_style).await
|
||||
});
|
||||
}
|
||||
}
|
||||
while write_jobs.join_next().await.is_some() {
|
||||
file_progress.inc(1);
|
||||
if let Some(path) = file_list.pop() {
|
||||
let db = db.clone();
|
||||
let mpb = multi_progress_bar.clone();
|
||||
let pb_style = progress_bar_style.clone();
|
||||
let table_name = table_name.to_string();
|
||||
let _ = write_jobs.spawn(async move {
|
||||
write_data(&table_name, batch_size, &db, path, mpb, pb_style).await
|
||||
});
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
async fn do_query(num_iter: usize, db: &Database, table_name: &str) {
|
||||
for (query_name, query) in query_set(table_name) {
|
||||
println!("Running query: {query}");
|
||||
for i in 0..num_iter {
|
||||
let now = Instant::now();
|
||||
let res = db.sql(&query).await.unwrap();
|
||||
match res.data {
|
||||
OutputData::AffectedRows(_) | OutputData::RecordBatches(_) => (),
|
||||
OutputData::Stream(stream) => {
|
||||
stream.try_collect::<Vec<_>>().await.unwrap();
|
||||
}
|
||||
}
|
||||
let elapsed = now.elapsed();
|
||||
println!(
|
||||
"query {}, iteration {}: {}ms",
|
||||
query_name,
|
||||
i,
|
||||
elapsed.as_millis(),
|
||||
);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
fn main() {
|
||||
let args = Args::parse();
|
||||
|
||||
tokio::runtime::Builder::new_multi_thread()
|
||||
.worker_threads(args.thread_num)
|
||||
.enable_all()
|
||||
.build()
|
||||
.unwrap()
|
||||
.block_on(async {
|
||||
let client = Client::with_urls(vec![&args.endpoint]);
|
||||
let db = Database::new(DEFAULT_CATALOG_NAME, DEFAULT_SCHEMA_NAME, client);
|
||||
let table_name = new_table_name();
|
||||
|
||||
if !args.skip_write {
|
||||
do_write(&args, &db, &table_name).await;
|
||||
}
|
||||
|
||||
if !args.skip_read {
|
||||
do_query(args.iter_num, &db, &table_name).await;
|
||||
}
|
||||
})
|
||||
}
|
||||
@@ -28,6 +28,7 @@ use rand::distributions::{Alphanumeric, DistString, Uniform};
use rand::rngs::SmallRng;
use rand::{Rng, SeedableRng};
use serde::{Deserialize, Serialize};
use store_api::logstore::provider::Provider;
use store_api::logstore::LogStore;
use store_api::storage::RegionId;

@@ -210,7 +211,7 @@ impl From<Args> for Config {
pub struct Region {
id: RegionId,
schema: Vec<ColumnSchema>,
wal_options: WalOptions,
provider: Provider,
next_sequence: AtomicU64,
next_entry_id: AtomicU64,
next_timestamp: AtomicI64,
@@ -227,10 +228,14 @@ impl Region {
num_rows: u32,
rng_seed: u64,
) -> Self {
let provider = match wal_options {
WalOptions::RaftEngine => Provider::raft_engine_provider(id.as_u64()),
WalOptions::Kafka(opts) => Provider::kafka_provider(opts.topic),
};
Self {
id,
schema,
wal_options,
provider,
next_sequence: AtomicU64::new(1),
next_entry_id: AtomicU64::new(1),
next_timestamp: AtomicI64::new(1655276557000),
@@ -258,14 +263,14 @@ impl Region {
self.id,
self.next_entry_id.fetch_add(1, Ordering::Relaxed),
&entry,
&self.wal_options,
&self.provider,
)
.unwrap();
}

/// Replays the region.
pub async fn replay<S: LogStore>(&self, wal: &Arc<Wal<S>>) {
let mut wal_stream = wal.scan(self.id, 0, &self.wal_options).unwrap();
let mut wal_stream = wal.scan(self.id, 0, &self.provider).unwrap();
while let Some(res) = wal_stream.next().await {
let (_, entry) = res.unwrap();
metrics::METRIC_WAL_READ_BYTES_TOTAL.inc_by(Self::entry_estimated_size(&entry) as u64);
@@ -1,10 +1,16 @@
# Configurations

- [Standalone Mode](#standalone-mode)
- [Distributed Mode](#distributed-mode)
- [Frontend](#frontend)
- [Metasrv](#metasrv)
- [Datanode](#datanode)

## Standalone Mode

{{ toml2docs "./standalone.example.toml" }}

## Cluster Mode
## Distributed Mode

### Frontend
@@ -1,5 +1,11 @@
|
||||
# Configurations
|
||||
|
||||
- [Standalone Mode](#standalone-mode)
|
||||
- [Distributed Mode](#distributed-mode)
|
||||
- [Frontend](#frontend)
|
||||
- [Metasrv](#metasrv)
|
||||
- [Datanode](#datanode)
|
||||
|
||||
## Standalone Mode
|
||||
|
||||
| Key | Type | Default | Descriptions |
|
||||
@@ -14,6 +20,11 @@
|
||||
| `grpc` | -- | -- | The gRPC server options. |
|
||||
| `grpc.addr` | String | `127.0.0.1:4001` | The address to bind the gRPC server. |
|
||||
| `grpc.runtime_size` | Integer | `8` | The number of server worker threads. |
|
||||
| `grpc.tls` | -- | -- | gRPC server TLS options, see `mysql.tls` section. |
|
||||
| `grpc.tls.mode` | String | `disable` | TLS mode. |
|
||||
| `grpc.tls.cert_path` | String | `None` | Certificate file path. |
|
||||
| `grpc.tls.key_path` | String | `None` | Private key file path. |
|
||||
| `grpc.tls.watch` | Bool | `false` | Watch for Certificate and key file change and auto reload.<br/>For now, gRPC tls config does not support auto reload. |
|
||||
| `mysql` | -- | -- | MySQL server options. |
|
||||
| `mysql.enable` | Bool | `true` | Whether to enable. |
|
||||
| `mysql.addr` | String | `127.0.0.1:4002` | The addr to bind the MySQL server. |
|
||||
@@ -27,15 +38,13 @@
|
||||
| `postgres.enable` | Bool | `true` | Whether to enable |
|
||||
| `postgres.addr` | String | `127.0.0.1:4003` | The addr to bind the PostgresSQL server. |
|
||||
| `postgres.runtime_size` | Integer | `2` | The number of server worker threads. |
|
||||
| `postgres.tls` | -- | -- | PostgresSQL server TLS options, see `mysql_options.tls` section. |
|
||||
| `postgres.tls` | -- | -- | PostgresSQL server TLS options, see `mysql.tls` section. |
|
||||
| `postgres.tls.mode` | String | `disable` | TLS mode. |
|
||||
| `postgres.tls.cert_path` | String | `None` | Certificate file path. |
|
||||
| `postgres.tls.key_path` | String | `None` | Private key file path. |
|
||||
| `postgres.tls.watch` | Bool | `false` | Watch for Certificate and key file change and auto reload |
|
||||
| `opentsdb` | -- | -- | OpenTSDB protocol options. |
|
||||
| `opentsdb.enable` | Bool | `true` | Whether to enable |
|
||||
| `opentsdb.addr` | String | `127.0.0.1:4242` | OpenTSDB telnet API server address. |
|
||||
| `opentsdb.runtime_size` | Integer | `2` | The number of server worker threads. |
|
||||
| `opentsdb.enable` | Bool | `true` | Whether to enable OpenTSDB put in HTTP API. |
|
||||
| `influxdb` | -- | -- | InfluxDB protocol options. |
|
||||
| `influxdb.enable` | Bool | `true` | Whether to enable InfluxDB protocol in HTTP API. |
|
||||
| `prom_store` | -- | -- | Prometheus remote storage options |
|
||||
@@ -98,6 +107,10 @@
|
||||
| `region_engine.mito.sst_meta_cache_size` | String | `128MB` | Cache size for SST metadata. Setting it to 0 to disable the cache.<br/>If not set, it's default to 1/32 of OS memory with a max limitation of 128MB. |
|
||||
| `region_engine.mito.vector_cache_size` | String | `512MB` | Cache size for vectors and arrow arrays. Setting it to 0 to disable the cache.<br/>If not set, it's default to 1/16 of OS memory with a max limitation of 512MB. |
|
||||
| `region_engine.mito.page_cache_size` | String | `512MB` | Cache size for pages of SST row groups. Setting it to 0 to disable the cache.<br/>If not set, it's default to 1/16 of OS memory with a max limitation of 512MB. |
|
||||
| `region_engine.mito.enable_experimental_write_cache` | Bool | `false` | Whether to enable the experimental write cache. |
|
||||
| `region_engine.mito.experimental_write_cache_path` | String | `""` | File system path for write cache, defaults to `{data_home}/write_cache`. |
|
||||
| `region_engine.mito.experimental_write_cache_size` | String | `512MB` | Capacity for write cache. |
|
||||
| `region_engine.mito.experimental_write_cache_ttl` | String | `1h` | TTL for write cache. |
|
||||
| `region_engine.mito.sst_write_buffer_size` | String | `8MB` | Buffer size for SST writing. |
|
||||
| `region_engine.mito.scan_parallelism` | Integer | `0` | Parallelism to scan a region (default: 1/4 of cpu cores).<br/>- `0`: using the default value (1/4 of cpu cores).<br/>- `1`: scan in current thread.<br/>- `n`: scan in parallelism n. |
|
||||
| `region_engine.mito.parallel_scan_channel_size` | Integer | `32` | Capacity of the channel to send data from parallel scan tasks to the main task. |
|
||||
@@ -129,9 +142,11 @@
|
||||
| `export_metrics.remote_write` | -- | -- | -- |
|
||||
| `export_metrics.remote_write.url` | String | `""` | The url the metrics send to. The url example can be: `http://127.0.0.1:4000/v1/prometheus/write?db=information_schema`. |
|
||||
| `export_metrics.remote_write.headers` | InlineTable | -- | HTTP headers of Prometheus remote-write carry. |
|
||||
| `tracing` | -- | -- | The tracing options. Only effect when compiled with `tokio-console` feature. |
|
||||
| `tracing.tokio_console_addr` | String | `None` | The tokio console address. |
|
||||
|
||||
|
||||
## Cluster Mode
|
||||
## Distributed Mode
|
||||
|
||||
### Frontend
|
||||
|
||||
@@ -149,6 +164,11 @@
|
||||
| `grpc` | -- | -- | The gRPC server options. |
|
||||
| `grpc.addr` | String | `127.0.0.1:4001` | The address to bind the gRPC server. |
|
||||
| `grpc.runtime_size` | Integer | `8` | The number of server worker threads. |
|
||||
| `grpc.tls` | -- | -- | gRPC server TLS options, see `mysql.tls` section. |
|
||||
| `grpc.tls.mode` | String | `disable` | TLS mode. |
|
||||
| `grpc.tls.cert_path` | String | `None` | Certificate file path. |
|
||||
| `grpc.tls.key_path` | String | `None` | Private key file path. |
|
||||
| `grpc.tls.watch` | Bool | `false` | Watch for Certificate and key file change and auto reload.<br/>For now, gRPC tls config does not support auto reload. |
|
||||
| `mysql` | -- | -- | MySQL server options. |
|
||||
| `mysql.enable` | Bool | `true` | Whether to enable. |
|
||||
| `mysql.addr` | String | `127.0.0.1:4002` | The addr to bind the MySQL server. |
|
||||
@@ -162,15 +182,13 @@
|
||||
| `postgres.enable` | Bool | `true` | Whether to enable |
|
||||
| `postgres.addr` | String | `127.0.0.1:4003` | The addr to bind the PostgresSQL server. |
|
||||
| `postgres.runtime_size` | Integer | `2` | The number of server worker threads. |
|
||||
| `postgres.tls` | -- | -- | PostgresSQL server TLS options, see `mysql_options.tls` section. |
|
||||
| `postgres.tls` | -- | -- | PostgresSQL server TLS options, see `mysql.tls` section. |
|
||||
| `postgres.tls.mode` | String | `disable` | TLS mode. |
|
||||
| `postgres.tls.cert_path` | String | `None` | Certificate file path. |
|
||||
| `postgres.tls.key_path` | String | `None` | Private key file path. |
|
||||
| `postgres.tls.watch` | Bool | `false` | Watch for Certificate and key file change and auto reload |
|
||||
| `opentsdb` | -- | -- | OpenTSDB protocol options. |
|
||||
| `opentsdb.enable` | Bool | `true` | Whether to enable |
|
||||
| `opentsdb.addr` | String | `127.0.0.1:4242` | OpenTSDB telnet API server address. |
|
||||
| `opentsdb.runtime_size` | Integer | `2` | The number of server worker threads. |
|
||||
| `opentsdb.enable` | Bool | `true` | Whether to enable OpenTSDB put in HTTP API. |
|
||||
| `influxdb` | -- | -- | InfluxDB protocol options. |
|
||||
| `influxdb.enable` | Bool | `true` | Whether to enable InfluxDB protocol in HTTP API. |
|
||||
| `prom_store` | -- | -- | Prometheus remote storage options |
|
||||
@@ -188,7 +206,6 @@
|
||||
| `meta_client.metadata_cache_tti` | String | `5m` | -- |
|
||||
| `datanode` | -- | -- | Datanode options. |
|
||||
| `datanode.client` | -- | -- | Datanode client options. |
|
||||
| `datanode.client.timeout` | String | `10s` | -- |
|
||||
| `datanode.client.connect_timeout` | String | `10s` | -- |
|
||||
| `datanode.client.tcp_nodelay` | Bool | `true` | -- |
|
||||
| `logging` | -- | -- | The logging options. |
|
||||
@@ -207,6 +224,8 @@
|
||||
| `export_metrics.remote_write` | -- | -- | -- |
|
||||
| `export_metrics.remote_write.url` | String | `""` | The url the metrics send to. The url example can be: `http://127.0.0.1:4000/v1/prometheus/write?db=information_schema`. |
|
||||
| `export_metrics.remote_write.headers` | InlineTable | -- | HTTP headers of Prometheus remote-write carry. |
|
||||
| `tracing` | -- | -- | The tracing options. Only effect when compiled with `tokio-console` feature. |
|
||||
| `tracing.tokio_console_addr` | String | `None` | The tokio console address. |
|
||||
|
||||
|
||||
### Metasrv
|
||||
@@ -263,6 +282,8 @@
|
||||
| `export_metrics.remote_write` | -- | -- | -- |
|
||||
| `export_metrics.remote_write.url` | String | `""` | The url the metrics send to. The url example can be: `http://127.0.0.1:4000/v1/prometheus/write?db=information_schema`. |
|
||||
| `export_metrics.remote_write.headers` | InlineTable | -- | HTTP headers of Prometheus remote-write carry. |
|
||||
| `tracing` | -- | -- | The tracing options. Only effect when compiled with `tokio-console` feature. |
|
||||
| `tracing.tokio_console_addr` | String | `None` | The tokio console address. |
|
||||
|
||||
|
||||
### Datanode
|
||||
@@ -343,6 +364,10 @@
|
||||
| `region_engine.mito.sst_meta_cache_size` | String | `128MB` | Cache size for SST metadata. Setting it to 0 to disable the cache.<br/>If not set, it's default to 1/32 of OS memory with a max limitation of 128MB. |
|
||||
| `region_engine.mito.vector_cache_size` | String | `512MB` | Cache size for vectors and arrow arrays. Setting it to 0 to disable the cache.<br/>If not set, it's default to 1/16 of OS memory with a max limitation of 512MB. |
|
||||
| `region_engine.mito.page_cache_size` | String | `512MB` | Cache size for pages of SST row groups. Setting it to 0 to disable the cache.<br/>If not set, it's default to 1/16 of OS memory with a max limitation of 512MB. |
|
||||
| `region_engine.mito.enable_experimental_write_cache` | Bool | `false` | Whether to enable the experimental write cache. |
|
||||
| `region_engine.mito.experimental_write_cache_path` | String | `""` | File system path for write cache, defaults to `{data_home}/write_cache`. |
|
||||
| `region_engine.mito.experimental_write_cache_size` | String | `512MB` | Capacity for write cache. |
|
||||
| `region_engine.mito.experimental_write_cache_ttl` | String | `1h` | TTL for write cache. |
|
||||
| `region_engine.mito.sst_write_buffer_size` | String | `8MB` | Buffer size for SST writing. |
|
||||
| `region_engine.mito.scan_parallelism` | Integer | `0` | Parallelism to scan a region (default: 1/4 of cpu cores).<br/>- `0`: using the default value (1/4 of cpu cores).<br/>- `1`: scan in current thread.<br/>- `n`: scan in parallelism n. |
|
||||
| `region_engine.mito.parallel_scan_channel_size` | Integer | `32` | Capacity of the channel to send data from parallel scan tasks to the main task. |
|
||||
@@ -374,3 +399,5 @@
|
||||
| `export_metrics.remote_write` | -- | -- | -- |
|
||||
| `export_metrics.remote_write.url` | String | `""` | The url the metrics send to. The url example can be: `http://127.0.0.1:4000/v1/prometheus/write?db=information_schema`. |
|
||||
| `export_metrics.remote_write.headers` | InlineTable | -- | HTTP headers of Prometheus remote-write carry. |
|
||||
| `tracing` | -- | -- | The tracing options. Only effect when compiled with `tokio-console` feature. |
|
||||
| `tracing.tokio_console_addr` | String | `None` | The tokio console address. |
|
||||
|
||||
@@ -324,6 +324,18 @@ vector_cache_size = "512MB"
|
||||
## If not set, it's default to 1/16 of OS memory with a max limitation of 512MB.
|
||||
page_cache_size = "512MB"
|
||||
|
||||
## Whether to enable the experimental write cache.
|
||||
enable_experimental_write_cache = false
|
||||
|
||||
## File system path for write cache, defaults to `{data_home}/write_cache`.
|
||||
experimental_write_cache_path = ""
|
||||
|
||||
## Capacity for write cache.
|
||||
experimental_write_cache_size = "512MB"
|
||||
|
||||
## TTL for write cache.
|
||||
experimental_write_cache_ttl = "1h"
|
||||
|
||||
## Buffer size for SST writing.
|
||||
sst_write_buffer_size = "8MB"
|
||||
|
||||
@@ -428,3 +440,9 @@ url = ""
|
||||
|
||||
## HTTP headers of Prometheus remote-write carry.
|
||||
headers = { }
|
||||
|
||||
## The tracing options. Only effect when compiled with `tokio-console` feature.
|
||||
[tracing]
|
||||
## The tokio console address.
|
||||
## +toml2docs:none-default
|
||||
tokio_console_addr = "127.0.0.1"
|
||||
|
||||
@@ -30,6 +30,23 @@ addr = "127.0.0.1:4001"
|
||||
## The number of server worker threads.
|
||||
runtime_size = 8
|
||||
|
||||
## gRPC server TLS options, see `mysql.tls` section.
|
||||
[grpc.tls]
|
||||
## TLS mode.
|
||||
mode = "disable"
|
||||
|
||||
## Certificate file path.
|
||||
## +toml2docs:none-default
|
||||
cert_path = ""
|
||||
|
||||
## Private key file path.
|
||||
## +toml2docs:none-default
|
||||
key_path = ""
|
||||
|
||||
## Watch for Certificate and key file change and auto reload.
|
||||
## For now, gRPC tls config does not support auto reload.
|
||||
watch = false
|
||||
|
||||
## MySQL server options.
|
||||
[mysql]
|
||||
## Whether to enable.
|
||||
@@ -70,7 +87,7 @@ addr = "127.0.0.1:4003"
|
||||
## The number of server worker threads.
|
||||
runtime_size = 2
|
||||
|
||||
## PostgresSQL server TLS options, see `mysql_options.tls` section.
|
||||
## PostgresSQL server TLS options, see `mysql.tls` section.
|
||||
[postgres.tls]
|
||||
## TLS mode.
|
||||
mode = "disable"
|
||||
@@ -88,12 +105,8 @@ watch = false
|
||||
|
||||
## OpenTSDB protocol options.
|
||||
[opentsdb]
|
||||
## Whether to enable
|
||||
## Whether to enable OpenTSDB put in HTTP API.
|
||||
enable = true
|
||||
## OpenTSDB telnet API server address.
|
||||
addr = "127.0.0.1:4242"
|
||||
## The number of server worker threads.
|
||||
runtime_size = 2
|
||||
|
||||
## InfluxDB protocol options.
|
||||
[influxdb]
|
||||
@@ -140,7 +153,6 @@ metadata_cache_tti = "5m"
|
||||
[datanode]
|
||||
## Datanode client options.
|
||||
[datanode.client]
|
||||
timeout = "10s"
|
||||
connect_timeout = "10s"
|
||||
tcp_nodelay = true
|
||||
|
||||
@@ -190,3 +202,9 @@ url = ""
|
||||
|
||||
## HTTP headers of Prometheus remote-write carry.
|
||||
headers = { }
|
||||
|
||||
## The tracing options. Only effect when compiled with `tokio-console` feature.
|
||||
[tracing]
|
||||
## The tokio console address.
|
||||
## +toml2docs:none-default
|
||||
tokio_console_addr = "127.0.0.1"
|
||||
|
||||
@@ -141,3 +141,9 @@ url = ""
|
||||
|
||||
## HTTP headers of Prometheus remote-write carry.
|
||||
headers = { }
|
||||
|
||||
## The tracing options. Only effect when compiled with `tokio-console` feature.
|
||||
[tracing]
|
||||
## The tokio console address.
|
||||
## +toml2docs:none-default
|
||||
tokio_console_addr = "127.0.0.1"
|
||||
|
||||
@@ -25,6 +25,23 @@ addr = "127.0.0.1:4001"
|
||||
## The number of server worker threads.
|
||||
runtime_size = 8
|
||||
|
||||
## gRPC server TLS options, see `mysql.tls` section.
|
||||
[grpc.tls]
|
||||
## TLS mode.
|
||||
mode = "disable"
|
||||
|
||||
## Certificate file path.
|
||||
## +toml2docs:none-default
|
||||
cert_path = ""
|
||||
|
||||
## Private key file path.
|
||||
## +toml2docs:none-default
|
||||
key_path = ""
|
||||
|
||||
## Watch for Certificate and key file change and auto reload.
|
||||
## For now, gRPC tls config does not support auto reload.
|
||||
watch = false
|
||||
|
||||
## MySQL server options.
|
||||
[mysql]
|
||||
## Whether to enable.
|
||||
@@ -65,7 +82,7 @@ addr = "127.0.0.1:4003"
|
||||
## The number of server worker threads.
|
||||
runtime_size = 2
|
||||
|
||||
## PostgresSQL server TLS options, see `mysql_options.tls` section.
|
||||
## PostgresSQL server TLS options, see `mysql.tls` section.
|
||||
[postgres.tls]
|
||||
## TLS mode.
|
||||
mode = "disable"
|
||||
@@ -83,12 +100,8 @@ watch = false
|
||||
|
||||
## OpenTSDB protocol options.
|
||||
[opentsdb]
|
||||
## Whether to enable
|
||||
## Whether to enable OpenTSDB put in HTTP API.
|
||||
enable = true
|
||||
## OpenTSDB telnet API server address.
|
||||
addr = "127.0.0.1:4242"
|
||||
## The number of server worker threads.
|
||||
runtime_size = 2
|
||||
|
||||
## InfluxDB protocol options.
|
||||
[influxdb]
|
||||
@@ -371,6 +384,18 @@ vector_cache_size = "512MB"
|
||||
## If not set, it's default to 1/16 of OS memory with a max limitation of 512MB.
|
||||
page_cache_size = "512MB"
|
||||
|
||||
## Whether to enable the experimental write cache.
|
||||
enable_experimental_write_cache = false
|
||||
|
||||
## File system path for write cache, defaults to `{data_home}/write_cache`.
|
||||
experimental_write_cache_path = ""
|
||||
|
||||
## Capacity for write cache.
|
||||
experimental_write_cache_size = "512MB"
|
||||
|
||||
## TTL for write cache.
|
||||
experimental_write_cache_ttl = "1h"
|
||||
|
||||
## Buffer size for SST writing.
|
||||
sst_write_buffer_size = "8MB"
|
||||
|
||||
@@ -475,3 +500,9 @@ url = ""
|
||||
|
||||
## HTTP headers of Prometheus remote-write carry.
|
||||
headers = { }
|
||||
|
||||
## The tracing options. Only effect when compiled with `tokio-console` feature.
|
||||
[tracing]
|
||||
## The tokio console address.
|
||||
## +toml2docs:none-default
|
||||
tokio_console_addr = "127.0.0.1"
|
||||
|
||||
2 cyborg/.gitignore vendored Normal file
@@ -0,0 +1,2 @@
node_modules
.env
79 cyborg/bin/check-pull-request.ts Normal file
@@ -0,0 +1,79 @@
|
||||
/*
|
||||
* Copyright 2023 Greptime Team
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
import * as core from '@actions/core'
|
||||
import {handleError, obtainClient} from "@/common";
|
||||
import {context} from "@actions/github";
|
||||
import {PullRequestEvent} from "@octokit/webhooks-types";
|
||||
import {Options, sync as conventionalCommitsParser} from 'conventional-commits-parser';
|
||||
import conventionalCommitTypes from 'conventional-commit-types';
|
||||
import _ from "lodash";
|
||||
|
||||
const defaultTypes = Object.keys(conventionalCommitTypes.types)
|
||||
const breakingChangeLabel = "breaking-change"
|
||||
|
||||
// These options are copied from [1].
|
||||
// [1] https://github.com/conventional-changelog/conventional-changelog/blob/3f60b464/packages/conventional-changelog-conventionalcommits/src/parser.js
|
||||
export const parserOpts: Options = {
|
||||
headerPattern: /^(\w*)(?:\((.*)\))?!?: (.*)$/,
|
||||
breakingHeaderPattern: /^(\w*)(?:\((.*)\))?!: (.*)$/,
|
||||
headerCorrespondence: [
|
||||
'type',
|
||||
'scope',
|
||||
'subject'
|
||||
],
|
||||
noteKeywords: ['BREAKING CHANGE', 'BREAKING-CHANGE'],
|
||||
revertPattern: /^(?:Revert|revert:)\s"?([\s\S]+?)"?\s*This reverts commit (\w*)\./i,
|
||||
revertCorrespondence: ['header', 'hash'],
|
||||
issuePrefixes: ['#']
|
||||
}
|
||||
|
||||
async function main() {
|
||||
if (!context.payload.pull_request) {
|
||||
throw new Error(`Only pull request event supported. ${context.eventName} is unsupported.`)
|
||||
}
|
||||
|
||||
const client = obtainClient("GITHUB_TOKEN")
|
||||
const payload = context.payload as PullRequestEvent
|
||||
const { owner, repo, number } = {
|
||||
owner: payload.pull_request.base.user.login,
|
||||
repo: payload.pull_request.base.repo.name,
|
||||
number: payload.pull_request.number,
|
||||
}
|
||||
const { data: pull_request } = await client.rest.pulls.get({
|
||||
owner, repo, pull_number: number,
|
||||
})
|
||||
|
||||
const commit = conventionalCommitsParser(pull_request.title, parserOpts)
|
||||
core.info(`Receive commit: ${JSON.stringify(commit)}`)
|
||||
|
||||
if (!commit.type) {
|
||||
throw Error(`Malformed commit: ${JSON.stringify(commit)}`)
|
||||
}
|
||||
|
||||
if (!defaultTypes.includes(commit.type)) {
|
||||
throw Error(`Unexpected type ${JSON.stringify(commit.type)} of commit: ${JSON.stringify(commit)}`)
|
||||
}
|
||||
|
||||
const breakingChanges = _.filter(commit.notes, _.matches({ title: 'BREAKING CHANGE'}))
|
||||
if (breakingChanges.length > 0) {
|
||||
await client.rest.issues.addLabels({
|
||||
owner, repo, issue_number: number, labels: [breakingChangeLabel]
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
main().catch(handleError)
|
||||
106 cyborg/bin/follow-up-docs-issue.ts Normal file
@@ -0,0 +1,106 @@
|
||||
/*
|
||||
* Copyright 2023 Greptime Team
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
import * as core from '@actions/core'
|
||||
import {handleError, obtainClient} from "@/common";
|
||||
import {context} from "@actions/github";
|
||||
import {PullRequestEditedEvent, PullRequestEvent, PullRequestOpenedEvent} from "@octokit/webhooks-types";
|
||||
// @ts-expect-error moduleResolution:nodenext issue 54523
|
||||
import {RequestError} from "@octokit/request-error";
|
||||
|
||||
const needFollowUpDocs = "[x] This PR requires documentation updates."
|
||||
const labelDocsNotRequired = "docs-not-required"
|
||||
const labelDocsRequired = "docs-required"
|
||||
|
||||
async function main() {
|
||||
if (!context.payload.pull_request) {
|
||||
throw new Error(`Only pull request event supported. ${context.eventName} is unsupported.`)
|
||||
}
|
||||
|
||||
const client = obtainClient("GITHUB_TOKEN")
|
||||
const docsClient = obtainClient("DOCS_REPO_TOKEN")
|
||||
const payload = context.payload as PullRequestEvent
|
||||
const { owner, repo, number, actor, title, html_url } = {
|
||||
owner: payload.pull_request.base.user.login,
|
||||
repo: payload.pull_request.base.repo.name,
|
||||
number: payload.pull_request.number,
|
||||
title: payload.pull_request.title,
|
||||
html_url: payload.pull_request.html_url,
|
||||
actor: payload.pull_request.user.login,
|
||||
}
|
||||
const followUpDocs = checkPullRequestEvent(payload)
|
||||
if (followUpDocs) {
|
||||
core.info("Follow up docs.")
|
||||
await client.rest.issues.removeLabel({
|
||||
owner, repo, issue_number: number, name: labelDocsNotRequired,
|
||||
}).catch((e: RequestError) => {
|
||||
if (e.status != 404) {
|
||||
throw e;
|
||||
}
|
||||
core.debug(`Label ${labelDocsNotRequired} not exist.`)
|
||||
})
|
||||
await client.rest.issues.addLabels({
|
||||
owner, repo, issue_number: number, labels: [labelDocsRequired],
|
||||
})
|
||||
await docsClient.rest.issues.create({
|
||||
owner: 'GreptimeTeam',
|
||||
repo: 'docs',
|
||||
title: `Update docs for ${title}`,
|
||||
body: `A document change request is generated from ${html_url}`,
|
||||
assignee: actor,
|
||||
}).then((res) => {
|
||||
core.info(`Created issue ${res.data}`)
|
||||
})
|
||||
} else {
|
||||
core.info("No need to follow up docs.")
|
||||
await client.rest.issues.removeLabel({
|
||||
owner, repo, issue_number: number, name: labelDocsRequired
|
||||
}).catch((e: RequestError) => {
|
||||
if (e.status != 404) {
|
||||
throw e;
|
||||
}
|
||||
core.debug(`Label ${labelDocsRequired} not exist.`)
|
||||
})
|
||||
await client.rest.issues.addLabels({
|
||||
owner, repo, issue_number: number, labels: [labelDocsNotRequired],
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
function checkPullRequestEvent(payload: PullRequestEvent) {
|
||||
switch (payload.action) {
|
||||
case "opened":
|
||||
return checkPullRequestOpenedEvent(payload as PullRequestOpenedEvent)
|
||||
case "edited":
|
||||
return checkPullRequestEditedEvent(payload as PullRequestEditedEvent)
|
||||
default:
|
||||
throw new Error(`${payload.action} is unsupported.`)
|
||||
}
|
||||
}
|
||||
|
||||
function checkPullRequestOpenedEvent(event: PullRequestOpenedEvent): boolean {
|
||||
// @ts-ignore
|
||||
return event.pull_request.body?.includes(needFollowUpDocs)
|
||||
}
|
||||
|
||||
function checkPullRequestEditedEvent(event: PullRequestEditedEvent): boolean {
|
||||
const previous = event.changes.body?.from.includes(needFollowUpDocs)
|
||||
const current = event.pull_request.body?.includes(needFollowUpDocs)
|
||||
// from docs-not-need to docs-required
|
||||
return (!previous) && current
|
||||
}
|
||||
|
||||
main().catch(handleError)
|
||||
83 cyborg/bin/report-ci-failure.ts Normal file
@@ -0,0 +1,83 @@
|
||||
/*
|
||||
* Copyright 2023 Greptime Team
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
import * as core from '@actions/core'
|
||||
import {handleError, obtainClient} from "@/common"
|
||||
import {context} from "@actions/github"
|
||||
import _ from "lodash"
|
||||
|
||||
async function main() {
|
||||
const success = process.env["CI_REPORT_STATUS"] === "true"
|
||||
core.info(`CI_REPORT_STATUS=${process.env["CI_REPORT_STATUS"]}, resolved to ${success}`)
|
||||
|
||||
const client = obtainClient("GITHUB_TOKEN")
|
||||
const title = `Workflow run '${context.workflow}' failed`
|
||||
const url = `${process.env["GITHUB_SERVER_URL"]}/${process.env["GITHUB_REPOSITORY"]}/actions/runs/${process.env["GITHUB_RUN_ID"]}`
|
||||
const failure_comment = `@GreptimeTeam/db-approver\nNew failure: ${url} `
|
||||
const success_comment = `@GreptimeTeam/db-approver\nBack to success: ${url}`
|
||||
|
||||
const {owner, repo} = context.repo
|
||||
const labels = ['O-ci-failure']
|
||||
|
||||
const issues = await client.paginate(client.rest.issues.listForRepo, {
|
||||
owner,
|
||||
repo,
|
||||
labels: labels.join(','),
|
||||
state: "open",
|
||||
sort: "created",
|
||||
direction: "desc",
|
||||
});
|
||||
const issue = _.find(issues, (i) => i.title === title);
|
||||
|
||||
if (issue) { // exist issue
|
||||
core.info(`Found previous issue ${issue.html_url}`)
|
||||
if (!success) {
|
||||
await client.rest.issues.createComment({
|
||||
owner,
|
||||
repo,
|
||||
issue_number: issue.number,
|
||||
body: failure_comment,
|
||||
})
|
||||
} else {
|
||||
await client.rest.issues.createComment({
|
||||
owner,
|
||||
repo,
|
||||
issue_number: issue.number,
|
||||
body: success_comment,
|
||||
})
|
||||
await client.rest.issues.update({
|
||||
owner,
|
||||
repo,
|
||||
issue_number: issue.number,
|
||||
state: "closed",
|
||||
state_reason: "completed",
|
||||
})
|
||||
}
|
||||
core.setOutput("html_url", issue.html_url)
|
||||
} else if (!success) { // create new issue for failure
|
||||
const issue = await client.rest.issues.create({
|
||||
owner,
|
||||
repo,
|
||||
title,
|
||||
labels,
|
||||
body: failure_comment,
|
||||
})
|
||||
core.info(`Created issue ${issue.data.html_url}`)
|
||||
core.setOutput("html_url", issue.data.html_url)
|
||||
}
|
||||
}
|
||||
|
||||
main().catch(handleError)
|
||||
73
cyborg/bin/schedule.ts
Normal file
@@ -0,0 +1,73 @@
|
||||
/*
|
||||
* Copyright 2023 Greptime Team
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
import * as core from '@actions/core'
|
||||
import {GitHub} from "@actions/github/lib/utils"
|
||||
import _ from "lodash";
|
||||
import dayjs from "dayjs";
|
||||
import {handleError, obtainClient} from "@/common";
|
||||
|
||||
async function main() {
|
||||
const client = obtainClient("GITHUB_TOKEN")
|
||||
await unassign(client)
|
||||
}
|
||||
|
||||
async function unassign(client: InstanceType<typeof GitHub>) {
|
||||
const owner = "GreptimeTeam"
|
||||
const repo = "greptimedb"
|
||||
|
||||
const dt = dayjs().subtract(14, 'days');
|
||||
core.info(`Open issues updated before ${dt.toISOString()} will be considered stale.`)
|
||||
|
||||
const members = await client.paginate(client.rest.repos.listCollaborators, {
|
||||
owner,
|
||||
repo,
|
||||
permission: "push",
|
||||
per_page: 100
|
||||
}).then((members) => members.map((member) => member.login))
|
||||
core.info(`Members (${members.length}): ${members}`)
|
||||
|
||||
const issues = await client.paginate(client.rest.issues.listForRepo, {
|
||||
owner,
|
||||
repo,
|
||||
state: "open",
|
||||
sort: "created",
|
||||
direction: "asc",
|
||||
per_page: 100
|
||||
})
|
||||
for (const issue of issues) {
|
||||
let assignees = [];
|
||||
if (issue.assignee) {
|
||||
assignees.push(issue.assignee.login)
|
||||
}
|
||||
for (const assignee of issue.assignees) {
|
||||
assignees.push(assignee.login)
|
||||
}
|
||||
assignees = _.uniq(assignees)
|
||||
assignees = _.difference(assignees, members)
|
||||
if (assignees.length > 0 && dayjs(issue.updated_at).isBefore(dt)) {
|
||||
core.info(`Assignees ${assignees} of issue ${issue.number} will be unassigned.`)
|
||||
await client.rest.issues.removeAssignees({
|
||||
owner,
|
||||
repo,
|
||||
issue_number: issue.number,
|
||||
assignees: assignees,
|
||||
})
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
main().catch(handleError)
|
||||
26
cyborg/package.json
Normal file
@@ -0,0 +1,26 @@
|
||||
{
|
||||
"name": "cyborg",
|
||||
"version": "1.0.0",
|
||||
"description": "Automator for GreptimeDB Repository Management",
|
||||
"private": true,
|
||||
"packageManager": "pnpm@8.15.5",
|
||||
"dependencies": {
|
||||
"@actions/core": "^1.10.1",
|
||||
"@actions/github": "^6.0.0",
|
||||
"@octokit/request-error": "^6.1.1",
|
||||
"@octokit/webhooks-types": "^7.5.1",
|
||||
"conventional-commit-types": "^3.0.0",
|
||||
"conventional-commits-parser": "^5.0.0",
|
||||
"dayjs": "^1.11.11",
|
||||
"dotenv": "^16.4.5",
|
||||
"lodash": "^4.17.21"
|
||||
},
|
||||
"devDependencies": {
|
||||
"@types/conventional-commits-parser": "^5.0.0",
|
||||
"@types/lodash": "^4.17.0",
|
||||
"@types/node": "^20.12.7",
|
||||
"tsconfig-paths": "^4.2.0",
|
||||
"tsx": "^4.8.2",
|
||||
"typescript": "^5.4.5"
|
||||
}
|
||||
}
|
||||
612
cyborg/pnpm-lock.yaml
generated
Normal file
@@ -0,0 +1,612 @@
|
||||
lockfileVersion: '6.0'
|
||||
|
||||
settings:
|
||||
autoInstallPeers: true
|
||||
excludeLinksFromLockfile: false
|
||||
|
||||
dependencies:
|
||||
'@actions/core':
|
||||
specifier: ^1.10.1
|
||||
version: 1.10.1
|
||||
'@actions/github':
|
||||
specifier: ^6.0.0
|
||||
version: 6.0.0
|
||||
'@octokit/request-error':
|
||||
specifier: ^6.1.1
|
||||
version: 6.1.1
|
||||
'@octokit/webhooks-types':
|
||||
specifier: ^7.5.1
|
||||
version: 7.5.1
|
||||
conventional-commit-types:
|
||||
specifier: ^3.0.0
|
||||
version: 3.0.0
|
||||
conventional-commits-parser:
|
||||
specifier: ^5.0.0
|
||||
version: 5.0.0
|
||||
dayjs:
|
||||
specifier: ^1.11.11
|
||||
version: 1.11.11
|
||||
dotenv:
|
||||
specifier: ^16.4.5
|
||||
version: 16.4.5
|
||||
lodash:
|
||||
specifier: ^4.17.21
|
||||
version: 4.17.21
|
||||
|
||||
devDependencies:
|
||||
'@types/conventional-commits-parser':
|
||||
specifier: ^5.0.0
|
||||
version: 5.0.0
|
||||
'@types/lodash':
|
||||
specifier: ^4.17.0
|
||||
version: 4.17.0
|
||||
'@types/node':
|
||||
specifier: ^20.12.7
|
||||
version: 20.12.7
|
||||
tsconfig-paths:
|
||||
specifier: ^4.2.0
|
||||
version: 4.2.0
|
||||
tsx:
|
||||
specifier: ^4.8.2
|
||||
version: 4.8.2
|
||||
typescript:
|
||||
specifier: ^5.4.5
|
||||
version: 5.4.5
|
||||
|
||||
packages:
|
||||
|
||||
/@actions/core@1.10.1:
|
||||
resolution: {integrity: sha512-3lBR9EDAY+iYIpTnTIXmWcNbX3T2kCkAEQGIQx4NVQ0575nk2k3GRZDTPQG+vVtS2izSLmINlxXf0uLtnrTP+g==}
|
||||
dependencies:
|
||||
'@actions/http-client': 2.2.1
|
||||
uuid: 8.3.2
|
||||
dev: false
|
||||
|
||||
/@actions/github@6.0.0:
|
||||
resolution: {integrity: sha512-alScpSVnYmjNEXboZjarjukQEzgCRmjMv6Xj47fsdnqGS73bjJNDpiiXmp8jr0UZLdUB6d9jW63IcmddUP+l0g==}
|
||||
dependencies:
|
||||
'@actions/http-client': 2.2.1
|
||||
'@octokit/core': 5.2.0
|
||||
'@octokit/plugin-paginate-rest': 9.2.1(@octokit/core@5.2.0)
|
||||
'@octokit/plugin-rest-endpoint-methods': 10.4.1(@octokit/core@5.2.0)
|
||||
dev: false
|
||||
|
||||
/@actions/http-client@2.2.1:
|
||||
resolution: {integrity: sha512-KhC/cZsq7f8I4LfZSJKgCvEwfkE8o1538VoBeoGzokVLLnbFDEAdFD3UhoMklxo2un9NJVBdANOresx7vTHlHw==}
|
||||
dependencies:
|
||||
tunnel: 0.0.6
|
||||
undici: 5.28.4
|
||||
dev: false
|
||||
|
||||
/@esbuild/aix-ppc64@0.20.2:
|
||||
resolution: {integrity: sha512-D+EBOJHXdNZcLJRBkhENNG8Wji2kgc9AZ9KiPr1JuZjsNtyHzrsfLRrY0tk2H2aoFu6RANO1y1iPPUCDYWkb5g==}
|
||||
engines: {node: '>=12'}
|
||||
cpu: [ppc64]
|
||||
os: [aix]
|
||||
requiresBuild: true
|
||||
dev: true
|
||||
optional: true
|
||||
|
||||
/@esbuild/android-arm64@0.20.2:
|
||||
resolution: {integrity: sha512-mRzjLacRtl/tWU0SvD8lUEwb61yP9cqQo6noDZP/O8VkwafSYwZ4yWy24kan8jE/IMERpYncRt2dw438LP3Xmg==}
|
||||
engines: {node: '>=12'}
|
||||
cpu: [arm64]
|
||||
os: [android]
|
||||
requiresBuild: true
|
||||
dev: true
|
||||
optional: true
|
||||
|
||||
/@esbuild/android-arm@0.20.2:
|
||||
resolution: {integrity: sha512-t98Ra6pw2VaDhqNWO2Oph2LXbz/EJcnLmKLGBJwEwXX/JAN83Fym1rU8l0JUWK6HkIbWONCSSatf4sf2NBRx/w==}
|
||||
engines: {node: '>=12'}
|
||||
cpu: [arm]
|
||||
os: [android]
|
||||
requiresBuild: true
|
||||
dev: true
|
||||
optional: true
|
||||
|
||||
/@esbuild/android-x64@0.20.2:
|
||||
resolution: {integrity: sha512-btzExgV+/lMGDDa194CcUQm53ncxzeBrWJcncOBxuC6ndBkKxnHdFJn86mCIgTELsooUmwUm9FkhSp5HYu00Rg==}
|
||||
engines: {node: '>=12'}
|
||||
cpu: [x64]
|
||||
os: [android]
|
||||
requiresBuild: true
|
||||
dev: true
|
||||
optional: true
|
||||
|
||||
/@esbuild/darwin-arm64@0.20.2:
|
||||
resolution: {integrity: sha512-4J6IRT+10J3aJH3l1yzEg9y3wkTDgDk7TSDFX+wKFiWjqWp/iCfLIYzGyasx9l0SAFPT1HwSCR+0w/h1ES/MjA==}
|
||||
engines: {node: '>=12'}
|
||||
cpu: [arm64]
|
||||
os: [darwin]
|
||||
requiresBuild: true
|
||||
dev: true
|
||||
optional: true
|
||||
|
||||
/@esbuild/darwin-x64@0.20.2:
|
||||
resolution: {integrity: sha512-tBcXp9KNphnNH0dfhv8KYkZhjc+H3XBkF5DKtswJblV7KlT9EI2+jeA8DgBjp908WEuYll6pF+UStUCfEpdysA==}
|
||||
engines: {node: '>=12'}
|
||||
cpu: [x64]
|
||||
os: [darwin]
|
||||
requiresBuild: true
|
||||
dev: true
|
||||
optional: true
|
||||
|
||||
/@esbuild/freebsd-arm64@0.20.2:
|
||||
resolution: {integrity: sha512-d3qI41G4SuLiCGCFGUrKsSeTXyWG6yem1KcGZVS+3FYlYhtNoNgYrWcvkOoaqMhwXSMrZRl69ArHsGJ9mYdbbw==}
|
||||
engines: {node: '>=12'}
|
||||
cpu: [arm64]
|
||||
os: [freebsd]
|
||||
requiresBuild: true
|
||||
dev: true
|
||||
optional: true
|
||||
|
||||
/@esbuild/freebsd-x64@0.20.2:
|
||||
resolution: {integrity: sha512-d+DipyvHRuqEeM5zDivKV1KuXn9WeRX6vqSqIDgwIfPQtwMP4jaDsQsDncjTDDsExT4lR/91OLjRo8bmC1e+Cw==}
|
||||
engines: {node: '>=12'}
|
||||
cpu: [x64]
|
||||
os: [freebsd]
|
||||
requiresBuild: true
|
||||
dev: true
|
||||
optional: true
|
||||
|
||||
/@esbuild/linux-arm64@0.20.2:
|
||||
resolution: {integrity: sha512-9pb6rBjGvTFNira2FLIWqDk/uaf42sSyLE8j1rnUpuzsODBq7FvpwHYZxQ/It/8b+QOS1RYfqgGFNLRI+qlq2A==}
|
||||
engines: {node: '>=12'}
|
||||
cpu: [arm64]
|
||||
os: [linux]
|
||||
requiresBuild: true
|
||||
dev: true
|
||||
optional: true
|
||||
|
||||
/@esbuild/linux-arm@0.20.2:
|
||||
resolution: {integrity: sha512-VhLPeR8HTMPccbuWWcEUD1Az68TqaTYyj6nfE4QByZIQEQVWBB8vup8PpR7y1QHL3CpcF6xd5WVBU/+SBEvGTg==}
|
||||
engines: {node: '>=12'}
|
||||
cpu: [arm]
|
||||
os: [linux]
|
||||
requiresBuild: true
|
||||
dev: true
|
||||
optional: true
|
||||
|
||||
/@esbuild/linux-ia32@0.20.2:
|
||||
resolution: {integrity: sha512-o10utieEkNPFDZFQm9CoP7Tvb33UutoJqg3qKf1PWVeeJhJw0Q347PxMvBgVVFgouYLGIhFYG0UGdBumROyiig==}
|
||||
engines: {node: '>=12'}
|
||||
cpu: [ia32]
|
||||
os: [linux]
|
||||
requiresBuild: true
|
||||
dev: true
|
||||
optional: true
|
||||
|
||||
/@esbuild/linux-loong64@0.20.2:
|
||||
resolution: {integrity: sha512-PR7sp6R/UC4CFVomVINKJ80pMFlfDfMQMYynX7t1tNTeivQ6XdX5r2XovMmha/VjR1YN/HgHWsVcTRIMkymrgQ==}
|
||||
engines: {node: '>=12'}
|
||||
cpu: [loong64]
|
||||
os: [linux]
|
||||
requiresBuild: true
|
||||
dev: true
|
||||
optional: true
|
||||
|
||||
/@esbuild/linux-mips64el@0.20.2:
|
||||
resolution: {integrity: sha512-4BlTqeutE/KnOiTG5Y6Sb/Hw6hsBOZapOVF6njAESHInhlQAghVVZL1ZpIctBOoTFbQyGW+LsVYZ8lSSB3wkjA==}
|
||||
engines: {node: '>=12'}
|
||||
cpu: [mips64el]
|
||||
os: [linux]
|
||||
requiresBuild: true
|
||||
dev: true
|
||||
optional: true
|
||||
|
||||
/@esbuild/linux-ppc64@0.20.2:
|
||||
resolution: {integrity: sha512-rD3KsaDprDcfajSKdn25ooz5J5/fWBylaaXkuotBDGnMnDP1Uv5DLAN/45qfnf3JDYyJv/ytGHQaziHUdyzaAg==}
|
||||
engines: {node: '>=12'}
|
||||
cpu: [ppc64]
|
||||
os: [linux]
|
||||
requiresBuild: true
|
||||
dev: true
|
||||
optional: true
|
||||
|
||||
/@esbuild/linux-riscv64@0.20.2:
|
||||
resolution: {integrity: sha512-snwmBKacKmwTMmhLlz/3aH1Q9T8v45bKYGE3j26TsaOVtjIag4wLfWSiZykXzXuE1kbCE+zJRmwp+ZbIHinnVg==}
|
||||
engines: {node: '>=12'}
|
||||
cpu: [riscv64]
|
||||
os: [linux]
|
||||
requiresBuild: true
|
||||
dev: true
|
||||
optional: true
|
||||
|
||||
/@esbuild/linux-s390x@0.20.2:
|
||||
resolution: {integrity: sha512-wcWISOobRWNm3cezm5HOZcYz1sKoHLd8VL1dl309DiixxVFoFe/o8HnwuIwn6sXre88Nwj+VwZUvJf4AFxkyrQ==}
|
||||
engines: {node: '>=12'}
|
||||
cpu: [s390x]
|
||||
os: [linux]
|
||||
requiresBuild: true
|
||||
dev: true
|
||||
optional: true
|
||||
|
||||
/@esbuild/linux-x64@0.20.2:
|
||||
resolution: {integrity: sha512-1MdwI6OOTsfQfek8sLwgyjOXAu+wKhLEoaOLTjbijk6E2WONYpH9ZU2mNtR+lZ2B4uwr+usqGuVfFT9tMtGvGw==}
|
||||
engines: {node: '>=12'}
|
||||
cpu: [x64]
|
||||
os: [linux]
|
||||
requiresBuild: true
|
||||
dev: true
|
||||
optional: true
|
||||
|
||||
/@esbuild/netbsd-x64@0.20.2:
|
||||
resolution: {integrity: sha512-K8/DhBxcVQkzYc43yJXDSyjlFeHQJBiowJ0uVL6Tor3jGQfSGHNNJcWxNbOI8v5k82prYqzPuwkzHt3J1T1iZQ==}
|
||||
engines: {node: '>=12'}
|
||||
cpu: [x64]
|
||||
os: [netbsd]
|
||||
requiresBuild: true
|
||||
dev: true
|
||||
optional: true
|
||||
|
||||
/@esbuild/openbsd-x64@0.20.2:
|
||||
resolution: {integrity: sha512-eMpKlV0SThJmmJgiVyN9jTPJ2VBPquf6Kt/nAoo6DgHAoN57K15ZghiHaMvqjCye/uU4X5u3YSMgVBI1h3vKrQ==}
|
||||
engines: {node: '>=12'}
|
||||
cpu: [x64]
|
||||
os: [openbsd]
|
||||
requiresBuild: true
|
||||
dev: true
|
||||
optional: true
|
||||
|
||||
/@esbuild/sunos-x64@0.20.2:
|
||||
resolution: {integrity: sha512-2UyFtRC6cXLyejf/YEld4Hajo7UHILetzE1vsRcGL3earZEW77JxrFjH4Ez2qaTiEfMgAXxfAZCm1fvM/G/o8w==}
|
||||
engines: {node: '>=12'}
|
||||
cpu: [x64]
|
||||
os: [sunos]
|
||||
requiresBuild: true
|
||||
dev: true
|
||||
optional: true
|
||||
|
||||
/@esbuild/win32-arm64@0.20.2:
|
||||
resolution: {integrity: sha512-GRibxoawM9ZCnDxnP3usoUDO9vUkpAxIIZ6GQI+IlVmr5kP3zUq+l17xELTHMWTWzjxa2guPNyrpq1GWmPvcGQ==}
|
||||
engines: {node: '>=12'}
|
||||
cpu: [arm64]
|
||||
os: [win32]
|
||||
requiresBuild: true
|
||||
dev: true
|
||||
optional: true
|
||||
|
||||
/@esbuild/win32-ia32@0.20.2:
|
||||
resolution: {integrity: sha512-HfLOfn9YWmkSKRQqovpnITazdtquEW8/SoHW7pWpuEeguaZI4QnCRW6b+oZTztdBnZOS2hqJ6im/D5cPzBTTlQ==}
|
||||
engines: {node: '>=12'}
|
||||
cpu: [ia32]
|
||||
os: [win32]
|
||||
requiresBuild: true
|
||||
dev: true
|
||||
optional: true
|
||||
|
||||
/@esbuild/win32-x64@0.20.2:
|
||||
resolution: {integrity: sha512-N49X4lJX27+l9jbLKSqZ6bKNjzQvHaT8IIFUy+YIqmXQdjYCToGWwOItDrfby14c78aDd5NHQl29xingXfCdLQ==}
|
||||
engines: {node: '>=12'}
|
||||
cpu: [x64]
|
||||
os: [win32]
|
||||
requiresBuild: true
|
||||
dev: true
|
||||
optional: true
|
||||
|
||||
/@fastify/busboy@2.1.1:
|
||||
resolution: {integrity: sha512-vBZP4NlzfOlerQTnba4aqZoMhE/a9HY7HRqoOPaETQcSQuWEIyZMHGfVu6w9wGtGK5fED5qRs2DteVCjOH60sA==}
|
||||
engines: {node: '>=14'}
|
||||
dev: false
|
||||
|
||||
/@octokit/auth-token@4.0.0:
|
||||
resolution: {integrity: sha512-tY/msAuJo6ARbK6SPIxZrPBms3xPbfwBrulZe0Wtr/DIY9lje2HeV1uoebShn6mx7SjCHif6EjMvoREj+gZ+SA==}
|
||||
engines: {node: '>= 18'}
|
||||
dev: false
|
||||
|
||||
/@octokit/core@5.2.0:
|
||||
resolution: {integrity: sha512-1LFfa/qnMQvEOAdzlQymH0ulepxbxnCYAKJZfMci/5XJyIHWgEYnDmgnKakbTh7CH2tFQ5O60oYDvns4i9RAIg==}
|
||||
engines: {node: '>= 18'}
|
||||
dependencies:
|
||||
'@octokit/auth-token': 4.0.0
|
||||
'@octokit/graphql': 7.1.0
|
||||
'@octokit/request': 8.4.0
|
||||
'@octokit/request-error': 5.1.0
|
||||
'@octokit/types': 13.5.0
|
||||
before-after-hook: 2.2.3
|
||||
universal-user-agent: 6.0.1
|
||||
dev: false
|
||||
|
||||
/@octokit/endpoint@9.0.5:
|
||||
resolution: {integrity: sha512-ekqR4/+PCLkEBF6qgj8WqJfvDq65RH85OAgrtnVp1mSxaXF03u2xW/hUdweGS5654IlC0wkNYC18Z50tSYTAFw==}
|
||||
engines: {node: '>= 18'}
|
||||
dependencies:
|
||||
'@octokit/types': 13.5.0
|
||||
universal-user-agent: 6.0.1
|
||||
dev: false
|
||||
|
||||
/@octokit/graphql@7.1.0:
|
||||
resolution: {integrity: sha512-r+oZUH7aMFui1ypZnAvZmn0KSqAUgE1/tUXIWaqUCa1758ts/Jio84GZuzsvUkme98kv0WFY8//n0J1Z+vsIsQ==}
|
||||
engines: {node: '>= 18'}
|
||||
dependencies:
|
||||
'@octokit/request': 8.4.0
|
||||
'@octokit/types': 13.5.0
|
||||
universal-user-agent: 6.0.1
|
||||
dev: false
|
||||
|
||||
/@octokit/openapi-types@20.0.0:
|
||||
resolution: {integrity: sha512-EtqRBEjp1dL/15V7WiX5LJMIxxkdiGJnabzYx5Apx4FkQIFgAfKumXeYAqqJCj1s+BMX4cPFIFC4OLCR6stlnA==}
|
||||
dev: false
|
||||
|
||||
/@octokit/openapi-types@22.2.0:
|
||||
resolution: {integrity: sha512-QBhVjcUa9W7Wwhm6DBFu6ZZ+1/t/oYxqc2tp81Pi41YNuJinbFRx8B133qVOrAaBbF7D/m0Et6f9/pZt9Rc+tg==}
|
||||
dev: false
|
||||
|
||||
/@octokit/plugin-paginate-rest@9.2.1(@octokit/core@5.2.0):
|
||||
resolution: {integrity: sha512-wfGhE/TAkXZRLjksFXuDZdmGnJQHvtU/joFQdweXUgzo1XwvBCD4o4+75NtFfjfLK5IwLf9vHTfSiU3sLRYpRw==}
|
||||
engines: {node: '>= 18'}
|
||||
peerDependencies:
|
||||
'@octokit/core': '5'
|
||||
dependencies:
|
||||
'@octokit/core': 5.2.0
|
||||
'@octokit/types': 12.6.0
|
||||
dev: false
|
||||
|
||||
/@octokit/plugin-rest-endpoint-methods@10.4.1(@octokit/core@5.2.0):
|
||||
resolution: {integrity: sha512-xV1b+ceKV9KytQe3zCVqjg+8GTGfDYwaT1ATU5isiUyVtlVAO3HNdzpS4sr4GBx4hxQ46s7ITtZrAsxG22+rVg==}
|
||||
engines: {node: '>= 18'}
|
||||
peerDependencies:
|
||||
'@octokit/core': '5'
|
||||
dependencies:
|
||||
'@octokit/core': 5.2.0
|
||||
'@octokit/types': 12.6.0
|
||||
dev: false
|
||||
|
||||
/@octokit/request-error@5.1.0:
|
||||
resolution: {integrity: sha512-GETXfE05J0+7H2STzekpKObFe765O5dlAKUTLNGeH+x47z7JjXHfsHKo5z21D/o/IOZTUEI6nyWyR+bZVP/n5Q==}
|
||||
engines: {node: '>= 18'}
|
||||
dependencies:
|
||||
'@octokit/types': 13.5.0
|
||||
deprecation: 2.3.1
|
||||
once: 1.4.0
|
||||
dev: false
|
||||
|
||||
/@octokit/request-error@6.1.1:
|
||||
resolution: {integrity: sha512-1mw1gqT3fR/WFvnoVpY/zUM2o/XkMs/2AszUUG9I69xn0JFLv6PGkPhNk5lbfvROs79wiS0bqiJNxfCZcRJJdg==}
|
||||
engines: {node: '>= 18'}
|
||||
dependencies:
|
||||
'@octokit/types': 13.5.0
|
||||
dev: false
|
||||
|
||||
/@octokit/request@8.4.0:
|
||||
resolution: {integrity: sha512-9Bb014e+m2TgBeEJGEbdplMVWwPmL1FPtggHQRkV+WVsMggPtEkLKPlcVYm/o8xKLkpJ7B+6N8WfQMtDLX2Dpw==}
|
||||
engines: {node: '>= 18'}
|
||||
dependencies:
|
||||
'@octokit/endpoint': 9.0.5
|
||||
'@octokit/request-error': 5.1.0
|
||||
'@octokit/types': 13.5.0
|
||||
universal-user-agent: 6.0.1
|
||||
dev: false
|
||||
|
||||
/@octokit/types@12.6.0:
|
||||
resolution: {integrity: sha512-1rhSOfRa6H9w4YwK0yrf5faDaDTb+yLyBUKOCV4xtCDB5VmIPqd/v9yr9o6SAzOAlRxMiRiCic6JVM1/kunVkw==}
|
||||
dependencies:
|
||||
'@octokit/openapi-types': 20.0.0
|
||||
dev: false
|
||||
|
||||
/@octokit/types@13.5.0:
|
||||
resolution: {integrity: sha512-HdqWTf5Z3qwDVlzCrP8UJquMwunpDiMPt5er+QjGzL4hqr/vBVY/MauQgS1xWxCDT1oMx1EULyqxncdCY/NVSQ==}
|
||||
dependencies:
|
||||
'@octokit/openapi-types': 22.2.0
|
||||
dev: false
|
||||
|
||||
/@octokit/webhooks-types@7.5.1:
|
||||
resolution: {integrity: sha512-1dozxWEP8lKGbtEu7HkRbK1F/nIPuJXNfT0gd96y6d3LcHZTtRtlf8xz3nicSJfesADxJyDh+mWBOsdLkqgzYw==}
|
||||
dev: false
|
||||
|
||||
/@types/conventional-commits-parser@5.0.0:
|
||||
resolution: {integrity: sha512-loB369iXNmAZglwWATL+WRe+CRMmmBPtpolYzIebFaX4YA3x+BEfLqhUAV9WanycKI3TG1IMr5bMJDajDKLlUQ==}
|
||||
dependencies:
|
||||
'@types/node': 20.12.7
|
||||
dev: true
|
||||
|
||||
/@types/lodash@4.17.0:
|
||||
resolution: {integrity: sha512-t7dhREVv6dbNj0q17X12j7yDG4bD/DHYX7o5/DbDxobP0HnGPgpRz2Ej77aL7TZT3DSw13fqUTj8J4mMnqa7WA==}
|
||||
dev: true
|
||||
|
||||
/@types/node@20.12.7:
|
||||
resolution: {integrity: sha512-wq0cICSkRLVaf3UGLMGItu/PtdY7oaXaI/RVU+xliKVOtRna3PRY57ZDfztpDL0n11vfymMUnXv8QwYCO7L1wg==}
|
||||
dependencies:
|
||||
undici-types: 5.26.5
|
||||
dev: true
|
||||
|
||||
/JSONStream@1.3.5:
|
||||
resolution: {integrity: sha512-E+iruNOY8VV9s4JEbe1aNEm6MiszPRr/UfcHMz0TQh1BXSxHK+ASV1R6W4HpjBhSeS+54PIsAMCBmwD06LLsqQ==}
|
||||
hasBin: true
|
||||
dependencies:
|
||||
jsonparse: 1.3.1
|
||||
through: 2.3.8
|
||||
dev: false
|
||||
|
||||
/before-after-hook@2.2.3:
|
||||
resolution: {integrity: sha512-NzUnlZexiaH/46WDhANlyR2bXRopNg4F/zuSA3OpZnllCUgRaOF2znDioDWrmbNVsuZk6l9pMquQB38cfBZwkQ==}
|
||||
dev: false
|
||||
|
||||
/conventional-commit-types@3.0.0:
|
||||
resolution: {integrity: sha512-SmmCYnOniSsAa9GqWOeLqc179lfr5TRu5b4QFDkbsrJ5TZjPJx85wtOr3zn+1dbeNiXDKGPbZ72IKbPhLXh/Lg==}
|
||||
dev: false
|
||||
|
||||
/conventional-commits-parser@5.0.0:
|
||||
resolution: {integrity: sha512-ZPMl0ZJbw74iS9LuX9YIAiW8pfM5p3yh2o/NbXHbkFuZzY5jvdi5jFycEOkmBW5H5I7nA+D6f3UcsCLP2vvSEA==}
|
||||
engines: {node: '>=16'}
|
||||
hasBin: true
|
||||
dependencies:
|
||||
JSONStream: 1.3.5
|
||||
is-text-path: 2.0.0
|
||||
meow: 12.1.1
|
||||
split2: 4.2.0
|
||||
dev: false
|
||||
|
||||
/dayjs@1.11.11:
|
||||
resolution: {integrity: sha512-okzr3f11N6WuqYtZSvm+F776mB41wRZMhKP+hc34YdW+KmtYYK9iqvHSwo2k9FEH3fhGXvOPV6yz2IcSrfRUDg==}
|
||||
dev: false
|
||||
|
||||
/deprecation@2.3.1:
|
||||
resolution: {integrity: sha512-xmHIy4F3scKVwMsQ4WnVaS8bHOx0DmVwRywosKhaILI0ywMDWPtBSku2HNxRvF7jtwDRsoEwYQSfbxj8b7RlJQ==}
|
||||
dev: false
|
||||
|
||||
/dotenv@16.4.5:
|
||||
resolution: {integrity: sha512-ZmdL2rui+eB2YwhsWzjInR8LldtZHGDoQ1ugH85ppHKwpUHL7j7rN0Ti9NCnGiQbhaZ11FpR+7ao1dNsmduNUg==}
|
||||
engines: {node: '>=12'}
|
||||
dev: false
|
||||
|
||||
/esbuild@0.20.2:
|
||||
resolution: {integrity: sha512-WdOOppmUNU+IbZ0PaDiTst80zjnrOkyJNHoKupIcVyU8Lvla3Ugx94VzkQ32Ijqd7UhHJy75gNWDMUekcrSJ6g==}
|
||||
engines: {node: '>=12'}
|
||||
hasBin: true
|
||||
requiresBuild: true
|
||||
optionalDependencies:
|
||||
'@esbuild/aix-ppc64': 0.20.2
|
||||
'@esbuild/android-arm': 0.20.2
|
||||
'@esbuild/android-arm64': 0.20.2
|
||||
'@esbuild/android-x64': 0.20.2
|
||||
'@esbuild/darwin-arm64': 0.20.2
|
||||
'@esbuild/darwin-x64': 0.20.2
|
||||
'@esbuild/freebsd-arm64': 0.20.2
|
||||
'@esbuild/freebsd-x64': 0.20.2
|
||||
'@esbuild/linux-arm': 0.20.2
|
||||
'@esbuild/linux-arm64': 0.20.2
|
||||
'@esbuild/linux-ia32': 0.20.2
|
||||
'@esbuild/linux-loong64': 0.20.2
|
||||
'@esbuild/linux-mips64el': 0.20.2
|
||||
'@esbuild/linux-ppc64': 0.20.2
|
||||
'@esbuild/linux-riscv64': 0.20.2
|
||||
'@esbuild/linux-s390x': 0.20.2
|
||||
'@esbuild/linux-x64': 0.20.2
|
||||
'@esbuild/netbsd-x64': 0.20.2
|
||||
'@esbuild/openbsd-x64': 0.20.2
|
||||
'@esbuild/sunos-x64': 0.20.2
|
||||
'@esbuild/win32-arm64': 0.20.2
|
||||
'@esbuild/win32-ia32': 0.20.2
|
||||
'@esbuild/win32-x64': 0.20.2
|
||||
dev: true
|
||||
|
||||
/fsevents@2.3.3:
|
||||
resolution: {integrity: sha512-5xoDfX+fL7faATnagmWPpbFtwh/R77WmMMqqHGS65C3vvB0YHrgF+B1YmZ3441tMj5n63k0212XNoJwzlhffQw==}
|
||||
engines: {node: ^8.16.0 || ^10.6.0 || >=11.0.0}
|
||||
os: [darwin]
|
||||
requiresBuild: true
|
||||
dev: true
|
||||
optional: true
|
||||
|
||||
/get-tsconfig@4.7.3:
|
||||
resolution: {integrity: sha512-ZvkrzoUA0PQZM6fy6+/Hce561s+faD1rsNwhnO5FelNjyy7EMGJ3Rz1AQ8GYDWjhRs/7dBLOEJvhK8MiEJOAFg==}
|
||||
dependencies:
|
||||
resolve-pkg-maps: 1.0.0
|
||||
dev: true
|
||||
|
||||
/is-text-path@2.0.0:
|
||||
resolution: {integrity: sha512-+oDTluR6WEjdXEJMnC2z6A4FRwFoYuvShVVEGsS7ewc0UTi2QtAKMDJuL4BDEVt+5T7MjFo12RP8ghOM75oKJw==}
|
||||
engines: {node: '>=8'}
|
||||
dependencies:
|
||||
text-extensions: 2.4.0
|
||||
dev: false
|
||||
|
||||
/json5@2.2.3:
|
||||
resolution: {integrity: sha512-XmOWe7eyHYH14cLdVPoyg+GOH3rYX++KpzrylJwSW98t3Nk+U8XOl8FWKOgwtzdb8lXGf6zYwDUzeHMWfxasyg==}
|
||||
engines: {node: '>=6'}
|
||||
hasBin: true
|
||||
dev: true
|
||||
|
||||
/jsonparse@1.3.1:
|
||||
resolution: {integrity: sha512-POQXvpdL69+CluYsillJ7SUhKvytYjW9vG/GKpnf+xP8UWgYEM/RaMzHHofbALDiKbbP1W8UEYmgGl39WkPZsg==}
|
||||
engines: {'0': node >= 0.2.0}
|
||||
dev: false
|
||||
|
||||
/lodash@4.17.21:
|
||||
resolution: {integrity: sha512-v2kDEe57lecTulaDIuNTPy3Ry4gLGJ6Z1O3vE1krgXZNrsQ+LFTGHVxVjcXPs17LhbZVGedAJv8XZ1tvj5FvSg==}
|
||||
dev: false
|
||||
|
||||
/meow@12.1.1:
|
||||
resolution: {integrity: sha512-BhXM0Au22RwUneMPwSCnyhTOizdWoIEPU9sp0Aqa1PnDMR5Wv2FGXYDjuzJEIX+Eo2Rb8xuYe5jrnm5QowQFkw==}
|
||||
engines: {node: '>=16.10'}
|
||||
dev: false
|
||||
|
||||
/minimist@1.2.8:
|
||||
resolution: {integrity: sha512-2yyAR8qBkN3YuheJanUpWC5U3bb5osDywNB8RzDVlDwDHbocAJveqqj1u8+SVD7jkWT4yvsHCpWqqWqAxb0zCA==}
|
||||
dev: true
|
||||
|
||||
/once@1.4.0:
|
||||
resolution: {integrity: sha512-lNaJgI+2Q5URQBkccEKHTQOPaXdUxnZZElQTZY0MFUAuaEqe1E+Nyvgdz/aIyNi6Z9MzO5dv1H8n58/GELp3+w==}
|
||||
dependencies:
|
||||
wrappy: 1.0.2
|
||||
dev: false
|
||||
|
||||
/resolve-pkg-maps@1.0.0:
|
||||
resolution: {integrity: sha512-seS2Tj26TBVOC2NIc2rOe2y2ZO7efxITtLZcGSOnHHNOQ7CkiUBfw0Iw2ck6xkIhPwLhKNLS8BO+hEpngQlqzw==}
|
||||
dev: true
|
||||
|
||||
/split2@4.2.0:
|
||||
resolution: {integrity: sha512-UcjcJOWknrNkF6PLX83qcHM6KHgVKNkV62Y8a5uYDVv9ydGQVwAHMKqHdJje1VTWpljG0WYpCDhrCdAOYH4TWg==}
|
||||
engines: {node: '>= 10.x'}
|
||||
dev: false
|
||||
|
||||
/strip-bom@3.0.0:
|
||||
resolution: {integrity: sha512-vavAMRXOgBVNF6nyEEmL3DBK19iRpDcoIwW+swQ+CbGiu7lju6t+JklA1MHweoWtadgt4ISVUsXLyDq34ddcwA==}
|
||||
engines: {node: '>=4'}
|
||||
dev: true
|
||||
|
||||
/text-extensions@2.4.0:
|
||||
resolution: {integrity: sha512-te/NtwBwfiNRLf9Ijqx3T0nlqZiQ2XrrtBvu+cLL8ZRrGkO0NHTug8MYFKyoSrv/sHTaSKfilUkizV6XhxMJ3g==}
|
||||
engines: {node: '>=8'}
|
||||
dev: false
|
||||
|
||||
/through@2.3.8:
|
||||
resolution: {integrity: sha512-w89qg7PI8wAdvX60bMDP+bFoD5Dvhm9oLheFp5O4a2QF0cSBGsBX4qZmadPMvVqlLJBBci+WqGGOAPvcDeNSVg==}
|
||||
dev: false
|
||||
|
||||
/tsconfig-paths@4.2.0:
|
||||
resolution: {integrity: sha512-NoZ4roiN7LnbKn9QqE1amc9DJfzvZXxF4xDavcOWt1BPkdx+m+0gJuPM+S0vCe7zTJMYUP0R8pO2XMr+Y8oLIg==}
|
||||
engines: {node: '>=6'}
|
||||
dependencies:
|
||||
json5: 2.2.3
|
||||
minimist: 1.2.8
|
||||
strip-bom: 3.0.0
|
||||
dev: true
|
||||
|
||||
/tsx@4.8.2:
|
||||
resolution: {integrity: sha512-hmmzS4U4mdy1Cnzpl/NQiPUC2k34EcNSTZYVJThYKhdqTwuBeF+4cG9KUK/PFQ7KHaAaYwqlb7QfmsE2nuj+WA==}
|
||||
engines: {node: '>=18.0.0'}
|
||||
hasBin: true
|
||||
dependencies:
|
||||
esbuild: 0.20.2
|
||||
get-tsconfig: 4.7.3
|
||||
optionalDependencies:
|
||||
fsevents: 2.3.3
|
||||
dev: true
|
||||
|
||||
/tunnel@0.0.6:
|
||||
resolution: {integrity: sha512-1h/Lnq9yajKY2PEbBadPXj3VxsDDu844OnaAo52UVmIzIvwwtBPIuNvkjuzBlTWpfJyUbG3ez0KSBibQkj4ojg==}
|
||||
engines: {node: '>=0.6.11 <=0.7.0 || >=0.7.3'}
|
||||
dev: false
|
||||
|
||||
/typescript@5.4.5:
|
||||
resolution: {integrity: sha512-vcI4UpRgg81oIRUFwR0WSIHKt11nJ7SAVlYNIu+QpqeyXP+gpQJy/Z4+F0aGxSE4MqwjyXvW/TzgkLAx2AGHwQ==}
|
||||
engines: {node: '>=14.17'}
|
||||
hasBin: true
|
||||
dev: true
|
||||
|
||||
/undici-types@5.26.5:
|
||||
resolution: {integrity: sha512-JlCMO+ehdEIKqlFxk6IfVoAUVmgz7cU7zD/h9XZ0qzeosSHmUJVOzSQvvYSYWXkFXC+IfLKSIffhv0sVZup6pA==}
|
||||
dev: true
|
||||
|
||||
/undici@5.28.4:
|
||||
resolution: {integrity: sha512-72RFADWFqKmUb2hmmvNODKL3p9hcB6Gt2DOQMis1SEBaV6a4MH8soBvzg+95CYhCKPFedut2JY9bMfrDl9D23g==}
|
||||
engines: {node: '>=14.0'}
|
||||
dependencies:
|
||||
'@fastify/busboy': 2.1.1
|
||||
dev: false
|
||||
|
||||
/universal-user-agent@6.0.1:
|
||||
resolution: {integrity: sha512-yCzhz6FN2wU1NiiQRogkTQszlQSlpWaw8SvVegAc+bDxbzHgh1vX8uIe8OYyMH6DwH+sdTJsgMl36+mSMdRJIQ==}
|
||||
dev: false
|
||||
|
||||
/uuid@8.3.2:
|
||||
resolution: {integrity: sha512-+NYs2QeMWy+GWFOEm9xnn6HCDp0l7QBD7ml8zLUmJ+93Q5NF0NocErnwkTkXVFNiX3/fpC6afS8Dhb/gz7R7eg==}
|
||||
hasBin: true
|
||||
dev: false
|
||||
|
||||
/wrappy@1.0.2:
|
||||
resolution: {integrity: sha512-l4Sp/DRseor9wL6EvV2+TuQn63dMkPjZ/sp9XkghTEbV9KlPS1xUsZ3u7/IQO4wxtcFB4bgpQPRcR3QCvezPcQ==}
|
||||
dev: false
|
||||
30
cyborg/src/common.ts
Normal file
@@ -0,0 +1,30 @@
|
||||
/*
|
||||
* Copyright 2023 Greptime Team
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
import * as core from "@actions/core";
|
||||
import {config} from "dotenv";
|
||||
import {getOctokit} from "@actions/github";
|
||||
import {GitHub} from "@actions/github/lib/utils";
|
||||
|
||||
export function handleError(err: any): void {
|
||||
console.error(err)
|
||||
core.setFailed(`Unhandled error: ${err}`)
|
||||
}
|
||||
|
||||
export function obtainClient(token: string): InstanceType<typeof GitHub> {
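// Load a local .env file (if any) so the named token, e.g. "GITHUB_TOKEN", can be read from process.env.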
|
||||
config()
|
||||
return getOctokit(process.env[token])
|
||||
}
|
||||
14
cyborg/tsconfig.json
Normal file
@@ -0,0 +1,14 @@
|
||||
{
|
||||
"ts-node": {
|
||||
"require": ["tsconfig-paths/register"]
|
||||
},
|
||||
"compilerOptions": {
|
||||
"module": "NodeNext",
|
||||
"moduleResolution": "NodeNext",
|
||||
"target": "ES6",
|
||||
"paths": {
|
||||
"@/*": ["./src/*"]
|
||||
},
|
||||
"resolveJsonModule": true,
|
||||
}
|
||||
}
|
||||
16
docker/ci/ubuntu/Dockerfile.fuzztests
Normal file
@@ -0,0 +1,16 @@
|
||||
FROM ubuntu:22.04
|
||||
|
||||
# The binary name of GreptimeDB executable.
|
||||
# Defaults to "greptime", but sometimes in other projects it might be different.
|
||||
ARG TARGET_BIN=greptime
|
||||
|
||||
RUN apt-get update && DEBIAN_FRONTEND=noninteractive apt-get install -y \
|
||||
ca-certificates \
|
||||
curl
|
||||
|
||||
ARG BINARY_PATH
|
||||
ADD $BINARY_PATH/$TARGET_BIN /greptime/bin/
|
||||
|
||||
ENV PATH /greptime/bin/:$PATH
|
||||
|
||||
ENTRYPOINT ["greptime"]
|
||||
@@ -34,7 +34,7 @@ RUN rustup toolchain install ${RUST_TOOLCHAIN}
|
||||
RUN rustup target add aarch64-linux-android
|
||||
|
||||
# Install cargo-ndk
|
||||
RUN cargo install cargo-ndk
|
||||
RUN cargo install cargo-ndk@3.5.4
|
||||
ENV ANDROID_NDK_HOME $NDK_ROOT
|
||||
|
||||
# Builder entrypoint.
|
||||
|
||||
@@ -23,28 +23,28 @@
|
||||
|
||||
## Write performance
|
||||
|
||||
| Environment | Ingest rate (rows/s) |
|
||||
| ------------------ | --------------------- |
|
||||
| Local | 3695814.64 |
|
||||
| EC2 c5d.2xlarge | 2987166.64 |
|
||||
| Environment | Ingest rate (rows/s) |
|
||||
| --------------- | -------------------- |
|
||||
| Local | 369581.464 |
|
||||
| EC2 c5d.2xlarge | 298716.664 |
|
||||
|
||||
|
||||
## Query performance
|
||||
|
||||
| Query type | Local (ms) | EC2 c5d.2xlarge (ms) |
|
||||
| --------------------- | ---------- | ---------------------- |
|
||||
| cpu-max-all-1 | 30.56 | 54.74 |
|
||||
| cpu-max-all-8 | 52.69 | 70.50 |
|
||||
| double-groupby-1 | 664.30 | 1366.63 |
|
||||
| double-groupby-5 | 1391.26 | 2141.71 |
|
||||
| double-groupby-all | 2828.94 | 3389.59 |
|
||||
| groupby-orderby-limit | 718.92 | 1213.90 |
|
||||
| high-cpu-1 | 29.21 | 52.98 |
|
||||
| high-cpu-all | 5514.12 | 7194.91 |
|
||||
| lastpoint | 7571.40 | 9423.41 |
|
||||
| single-groupby-1-1-1 | 19.09 | 7.77 |
|
||||
| single-groupby-1-1-12 | 27.28 | 51.64 |
|
||||
| single-groupby-1-8-1 | 31.85 | 11.64 |
|
||||
| single-groupby-5-1-1 | 16.14 | 9.67 |
|
||||
| single-groupby-5-1-12 | 27.21 | 53.62 |
|
||||
| single-groupby-5-8-1 | 39.62 | 14.96 |
|
||||
| Query type | Local (ms) | EC2 c5d.2xlarge (ms) |
|
||||
| --------------------- | ---------- | -------------------- |
|
||||
| cpu-max-all-1 | 30.56 | 54.74 |
|
||||
| cpu-max-all-8 | 52.69 | 70.50 |
|
||||
| double-groupby-1 | 664.30 | 1366.63 |
|
||||
| double-groupby-5 | 1391.26 | 2141.71 |
|
||||
| double-groupby-all | 2828.94 | 3389.59 |
|
||||
| groupby-orderby-limit | 718.92 | 1213.90 |
|
||||
| high-cpu-1 | 29.21 | 52.98 |
|
||||
| high-cpu-all | 5514.12 | 7194.91 |
|
||||
| lastpoint | 7571.40 | 9423.41 |
|
||||
| single-groupby-1-1-1 | 19.09 | 7.77 |
|
||||
| single-groupby-1-1-12 | 27.28 | 51.64 |
|
||||
| single-groupby-1-8-1 | 31.85 | 11.64 |
|
||||
| single-groupby-5-1-1 | 16.14 | 9.67 |
|
||||
| single-groupby-5-1-12 | 27.21 | 53.62 |
|
||||
| single-groupby-5-8-1 | 39.62 | 14.96 |
|
||||
|
||||
58
docs/benchmarks/tsbs/v0.8.0.md
Normal file
@@ -0,0 +1,58 @@
|
||||
# TSBS benchmark - v0.8.0
|
||||
|
||||
## Environment
|
||||
|
||||
### Local
|
||||
|
||||
| | |
|
||||
| ------ | ---------------------------------- |
|
||||
| CPU | AMD Ryzen 7 7735HS (8 core 3.2GHz) |
|
||||
| Memory | 32GB |
|
||||
| Disk | SOLIDIGM SSDPFKNU010TZ |
|
||||
| OS | Ubuntu 22.04.2 LTS |
|
||||
|
||||
### Amazon EC2
|
||||
|
||||
| | |
|
||||
| ------- | -------------- |
|
||||
| Machine | c5d.2xlarge |
|
||||
| CPU | 8 core |
|
||||
| Memory | 16GB |
|
||||
| Disk | 50GB (GP3) |
|
||||
| OS | Ubuntu 22.04.1 |
|
||||
|
||||
## Write performance
|
||||
|
||||
| Environment | Ingest rate (rows/s) |
|
||||
| --------------- | -------------------- |
|
||||
| Local | 315369.66 |
|
||||
| EC2 c5d.2xlarge | 222148.56 |
|
||||
|
||||
## Query performance
|
||||
|
||||
| Query type | Local (ms) | EC2 c5d.2xlarge (ms) |
|
||||
| --------------------- | ---------- | -------------------- |
|
||||
| cpu-max-all-1 | 24.63 | 15.29 |
|
||||
| cpu-max-all-8 | 51.69 | 33.53 |
|
||||
| double-groupby-1 | 673.51 | 1295.38 |
|
||||
| double-groupby-5 | 1244.93 | 1993.91 |
|
||||
| double-groupby-all | 2215.44 | 3056.77 |
|
||||
| groupby-orderby-limit | 754.50 | 1546.49 |
|
||||
| high-cpu-1 | 19.62 | 11.58 |
|
||||
| high-cpu-all | 5402.31 | 8011.43 |
|
||||
| lastpoint | 6756.12 | 9312.67 |
|
||||
| single-groupby-1-1-1 | 15.70 | 7.67 |
|
||||
| single-groupby-1-1-12 | 16.72 | 9.29 |
|
||||
| single-groupby-1-8-1 | 26.72 | 17.97 |
|
||||
| single-groupby-5-1-1 | 18.17 | 10.09 |
|
||||
| single-groupby-5-1-12 | 20.04 | 12.37 |
|
||||
| single-groupby-5-8-1 | 35.63 | 23.13 |
|
||||
|
||||
`single-groupby-1-1-1` query throughput
|
||||
|
||||
| Environment | Client concurrency | mean time (ms) | qps (queries/sec) |
|
||||
| --------------- | ------------------ | -------------- | ----------------- |
|
||||
| Local | 50 | 42.87 | 1165.73 |
|
||||
| Local | 100 | 89.29 | 1119.38 |
|
||||
| EC2 c5d.2xlarge | 50 | 69.25 | 721.73 |
|
||||
| EC2 c5d.2xlarge | 100 | 140.93 | 709.35 |
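As a rough sanity check on these numbers, qps is approximately client concurrency divided by mean query time: 50 / 0.04287 s ≈ 1166 queries/s for the local run at concurrency 50, which matches the reported 1165.73. Doubling the concurrency roughly doubles the mean time, so the throughput stays nearly flat.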
|
||||
136
docs/how-to/how-to-write-fuzz-tests.md
Normal file
@@ -0,0 +1,136 @@
|
||||
# How to write fuzz tests
|
||||
|
||||
This document introduces how to write fuzz tests in GreptimeDB.
|
||||
|
||||
## What is a fuzz test
|
||||
A fuzz test is a tool that leverages deterministic random generation to assist in finding bugs. The goal of fuzz tests is to identify fuzzer-generated inputs that cause panics, crashes, or other unexpected behaviors. We use [cargo-fuzz](https://github.com/rust-fuzz/cargo-fuzz) to run our fuzz test targets.
|
||||
|
||||
## Why we need them
|
||||
- Find bugs by leveraging random generation
|
||||
- Integrate with other tests (e.g., e2e)
|
||||
|
||||
## Resources
|
||||
All fuzz test-related resources are located in the `/tests-fuzz` directory.
|
||||
There are two types of resources: (1) fundamental components and (2) test targets.
|
||||
|
||||
### Fundamental components
|
||||
They are located in the `/tests-fuzz/src` directory. The fundamental components define how to generate SQLs (including dialects for different protocols) and validate execution results (e.g., column attribute validation), etc.
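As a rough mental model (not the exact definitions used in `/tests-fuzz/src`), the components boil down to two abstractions: a generator that turns randomness into an intermediate representation, and a translator that renders that representation as a protocol-specific SQL string. The trait names and signatures below are illustrative only:

```rust
use rand::Rng;

/// Illustrative, simplified generator abstraction: turn randomness into an
/// intermediate representation (IR), e.g. a CREATE TABLE expression.
pub trait Generator<T, R: Rng> {
    type Error;
    fn generate(&self, rng: &mut R) -> Result<T, Self::Error>;
}

/// Illustrative, simplified dialect translator abstraction: turn an IR into a
/// SQL string for one protocol (MySQL, PostgreSQL, ...).
pub trait DslTranslator<Input, Output> {
    type Error;
    fn translate(&self, input: &Input) -> Result<Output, Self::Error>;
}
```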
|
||||
|
||||
### Test targets
|
||||
They are located in the `/tests-fuzz/targets` directory, with each file representing an independent fuzz test case. A target utilizes the fundamental components to generate SQLs, sends the generated SQLs via the specified protocol, and validates the results of SQL execution.
|
||||
|
||||
Figure 1 illustrates how the fundamental components of a fuzz test provide the ability to generate random SQLs. A Random Number Generator (Rng) drives an ExprGenerator to produce the Intermediate Representation (IR), which a DialectTranslator then turns into the dialect of a specific protocol. Finally, the fuzz test sends the generated SQL via that protocol and verifies that the execution results meet expectations.
|
||||
```
                      Rng
                       |
                       v
                 ExprGenerator
                       |
                       v
      Intermediate representation (IR)
                       |
    +------------------+------------------+
    |                  |                  |
    v                  v                  v
MySQLTranslator  PostgreSQLTranslator  OtherDialectTranslator
    |                  |                  |
    v                  v                  v
SQL(MySQL Dialect)   .....              .....
    |
    v
 Fuzz Test
```
|
||||
(Figure1: Overview of fuzz tests)
|
||||
|
||||
For more details about fuzz targets and fundamental components, please refer to this [tracking issue](https://github.com/GreptimeTeam/greptimedb/issues/3174).
|
||||
|
||||
## How to add a fuzz test target
|
||||
|
||||
1. Create an empty Rust source file at `/tests-fuzz/targets/<fuzz-target>.rs`.
|
||||
|
||||
2. Register the fuzz test target in the `/tests-fuzz/Cargo.toml` file.
|
||||
|
||||
```toml
|
||||
[[bin]]
|
||||
name = "<fuzz-target>"
|
||||
path = "targets/<fuzz-target>.rs"
|
||||
test = false
|
||||
bench = false
|
||||
doc = false
|
||||
```
|
||||
|
||||
3. Define the `FuzzInput` in the `/tests-fuzz/targets/<fuzz-target>.rs`.
|
||||
|
||||
```rust
|
||||
#![no_main]
|
||||
use libfuzzer_sys::arbitrary::{Arbitrary, Unstructured};
|
||||
|
||||
#[derive(Clone, Debug)]
|
||||
struct FuzzInput {
|
||||
seed: u64,
|
||||
}
|
||||
|
||||
impl Arbitrary<'_> for FuzzInput {
|
||||
fn arbitrary(u: &mut Unstructured<'_>) -> arbitrary::Result<Self> {
|
||||
let seed = u.int_in_range(u64::MIN..=u64::MAX)?;
|
||||
Ok(FuzzInput { seed })
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
4. Write your first fuzz test target in the `/tests-fuzz/targets/<fuzz-target>.rs`.
|
||||
|
||||
```rust
|
||||
use libfuzzer_sys::fuzz_target;
|
||||
use rand::{Rng, SeedableRng};
|
||||
use rand_chacha::ChaChaRng;
|
||||
use snafu::ResultExt;
|
||||
use sqlx::{MySql, Pool};
|
||||
use tests_fuzz::fake::{
|
||||
merge_two_word_map_fn, random_capitalize_map, uppercase_and_keyword_backtick_map,
|
||||
MappedGenerator, WordGenerator,
|
||||
};
|
||||
use tests_fuzz::generator::create_expr::CreateTableExprGeneratorBuilder;
|
||||
use tests_fuzz::generator::Generator;
|
||||
use tests_fuzz::ir::CreateTableExpr;
|
||||
use tests_fuzz::translator::mysql::create_expr::CreateTableExprTranslator;
|
||||
use tests_fuzz::translator::DslTranslator;
|
||||
use tests_fuzz::utils::{init_greptime_connections, Connections};
|
||||
|
||||
fuzz_target!(|input: FuzzInput| {
|
||||
common_telemetry::init_default_ut_logging();
|
||||
common_runtime::block_on_write(async {
|
||||
let Connections { mysql } = init_greptime_connections().await;
|
||||
let mut rng = ChaChaRng::seed_from_u64(input.seed);
|
||||
let columns = rng.gen_range(2..30);
// Randomly decide whether the generated statement should carry IF NOT EXISTS.
let if_not_exists = rng.gen_bool(0.5);
|
||||
let create_table_generator = CreateTableExprGeneratorBuilder::default()
|
||||
.name_generator(Box::new(MappedGenerator::new(
|
||||
WordGenerator,
|
||||
merge_two_word_map_fn(random_capitalize_map, uppercase_and_keyword_backtick_map),
|
||||
)))
|
||||
.columns(columns)
|
||||
.engine("mito")
|
||||
.if_not_exists(if_not_exists)
|
||||
.build()
|
||||
.unwrap();
|
||||
let ir = create_table_generator.generate(&mut rng);
|
||||
let translator = CreateTableExprTranslator;
|
||||
let sql = translator.translate(&ir).unwrap();
|
||||
mysql.execute(sql.as_str()).await.unwrap();
|
||||
})
|
||||
});
|
||||
```
|
||||
|
||||
5. Run your fuzz test target
|
||||
|
||||
```bash
|
||||
cargo fuzz run <fuzz-target> --fuzz-dir tests-fuzz
|
||||
```
|
||||
|
||||
For more details, please refer to this [document](/tests-fuzz/README.md).
|
||||
@@ -73,7 +73,7 @@ CREATE TABLE cpu (
|
||||
usage_system DOUBLE,
|
||||
datacenter STRING,
|
||||
TIME INDEX (ts),
|
||||
PRIMARY KEY(datacenter, host)) ENGINE=mito WITH(regions=1);
|
||||
PRIMARY KEY(datacenter, host)) ENGINE=mito;
|
||||
```
|
||||
|
||||
Then the table's `TableMeta` may look like this:
|
||||
@@ -249,7 +249,7 @@ CREATE TABLE cpu (
|
||||
usage_system DOUBLE,
|
||||
datacenter STRING,
|
||||
TIME INDEX (ts),
|
||||
PRIMARY KEY(datacenter, host)) ENGINE=mito WITH(regions=1);
|
||||
PRIMARY KEY(datacenter, host)) ENGINE=mito;
|
||||
|
||||
select ts, usage_system from cpu;
|
||||
```
|
||||
|
||||
@@ -7,4 +7,60 @@ Status notify: we are still working on this config. It's expected to change freq
|
||||
|
||||
# How to use
|
||||
|
||||
## `greptimedb.json`
|
||||
|
||||
Open the Grafana dashboard page, choose `New` -> `Import`, and upload the `greptimedb.json` file.
|
||||
|
||||
## `greptimedb-cluster.json`
|
||||
|
||||
This cluster dashboard provides a comprehensive view of incoming requests, response statuses, and internal activities such as flush and compaction, with a layered structure from frontend to datanode. Designed with a focus on alert functionality, its primary aim is to highlight any anomalies in metrics, allowing users to quickly pinpoint the cause of errors.
|
||||
|
||||
We use Prometheus to scrape metrics from the nodes in a GreptimeDB cluster and Grafana to visualize them. Any compatible stack should work too.
|
||||
|
||||
__Note__: This dashboard is still in an early stage of development. Any issue or advice on improvement is welcomed.
|
||||
|
||||
### Configuration
|
||||
|
||||
Please ensure the following configuration before importing the dashboard into Grafana.
|
||||
|
||||
__1. Prometheus scrape config__
|
||||
|
||||
Assign a `greptime_pod` label to each host target. We use this label to identify each node instance.
|
||||
|
||||
```yml
|
||||
# example config
|
||||
# only to indicate how to assign labels to each target
|
||||
# modify yours accordingly
|
||||
scrape_configs:
|
||||
- job_name: metasrv
|
||||
static_configs:
|
||||
- targets: ['<ip>:<port>']
|
||||
labels:
|
||||
greptime_pod: metasrv
|
||||
|
||||
- job_name: datanode
|
||||
static_configs:
|
||||
- targets: ['<ip>:<port>']
|
||||
labels:
|
||||
greptime_pod: datanode1
|
||||
- targets: ['<ip>:<port>']
|
||||
labels:
|
||||
greptime_pod: datanode2
|
||||
- targets: ['<ip>:<port>']
|
||||
labels:
|
||||
greptime_pod: datanode3
|
||||
|
||||
- job_name: frontend
|
||||
static_configs:
|
||||
- targets: ['<ip>:<port>']
|
||||
labels:
|
||||
greptime_pod: frontend
|
||||
```
|
||||
|
||||
__2. Grafana config__
|
||||
|
||||
Create a Prometheus data source in Grafana before using this dashboard. We use `datasource` as a variable in the Grafana dashboard so that multiple environments are supported.
|
||||
|
||||
### Usage
|
||||
|
||||
Use the `datasource` or `greptime_pod` selector in the upper-left corner to filter data from a certain node.
|
||||
|
||||
4862
grafana/greptimedb-cluster.json
Normal file
File diff suppressed because it is too large
@@ -17,12 +17,15 @@ headerPath = "Apache-2.0.txt"
|
||||
includes = [
|
||||
"*.rs",
|
||||
"*.py",
|
||||
"*.ts",
|
||||
]
|
||||
|
||||
excludes = [
|
||||
# copied sources
|
||||
"src/common/base/src/readable_size.rs",
|
||||
"src/common/base/src/secrets.rs",
|
||||
"src/servers/src/repeated_field.rs",
|
||||
"src/servers/src/http/test_helpers.rs",
|
||||
]
|
||||
|
||||
[properties]
|
||||
|
||||
@@ -1,2 +1,2 @@
|
||||
[toolchain]
|
||||
channel = "nightly-2024-04-18"
|
||||
channel = "nightly-2024-04-20"
|
||||
|
||||
@@ -30,6 +30,7 @@ pub enum Error {
|
||||
#[snafu(display("Unknown proto column datatype: {}", datatype))]
|
||||
UnknownColumnDataType {
|
||||
datatype: i32,
|
||||
#[snafu(implicit)]
|
||||
location: Location,
|
||||
#[snafu(source)]
|
||||
error: prost::DecodeError,
|
||||
@@ -38,12 +39,14 @@ pub enum Error {
|
||||
#[snafu(display("Failed to create column datatype from {:?}", from))]
|
||||
IntoColumnDataType {
|
||||
from: ConcreteDataType,
|
||||
#[snafu(implicit)]
|
||||
location: Location,
|
||||
},
|
||||
|
||||
#[snafu(display("Failed to convert column default constraint, column: {}", column))]
|
||||
ConvertColumnDefaultConstraint {
|
||||
column: String,
|
||||
#[snafu(implicit)]
|
||||
location: Location,
|
||||
source: datatypes::error::Error,
|
||||
},
|
||||
@@ -51,6 +54,7 @@ pub enum Error {
|
||||
#[snafu(display("Invalid column default constraint, column: {}", column))]
|
||||
InvalidColumnDefaultConstraint {
|
||||
column: String,
|
||||
#[snafu(implicit)]
|
||||
location: Location,
|
||||
source: datatypes::error::Error,
|
||||
},
|
||||
|
||||
@@ -20,21 +20,20 @@ use common_decimal::Decimal128;
|
||||
use common_time::interval::IntervalUnit;
|
||||
use common_time::time::Time;
|
||||
use common_time::timestamp::TimeUnit;
|
||||
use common_time::{Date, DateTime, Duration, Interval, Timestamp};
|
||||
use common_time::{Date, DateTime, Interval, Timestamp};
|
||||
use datatypes::prelude::{ConcreteDataType, ValueRef};
|
||||
use datatypes::scalars::ScalarVector;
|
||||
use datatypes::types::{
|
||||
DurationType, Int16Type, Int8Type, IntervalType, TimeType, TimestampType, UInt16Type, UInt8Type,
|
||||
Int16Type, Int8Type, IntervalType, TimeType, TimestampType, UInt16Type, UInt8Type,
|
||||
};
|
||||
use datatypes::value::{OrderedF32, OrderedF64, Value};
|
||||
use datatypes::vectors::{
|
||||
BinaryVector, BooleanVector, DateTimeVector, DateVector, Decimal128Vector,
|
||||
DurationMicrosecondVector, DurationMillisecondVector, DurationNanosecondVector,
|
||||
DurationSecondVector, Float32Vector, Float64Vector, Int32Vector, Int64Vector,
|
||||
IntervalDayTimeVector, IntervalMonthDayNanoVector, IntervalYearMonthVector, PrimitiveVector,
|
||||
StringVector, TimeMicrosecondVector, TimeMillisecondVector, TimeNanosecondVector,
|
||||
TimeSecondVector, TimestampMicrosecondVector, TimestampMillisecondVector,
|
||||
TimestampNanosecondVector, TimestampSecondVector, UInt32Vector, UInt64Vector, VectorRef,
|
||||
BinaryVector, BooleanVector, DateTimeVector, DateVector, Decimal128Vector, Float32Vector,
|
||||
Float64Vector, Int32Vector, Int64Vector, IntervalDayTimeVector, IntervalMonthDayNanoVector,
|
||||
IntervalYearMonthVector, PrimitiveVector, StringVector, TimeMicrosecondVector,
|
||||
TimeMillisecondVector, TimeNanosecondVector, TimeSecondVector, TimestampMicrosecondVector,
|
||||
TimestampMillisecondVector, TimestampNanosecondVector, TimestampSecondVector, UInt32Vector,
|
||||
UInt64Vector, VectorRef,
|
||||
};
|
||||
use greptime_proto::v1;
|
||||
use greptime_proto::v1::column_data_type_extension::TypeExt;
|
||||
@@ -127,14 +126,6 @@ impl From<ColumnDataTypeWrapper> for ConcreteDataType {
|
||||
ColumnDataType::IntervalMonthDayNano => {
|
||||
ConcreteDataType::interval_month_day_nano_datatype()
|
||||
}
|
||||
ColumnDataType::DurationSecond => ConcreteDataType::duration_second_datatype(),
|
||||
ColumnDataType::DurationMillisecond => {
|
||||
ConcreteDataType::duration_millisecond_datatype()
|
||||
}
|
||||
ColumnDataType::DurationMicrosecond => {
|
||||
ConcreteDataType::duration_microsecond_datatype()
|
||||
}
|
||||
ColumnDataType::DurationNanosecond => ConcreteDataType::duration_nanosecond_datatype(),
|
||||
ColumnDataType::Decimal128 => {
|
||||
if let Some(TypeExt::DecimalType(d)) = datatype_wrapper
|
||||
.datatype_ext
|
||||
@@ -212,11 +203,7 @@ impl_column_type_functions_with_snake!(
|
||||
TimeNanosecond,
|
||||
IntervalYearMonth,
|
||||
IntervalDayTime,
|
||||
IntervalMonthDayNano,
|
||||
DurationSecond,
|
||||
DurationMillisecond,
|
||||
DurationMicrosecond,
|
||||
DurationNanosecond
|
||||
IntervalMonthDayNano
|
||||
);
|
||||
|
||||
impl ColumnDataTypeWrapper {
|
||||
@@ -270,16 +257,11 @@ impl TryFrom<ConcreteDataType> for ColumnDataTypeWrapper {
|
||||
IntervalType::DayTime(_) => ColumnDataType::IntervalDayTime,
|
||||
IntervalType::MonthDayNano(_) => ColumnDataType::IntervalMonthDayNano,
|
||||
},
|
||||
ConcreteDataType::Duration(d) => match d {
|
||||
DurationType::Second(_) => ColumnDataType::DurationSecond,
|
||||
DurationType::Millisecond(_) => ColumnDataType::DurationMillisecond,
|
||||
DurationType::Microsecond(_) => ColumnDataType::DurationMicrosecond,
|
||||
DurationType::Nanosecond(_) => ColumnDataType::DurationNanosecond,
|
||||
},
|
||||
ConcreteDataType::Decimal128(_) => ColumnDataType::Decimal128,
|
||||
ConcreteDataType::Null(_)
|
||||
| ConcreteDataType::List(_)
|
||||
| ConcreteDataType::Dictionary(_) => {
|
||||
| ConcreteDataType::Dictionary(_)
|
||||
| ConcreteDataType::Duration(_) => {
|
||||
return error::IntoColumnDataTypeSnafu { from: datatype }.fail()
|
||||
}
|
||||
};
|
||||
@@ -409,22 +391,6 @@ pub fn values_with_capacity(datatype: ColumnDataType, capacity: usize) -> Values
|
||||
interval_month_day_nano_values: Vec::with_capacity(capacity),
|
||||
..Default::default()
|
||||
},
|
||||
ColumnDataType::DurationSecond => Values {
|
||||
duration_second_values: Vec::with_capacity(capacity),
|
||||
..Default::default()
|
||||
},
|
||||
ColumnDataType::DurationMillisecond => Values {
|
||||
duration_millisecond_values: Vec::with_capacity(capacity),
|
||||
..Default::default()
|
||||
},
|
||||
ColumnDataType::DurationMicrosecond => Values {
|
||||
duration_microsecond_values: Vec::with_capacity(capacity),
|
||||
..Default::default()
|
||||
},
|
||||
ColumnDataType::DurationNanosecond => Values {
|
||||
duration_nanosecond_values: Vec::with_capacity(capacity),
|
||||
..Default::default()
|
||||
},
|
||||
ColumnDataType::Decimal128 => Values {
|
||||
decimal128_values: Vec::with_capacity(capacity),
|
||||
..Default::default()
|
||||
@@ -476,14 +442,8 @@ pub fn push_vals(column: &mut Column, origin_count: usize, vector: VectorRef) {
|
||||
.interval_month_day_nano_values
|
||||
.push(convert_i128_to_interval(val.to_i128())),
|
||||
},
|
||||
Value::Duration(val) => match val.unit() {
|
||||
TimeUnit::Second => values.duration_second_values.push(val.value()),
|
||||
TimeUnit::Millisecond => values.duration_millisecond_values.push(val.value()),
|
||||
TimeUnit::Microsecond => values.duration_microsecond_values.push(val.value()),
|
||||
TimeUnit::Nanosecond => values.duration_nanosecond_values.push(val.value()),
|
||||
},
|
||||
Value::Decimal128(val) => values.decimal128_values.push(convert_to_pb_decimal128(val)),
|
||||
Value::List(_) => unreachable!(),
|
||||
Value::List(_) | Value::Duration(_) => unreachable!(),
|
||||
});
|
||||
column.null_mask = null_mask.into_vec();
|
||||
}
|
||||
@@ -518,6 +478,10 @@ fn ddl_request_type(request: &DdlRequest) -> &'static str {
|
||||
Some(Expr::Alter(_)) => "ddl.alter",
|
||||
Some(Expr::DropTable(_)) => "ddl.drop_table",
|
||||
Some(Expr::TruncateTable(_)) => "ddl.truncate_table",
|
||||
Some(Expr::CreateFlow(_)) => "ddl.create_flow",
|
||||
Some(Expr::DropFlow(_)) => "ddl.drop_flow",
|
||||
Some(Expr::CreateView(_)) => "ddl.create_view",
|
||||
Some(Expr::DropView(_)) => "ddl.drop_view",
|
||||
None => "ddl.empty",
|
||||
}
|
||||
}
|
||||
@@ -583,10 +547,6 @@ pub fn pb_value_to_value_ref<'a>(
|
||||
let interval = Interval::from_month_day_nano(v.months, v.days, v.nanoseconds);
|
||||
ValueRef::Interval(interval)
|
||||
}
|
||||
ValueData::DurationSecondValue(v) => ValueRef::Duration(Duration::new_second(*v)),
|
||||
ValueData::DurationMillisecondValue(v) => ValueRef::Duration(Duration::new_millisecond(*v)),
|
||||
ValueData::DurationMicrosecondValue(v) => ValueRef::Duration(Duration::new_microsecond(*v)),
|
||||
ValueData::DurationNanosecondValue(v) => ValueRef::Duration(Duration::new_nanosecond(*v)),
|
||||
ValueData::Decimal128Value(v) => {
|
||||
// get precision and scale from datatype_extension
|
||||
if let Some(TypeExt::DecimalType(d)) = datatype_ext
|
||||
@@ -681,26 +641,15 @@ pub fn pb_values_to_vector_ref(data_type: &ConcreteDataType, values: Values) ->
|
||||
))
|
||||
}
|
||||
},
|
||||
ConcreteDataType::Duration(unit) => match unit {
|
||||
DurationType::Second(_) => Arc::new(DurationSecondVector::from_vec(
|
||||
values.duration_second_values,
|
||||
)),
|
||||
DurationType::Millisecond(_) => Arc::new(DurationMillisecondVector::from_vec(
|
||||
values.duration_millisecond_values,
|
||||
)),
|
||||
DurationType::Microsecond(_) => Arc::new(DurationMicrosecondVector::from_vec(
|
||||
values.duration_microsecond_values,
|
||||
)),
|
||||
DurationType::Nanosecond(_) => Arc::new(DurationNanosecondVector::from_vec(
|
||||
values.duration_nanosecond_values,
|
||||
)),
|
||||
},
|
||||
ConcreteDataType::Decimal128(d) => Arc::new(Decimal128Vector::from_values(
|
||||
values.decimal128_values.iter().map(|x| {
|
||||
Decimal128::from_value_precision_scale(x.hi, x.lo, d.precision(), d.scale()).into()
|
||||
}),
|
||||
)),
|
||||
ConcreteDataType::Null(_) | ConcreteDataType::List(_) | ConcreteDataType::Dictionary(_) => {
|
||||
ConcreteDataType::Null(_)
|
||||
| ConcreteDataType::List(_)
|
||||
| ConcreteDataType::Dictionary(_)
|
||||
| ConcreteDataType::Duration(_) => {
|
||||
unreachable!()
|
||||
}
|
||||
}
|
||||
@@ -849,26 +798,6 @@ pub fn pb_values_to_values(data_type: &ConcreteDataType, values: Values) -> Vec<
|
||||
))
|
||||
})
|
||||
.collect(),
|
||||
ConcreteDataType::Duration(DurationType::Second(_)) => values
|
||||
.duration_second_values
|
||||
.into_iter()
|
||||
.map(|v| Value::Duration(Duration::new_second(v)))
|
||||
.collect(),
|
||||
ConcreteDataType::Duration(DurationType::Millisecond(_)) => values
|
||||
.duration_millisecond_values
|
||||
.into_iter()
|
||||
.map(|v| Value::Duration(Duration::new_millisecond(v)))
|
||||
.collect(),
|
||||
ConcreteDataType::Duration(DurationType::Microsecond(_)) => values
|
||||
.duration_microsecond_values
|
||||
.into_iter()
|
||||
.map(|v| Value::Duration(Duration::new_microsecond(v)))
|
||||
.collect(),
|
||||
ConcreteDataType::Duration(DurationType::Nanosecond(_)) => values
|
||||
.duration_nanosecond_values
|
||||
.into_iter()
|
||||
.map(|v| Value::Duration(Duration::new_nanosecond(v)))
|
||||
.collect(),
|
||||
ConcreteDataType::Decimal128(d) => values
|
||||
.decimal128_values
|
||||
.into_iter()
|
||||
@@ -881,7 +810,10 @@ pub fn pb_values_to_values(data_type: &ConcreteDataType, values: Values) -> Vec<
|
||||
))
|
||||
})
|
||||
.collect(),
|
||||
ConcreteDataType::Null(_) | ConcreteDataType::List(_) | ConcreteDataType::Dictionary(_) => {
|
||||
ConcreteDataType::Null(_)
|
||||
| ConcreteDataType::List(_)
|
||||
| ConcreteDataType::Dictionary(_)
|
||||
| ConcreteDataType::Duration(_) => {
|
||||
unreachable!()
|
||||
}
|
||||
}
|
||||
@@ -993,24 +925,10 @@ pub fn to_proto_value(value: Value) -> Option<v1::Value> {
|
||||
)),
|
||||
},
|
||||
},
|
||||
Value::Duration(v) => match v.unit() {
|
||||
TimeUnit::Second => v1::Value {
|
||||
value_data: Some(ValueData::DurationSecondValue(v.value())),
|
||||
},
|
||||
TimeUnit::Millisecond => v1::Value {
|
||||
value_data: Some(ValueData::DurationMillisecondValue(v.value())),
|
||||
},
|
||||
TimeUnit::Microsecond => v1::Value {
|
||||
value_data: Some(ValueData::DurationMicrosecondValue(v.value())),
|
||||
},
|
||||
TimeUnit::Nanosecond => v1::Value {
|
||||
value_data: Some(ValueData::DurationNanosecondValue(v.value())),
|
||||
},
|
||||
},
|
||||
Value::Decimal128(v) => v1::Value {
|
||||
value_data: Some(ValueData::Decimal128Value(convert_to_pb_decimal128(v))),
|
||||
},
|
||||
Value::List(_) => return None,
|
||||
Value::List(_) | Value::Duration(_) => return None,
|
||||
};
|
||||
|
||||
Some(proto_value)
|
||||
@@ -1047,10 +965,6 @@ pub fn proto_value_type(value: &v1::Value) -> Option<ColumnDataType> {
|
||||
ValueData::IntervalYearMonthValue(_) => ColumnDataType::IntervalYearMonth,
|
||||
ValueData::IntervalDayTimeValue(_) => ColumnDataType::IntervalDayTime,
|
||||
ValueData::IntervalMonthDayNanoValue(_) => ColumnDataType::IntervalMonthDayNano,
|
||||
ValueData::DurationSecondValue(_) => ColumnDataType::DurationSecond,
|
||||
ValueData::DurationMillisecondValue(_) => ColumnDataType::DurationMillisecond,
|
||||
ValueData::DurationMicrosecondValue(_) => ColumnDataType::DurationMicrosecond,
|
||||
ValueData::DurationNanosecondValue(_) => ColumnDataType::DurationNanosecond,
|
||||
ValueData::Decimal128Value(_) => ColumnDataType::Decimal128,
|
||||
};
|
||||
Some(value_type)
|
||||
@@ -1108,14 +1022,8 @@ pub fn value_to_grpc_value(value: Value) -> GrpcValue {
|
||||
ValueData::IntervalMonthDayNanoValue(convert_i128_to_interval(v.to_i128()))
|
||||
}
|
||||
}),
|
||||
Value::Duration(v) => Some(match v.unit() {
|
||||
TimeUnit::Second => ValueData::DurationSecondValue(v.value()),
|
||||
TimeUnit::Millisecond => ValueData::DurationMillisecondValue(v.value()),
|
||||
TimeUnit::Microsecond => ValueData::DurationMicrosecondValue(v.value()),
|
||||
TimeUnit::Nanosecond => ValueData::DurationNanosecondValue(v.value()),
|
||||
}),
|
||||
Value::Decimal128(v) => Some(ValueData::Decimal128Value(convert_to_pb_decimal128(v))),
|
||||
Value::List(_) => unreachable!(),
|
||||
Value::List(_) | Value::Duration(_) => unreachable!(),
|
||||
},
|
||||
}
|
||||
}
|
||||
@@ -1125,16 +1033,15 @@ mod tests {
|
||||
use std::sync::Arc;
|
||||
|
||||
use datatypes::types::{
|
||||
DurationMillisecondType, DurationSecondType, Int32Type, IntervalDayTimeType,
|
||||
IntervalMonthDayNanoType, IntervalYearMonthType, TimeMillisecondType, TimeSecondType,
|
||||
TimestampMillisecondType, TimestampSecondType, UInt32Type,
|
||||
Int32Type, IntervalDayTimeType, IntervalMonthDayNanoType, IntervalYearMonthType,
|
||||
TimeMillisecondType, TimeSecondType, TimestampMillisecondType, TimestampSecondType,
|
||||
UInt32Type,
|
||||
};
|
||||
use datatypes::vectors::{
|
||||
BooleanVector, DurationMicrosecondVector, DurationMillisecondVector,
|
||||
DurationNanosecondVector, DurationSecondVector, IntervalDayTimeVector,
|
||||
IntervalMonthDayNanoVector, IntervalYearMonthVector, TimeMicrosecondVector,
|
||||
TimeMillisecondVector, TimeNanosecondVector, TimeSecondVector, TimestampMicrosecondVector,
|
||||
TimestampMillisecondVector, TimestampNanosecondVector, TimestampSecondVector, Vector,
|
||||
BooleanVector, IntervalDayTimeVector, IntervalMonthDayNanoVector, IntervalYearMonthVector,
|
||||
TimeMicrosecondVector, TimeMillisecondVector, TimeNanosecondVector, TimeSecondVector,
|
||||
TimestampMicrosecondVector, TimestampMillisecondVector, TimestampNanosecondVector,
|
||||
TimestampSecondVector, Vector,
|
||||
};
|
||||
use paste::paste;
|
||||
|
||||
@@ -1210,10 +1117,6 @@ mod tests {
|
||||
let values = values.interval_month_day_nano_values;
|
||||
assert_eq!(2, values.capacity());
|
||||
|
||||
let values = values_with_capacity(ColumnDataType::DurationMillisecond, 2);
|
||||
let values = values.duration_millisecond_values;
|
||||
assert_eq!(2, values.capacity());
|
||||
|
||||
let values = values_with_capacity(ColumnDataType::Decimal128, 2);
|
||||
let values = values.decimal128_values;
|
||||
assert_eq!(2, values.capacity());
|
||||
@@ -1301,10 +1204,6 @@ mod tests {
|
||||
ConcreteDataType::interval_datatype(IntervalUnit::MonthDayNano),
|
||||
ColumnDataTypeWrapper::interval_month_day_nano_datatype().into()
|
||||
);
|
||||
assert_eq!(
|
||||
ConcreteDataType::duration_millisecond_datatype(),
|
||||
ColumnDataTypeWrapper::duration_millisecond_datatype().into()
|
||||
);
|
||||
assert_eq!(
|
||||
ConcreteDataType::decimal128_datatype(10, 2),
|
||||
ColumnDataTypeWrapper::decimal128_datatype(10, 2).into()
|
||||
@@ -1397,12 +1296,6 @@ mod tests {
|
||||
.try_into()
|
||||
.unwrap()
|
||||
);
|
||||
assert_eq!(
|
||||
ColumnDataTypeWrapper::duration_millisecond_datatype(),
|
||||
ConcreteDataType::duration_millisecond_datatype()
|
||||
.try_into()
|
||||
.unwrap()
|
||||
);
|
||||
|
||||
assert_eq!(
|
||||
ColumnDataTypeWrapper::decimal128_datatype(10, 2),
|
||||
@@ -1556,48 +1449,6 @@ mod tests {
|
||||
});
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_column_put_duration_values() {
|
||||
let mut column = Column {
|
||||
column_name: "test".to_string(),
|
||||
semantic_type: 0,
|
||||
values: Some(Values {
|
||||
..Default::default()
|
||||
}),
|
||||
null_mask: vec![],
|
||||
datatype: 0,
|
||||
..Default::default()
|
||||
};
|
||||
|
||||
let vector = Arc::new(DurationNanosecondVector::from_vec(vec![1, 2, 3]));
|
||||
push_vals(&mut column, 3, vector);
|
||||
assert_eq!(
|
||||
vec![1, 2, 3],
|
||||
column.values.as_ref().unwrap().duration_nanosecond_values
|
||||
);
|
||||
|
||||
let vector = Arc::new(DurationMicrosecondVector::from_vec(vec![7, 8, 9]));
|
||||
push_vals(&mut column, 3, vector);
|
||||
assert_eq!(
|
||||
vec![7, 8, 9],
|
||||
column.values.as_ref().unwrap().duration_microsecond_values
|
||||
);
|
||||
|
||||
let vector = Arc::new(DurationMillisecondVector::from_vec(vec![4, 5, 6]));
|
||||
push_vals(&mut column, 3, vector);
|
||||
assert_eq!(
|
||||
vec![4, 5, 6],
|
||||
column.values.as_ref().unwrap().duration_millisecond_values
|
||||
);
|
||||
|
||||
let vector = Arc::new(DurationSecondVector::from_vec(vec![10, 11, 12]));
|
||||
push_vals(&mut column, 3, vector);
|
||||
assert_eq!(
|
||||
vec![10, 11, 12],
|
||||
column.values.as_ref().unwrap().duration_second_values
|
||||
);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_column_put_vector() {
|
||||
use crate::v1::SemanticType;
|
||||
@@ -1699,39 +1550,6 @@ mod tests {
|
||||
assert_eq!(expect, actual);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_convert_duration_values() {
|
||||
// second
|
||||
let actual = pb_values_to_values(
|
||||
&ConcreteDataType::Duration(DurationType::Second(DurationSecondType)),
|
||||
Values {
|
||||
duration_second_values: vec![1_i64, 2_i64, 3_i64],
|
||||
..Default::default()
|
||||
},
|
||||
);
|
||||
let expect = vec![
|
||||
Value::Duration(Duration::new_second(1_i64)),
|
||||
Value::Duration(Duration::new_second(2_i64)),
|
||||
Value::Duration(Duration::new_second(3_i64)),
|
||||
];
|
||||
assert_eq!(expect, actual);
|
||||
|
||||
// millisecond
|
||||
let actual = pb_values_to_values(
|
||||
&ConcreteDataType::Duration(DurationType::Millisecond(DurationMillisecondType)),
|
||||
Values {
|
||||
duration_millisecond_values: vec![1_i64, 2_i64, 3_i64],
|
||||
..Default::default()
|
||||
},
|
||||
);
|
||||
let expect = vec![
|
||||
Value::Duration(Duration::new_millisecond(1_i64)),
|
||||
Value::Duration(Duration::new_millisecond(2_i64)),
|
||||
Value::Duration(Duration::new_millisecond(3_i64)),
|
||||
];
|
||||
assert_eq!(expect, actual);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_convert_interval_values() {
|
||||
// year_month
|
||||
|
||||
@@ -14,12 +14,12 @@ workspace = true
|
||||
[dependencies]
|
||||
api.workspace = true
|
||||
async-trait.workspace = true
|
||||
common-base.workspace = true
|
||||
common-error.workspace = true
|
||||
common-macro.workspace = true
|
||||
common-telemetry.workspace = true
|
||||
digest = "0.10"
|
||||
notify.workspace = true
|
||||
secrecy = { version = "0.8", features = ["serde", "alloc"] }
|
||||
sha1 = "0.10"
|
||||
snafu.workspace = true
|
||||
sql.workspace = true
|
||||
|
||||
@@ -14,8 +14,8 @@
|
||||
|
||||
use std::sync::Arc;
|
||||
|
||||
use common_base::secrets::SecretString;
|
||||
use digest::Digest;
|
||||
use secrecy::SecretString;
|
||||
use sha1::Sha1;
|
||||
use snafu::{ensure, OptionExt};
|
||||
|
||||
|
||||
@@ -34,11 +34,13 @@ pub enum Error {
|
||||
Io {
|
||||
#[snafu(source)]
|
||||
error: std::io::Error,
|
||||
#[snafu(implicit)]
|
||||
location: Location,
|
||||
},
|
||||
|
||||
#[snafu(display("Auth failed"))]
|
||||
AuthBackend {
|
||||
#[snafu(implicit)]
|
||||
location: Location,
|
||||
source: BoxedError,
|
||||
},
|
||||
@@ -72,7 +74,10 @@ pub enum Error {
|
||||
},
|
||||
|
||||
#[snafu(display("User is not authorized to perform this action"))]
|
||||
PermissionDenied { location: Location },
|
||||
PermissionDenied {
|
||||
#[snafu(implicit)]
|
||||
location: Location,
|
||||
},
|
||||
}
|
||||
|
||||
impl ErrorExt for Error {
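The recurring change across these error enums is marking `location` fields with `#[snafu(implicit)]` (and the underlying error with `#[snafu(source)]`), so the location is captured automatically by the generated context selector instead of being supplied at every call site. A minimal sketch of the pattern, assuming snafu 0.8-style implicit context; the variant name and message are illustrative only:

    use snafu::{Location, Snafu};

    #[derive(Debug, Snafu)]
    #[snafu(visibility(pub))]
    pub enum Error {
        #[snafu(display("Failed to read the user provider file"))]
        ReadFile {
            // The underlying I/O error becomes the `source`.
            #[snafu(source)]
            error: std::io::Error,
            // Filled in automatically when the selector builds the error,
            // so call sites only need `.context(ReadFileSnafu)?`.
            #[snafu(implicit)]
            location: Location,
        },
    }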
|
||||
|
||||
@@ -12,7 +12,7 @@
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
use secrecy::ExposeSecret;
|
||||
use common_base::secrets::ExposeSecret;
|
||||
|
||||
use crate::error::{
|
||||
AccessDeniedSnafu, Result, UnsupportedPasswordTypeSnafu, UserNotFoundSnafu,
|
||||
|
||||
@@ -21,7 +21,7 @@ use std::io;
|
||||
use std::io::BufRead;
|
||||
use std::path::Path;
|
||||
|
||||
use secrecy::ExposeSecret;
|
||||
use common_base::secrets::ExposeSecret;
|
||||
use snafu::{ensure, OptionExt, ResultExt};
|
||||
|
||||
use crate::common::{Identity, Password};
|
||||
|
||||
src/cache/Cargo.toml (new file, 14 lines)
@@ -0,0 +1,14 @@
[package]
name = "cache"
version.workspace = true
edition.workspace = true
license.workspace = true

[dependencies]
catalog.workspace = true
common-error.workspace = true
common-macro.workspace = true
common-meta.workspace = true
moka.workspace = true
snafu.workspace = true
substrait.workspace = true
src/cache/src/error.rs (new file, 44 lines)
@@ -0,0 +1,44 @@
// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

use common_error::ext::ErrorExt;
use common_error::status_code::StatusCode;
use common_macro::stack_trace_debug;
use snafu::{Location, Snafu};

#[derive(Snafu)]
#[snafu(visibility(pub))]
#[stack_trace_debug]
pub enum Error {
    #[snafu(display("Failed to get cache from cache registry: {}", name))]
    CacheRequired {
        #[snafu(implicit)]
        location: Location,
        name: String,
    },
}

pub type Result<T> = std::result::Result<T, Error>;

impl ErrorExt for Error {
    fn status_code(&self) -> StatusCode {
        match self {
            Error::CacheRequired { .. } => StatusCode::Internal,
        }
    }

    fn as_any(&self) -> &dyn std::any::Any {
        self
    }
}
src/cache/src/lib.rs (new file, 135 lines)
@@ -0,0 +1,135 @@
|
||||
// Copyright 2023 Greptime Team
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
pub mod error;
|
||||
|
||||
use std::sync::Arc;
|
||||
use std::time::Duration;
|
||||
|
||||
use catalog::kvbackend::new_table_cache;
|
||||
use common_meta::cache::{
|
||||
new_table_flownode_set_cache, new_table_info_cache, new_table_name_cache,
|
||||
new_table_route_cache, new_view_info_cache, CacheRegistry, CacheRegistryBuilder,
|
||||
LayeredCacheRegistryBuilder,
|
||||
};
|
||||
use common_meta::kv_backend::KvBackendRef;
|
||||
use moka::future::CacheBuilder;
|
||||
use snafu::OptionExt;
|
||||
|
||||
use crate::error::Result;
|
||||
|
||||
const DEFAULT_CACHE_MAX_CAPACITY: u64 = 65536;
|
||||
const DEFAULT_CACHE_TTL: Duration = Duration::from_secs(10 * 60);
|
||||
const DEFAULT_CACHE_TTI: Duration = Duration::from_secs(5 * 60);
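// Note on the two expirations below: with moka's `CacheBuilder`, `time_to_live`
// evicts an entry a fixed period after insertion regardless of use, while
// `time_to_idle` evicts it once it has gone unaccessed for that period, so
// rarely-read entries expire after the shorter idle timeout.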
|
||||
|
||||
pub const TABLE_INFO_CACHE_NAME: &str = "table_info_cache";
|
||||
pub const VIEW_INFO_CACHE_NAME: &str = "view_info_cache";
|
||||
pub const TABLE_NAME_CACHE_NAME: &str = "table_name_cache";
|
||||
pub const TABLE_CACHE_NAME: &str = "table_cache";
|
||||
pub const TABLE_FLOWNODE_SET_CACHE_NAME: &str = "table_flownode_set_cache";
|
||||
pub const TABLE_ROUTE_CACHE_NAME: &str = "table_route_cache";
|
||||
|
||||
pub fn build_fundamental_cache_registry(kv_backend: KvBackendRef) -> CacheRegistry {
|
||||
// Builds table info cache
|
||||
let cache = CacheBuilder::new(DEFAULT_CACHE_MAX_CAPACITY)
|
||||
.time_to_live(DEFAULT_CACHE_TTL)
|
||||
.time_to_idle(DEFAULT_CACHE_TTI)
|
||||
.build();
|
||||
let table_info_cache = Arc::new(new_table_info_cache(
|
||||
TABLE_INFO_CACHE_NAME.to_string(),
|
||||
cache,
|
||||
kv_backend.clone(),
|
||||
));
|
||||
|
||||
// Builds table name cache
|
||||
let cache = CacheBuilder::new(DEFAULT_CACHE_MAX_CAPACITY)
|
||||
.time_to_live(DEFAULT_CACHE_TTL)
|
||||
.time_to_idle(DEFAULT_CACHE_TTI)
|
||||
.build();
|
||||
let table_name_cache = Arc::new(new_table_name_cache(
|
||||
TABLE_NAME_CACHE_NAME.to_string(),
|
||||
cache,
|
||||
kv_backend.clone(),
|
||||
));
|
||||
|
||||
// Builds table route cache
|
||||
let cache = CacheBuilder::new(DEFAULT_CACHE_MAX_CAPACITY)
|
||||
.time_to_live(DEFAULT_CACHE_TTL)
|
||||
.time_to_idle(DEFAULT_CACHE_TTI)
|
||||
.build();
|
||||
let table_route_cache = Arc::new(new_table_route_cache(
|
||||
TABLE_ROUTE_CACHE_NAME.to_string(),
|
||||
cache,
|
||||
kv_backend.clone(),
|
||||
));
|
||||
|
||||
// Builds table flownode set cache
|
||||
let cache = CacheBuilder::new(DEFAULT_CACHE_MAX_CAPACITY)
|
||||
.time_to_live(DEFAULT_CACHE_TTL)
|
||||
.time_to_idle(DEFAULT_CACHE_TTI)
|
||||
.build();
|
||||
let table_flownode_set_cache = Arc::new(new_table_flownode_set_cache(
|
||||
TABLE_FLOWNODE_SET_CACHE_NAME.to_string(),
|
||||
cache,
|
||||
kv_backend.clone(),
|
||||
));
|
||||
// Builds the view info cache
|
||||
let cache = CacheBuilder::new(DEFAULT_CACHE_MAX_CAPACITY)
|
||||
.time_to_live(DEFAULT_CACHE_TTL)
|
||||
.time_to_idle(DEFAULT_CACHE_TTI)
|
||||
.build();
|
||||
let view_info_cache = Arc::new(new_view_info_cache(
|
||||
VIEW_INFO_CACHE_NAME.to_string(),
|
||||
cache,
|
||||
kv_backend.clone(),
|
||||
));
|
||||
|
||||
CacheRegistryBuilder::default()
|
||||
.add_cache(table_info_cache)
|
||||
.add_cache(table_name_cache)
|
||||
.add_cache(table_route_cache)
|
||||
.add_cache(view_info_cache)
|
||||
.add_cache(table_flownode_set_cache)
|
||||
.build()
|
||||
}
|
||||
|
||||
// TODO(weny): Make the cache configurable.
|
||||
pub fn with_default_composite_cache_registry(
|
||||
builder: LayeredCacheRegistryBuilder,
|
||||
) -> Result<LayeredCacheRegistryBuilder> {
|
||||
let table_info_cache = builder.get().context(error::CacheRequiredSnafu {
|
||||
name: TABLE_INFO_CACHE_NAME,
|
||||
})?;
|
||||
let table_name_cache = builder.get().context(error::CacheRequiredSnafu {
|
||||
name: TABLE_NAME_CACHE_NAME,
|
||||
})?;
|
||||
|
||||
// Builds table cache
|
||||
let cache = CacheBuilder::new(DEFAULT_CACHE_MAX_CAPACITY)
|
||||
.time_to_live(DEFAULT_CACHE_TTL)
|
||||
.time_to_idle(DEFAULT_CACHE_TTI)
|
||||
.build();
|
||||
let table_cache = Arc::new(new_table_cache(
|
||||
TABLE_CACHE_NAME.to_string(),
|
||||
cache,
|
||||
table_info_cache,
|
||||
table_name_cache,
|
||||
));
|
||||
|
||||
let registry = CacheRegistryBuilder::default()
|
||||
.add_cache(table_cache)
|
||||
.build();
|
||||
|
||||
Ok(builder.add_cache_registry(registry))
|
||||
}
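Putting the two helpers together, a hypothetical wiring sketch that uses only the functions shown above (where `layered_builder: LayeredCacheRegistryBuilder` and `kv_backend: KvBackendRef` come from is assumed):

    // Build the fundamental caches (table info/name/route, flownode set, view info)
    // backed by the kv backend and register them as the base layer.
    let fundamental = build_fundamental_cache_registry(kv_backend.clone());
    let layered_builder = layered_builder.add_cache_registry(fundamental);
    // Layer the composite table cache, which is built from the fundamental
    // table-info and table-name caches retrieved from the builder.
    let layered_builder = with_default_composite_cache_registry(layered_builder)?;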
|
||||
@@ -16,7 +16,9 @@ arrow.workspace = true
|
||||
arrow-schema.workspace = true
|
||||
async-stream.workspace = true
|
||||
async-trait = "0.1"
|
||||
bytes.workspace = true
|
||||
common-catalog.workspace = true
|
||||
common-config.workspace = true
|
||||
common-error.workspace = true
|
||||
common-macro.workspace = true
|
||||
common-meta.workspace = true
|
||||
@@ -30,6 +32,7 @@ datafusion.workspace = true
|
||||
datatypes.workspace = true
|
||||
futures = "0.3"
|
||||
futures-util.workspace = true
|
||||
humantime.workspace = true
|
||||
itertools.workspace = true
|
||||
lazy_static.workspace = true
|
||||
meta-client.workspace = true
|
||||
@@ -46,8 +49,11 @@ table.workspace = true
|
||||
tokio.workspace = true
|
||||
|
||||
[dev-dependencies]
|
||||
cache.workspace = true
|
||||
catalog = { workspace = true, features = ["testing"] }
|
||||
chrono.workspace = true
|
||||
common-meta = { workspace = true, features = ["testing"] }
|
||||
common-query = { workspace = true, features = ["testing"] }
|
||||
common-test-util.workspace = true
|
||||
log-store.workspace = true
|
||||
object-store.workspace = true
|
||||
|
||||
@@ -19,10 +19,7 @@ use common_error::ext::{BoxedError, ErrorExt};
|
||||
use common_error::status_code::StatusCode;
|
||||
use common_macro::stack_trace_debug;
|
||||
use datafusion::error::DataFusionError;
|
||||
use datatypes::prelude::ConcreteDataType;
|
||||
use snafu::{Location, Snafu};
|
||||
use table::metadata::TableId;
|
||||
use tokio::task::JoinError;
|
||||
|
||||
#[derive(Snafu)]
|
||||
#[snafu(visibility(pub))]
|
||||
@@ -30,12 +27,14 @@ use tokio::task::JoinError;
|
||||
pub enum Error {
|
||||
#[snafu(display("Failed to list catalogs"))]
|
||||
ListCatalogs {
|
||||
#[snafu(implicit)]
|
||||
location: Location,
|
||||
source: BoxedError,
|
||||
},
|
||||
|
||||
#[snafu(display("Failed to list {}'s schemas", catalog))]
|
||||
ListSchemas {
|
||||
#[snafu(implicit)]
|
||||
location: Location,
|
||||
catalog: String,
|
||||
source: BoxedError,
|
||||
@@ -43,80 +42,46 @@ pub enum Error {
|
||||
|
||||
#[snafu(display("Failed to list {}.{}'s tables", catalog, schema))]
|
||||
ListTables {
|
||||
#[snafu(implicit)]
|
||||
location: Location,
|
||||
catalog: String,
|
||||
schema: String,
|
||||
source: BoxedError,
|
||||
},
|
||||
|
||||
#[snafu(display("Failed to re-compile script due to internal error"))]
|
||||
CompileScriptInternal {
|
||||
#[snafu(display("Failed to list nodes in cluster: {source}"))]
|
||||
ListNodes {
|
||||
#[snafu(implicit)]
|
||||
location: Location,
|
||||
source: BoxedError,
|
||||
},
|
||||
#[snafu(display("Failed to open system catalog table"))]
|
||||
OpenSystemCatalog {
|
||||
location: Location,
|
||||
source: table::error::Error,
|
||||
},
|
||||
|
||||
#[snafu(display("Failed to create system catalog table"))]
|
||||
CreateSystemCatalog {
|
||||
#[snafu(display("Failed to re-compile script due to internal error"))]
|
||||
CompileScriptInternal {
|
||||
#[snafu(implicit)]
|
||||
location: Location,
|
||||
source: table::error::Error,
|
||||
source: BoxedError,
|
||||
},
|
||||
|
||||
#[snafu(display("Failed to create table, table info: {}", table_info))]
|
||||
CreateTable {
|
||||
table_info: String,
|
||||
#[snafu(implicit)]
|
||||
location: Location,
|
||||
source: table::error::Error,
|
||||
},
|
||||
|
||||
#[snafu(display("System catalog is not valid: {}", msg))]
|
||||
SystemCatalog { msg: String, location: Location },
|
||||
|
||||
#[snafu(display(
|
||||
"System catalog table type mismatch, expected: binary, found: {:?}",
|
||||
data_type,
|
||||
))]
|
||||
SystemCatalogTypeMismatch {
|
||||
data_type: ConcreteDataType,
|
||||
SystemCatalog {
|
||||
msg: String,
|
||||
#[snafu(implicit)]
|
||||
location: Location,
|
||||
},
|
||||
|
||||
#[snafu(display("Invalid system catalog entry type: {:?}", entry_type))]
|
||||
InvalidEntryType {
|
||||
entry_type: Option<u8>,
|
||||
location: Location,
|
||||
},
|
||||
|
||||
#[snafu(display("Invalid system catalog key: {:?}", key))]
|
||||
InvalidKey {
|
||||
key: Option<String>,
|
||||
location: Location,
|
||||
},
|
||||
|
||||
#[snafu(display("Catalog value is not present"))]
|
||||
EmptyValue { location: Location },
|
||||
|
||||
#[snafu(display("Failed to deserialize value"))]
|
||||
ValueDeserialize {
|
||||
#[snafu(source)]
|
||||
error: serde_json::error::Error,
|
||||
location: Location,
|
||||
},
|
||||
|
||||
#[snafu(display("Table engine not found: {}", engine_name))]
|
||||
TableEngineNotFound {
|
||||
engine_name: String,
|
||||
location: Location,
|
||||
source: table::error::Error,
|
||||
},
|
||||
|
||||
#[snafu(display("Cannot find catalog by name: {}", catalog_name))]
|
||||
CatalogNotFound {
|
||||
catalog_name: String,
|
||||
#[snafu(implicit)]
|
||||
location: Location,
|
||||
},
|
||||
|
||||
@@ -124,43 +89,28 @@ pub enum Error {
|
||||
SchemaNotFound {
|
||||
catalog: String,
|
||||
schema: String,
|
||||
#[snafu(implicit)]
|
||||
location: Location,
|
||||
},
|
||||
|
||||
#[snafu(display("Table `{}` already exists", table))]
|
||||
TableExists { table: String, location: Location },
|
||||
TableExists {
|
||||
table: String,
|
||||
#[snafu(implicit)]
|
||||
location: Location,
|
||||
},
|
||||
|
||||
#[snafu(display("Table not found: {}", table))]
|
||||
TableNotExist { table: String, location: Location },
|
||||
|
||||
#[snafu(display("Schema {} already exists", schema))]
|
||||
SchemaExists { schema: String, location: Location },
|
||||
|
||||
#[snafu(display("Operation {} not implemented yet", operation))]
|
||||
Unimplemented {
|
||||
operation: String,
|
||||
TableNotExist {
|
||||
table: String,
|
||||
#[snafu(implicit)]
|
||||
location: Location,
|
||||
},
|
||||
|
||||
#[snafu(display("Operation {} not supported", op))]
|
||||
NotSupported { op: String, location: Location },
|
||||
|
||||
#[snafu(display("Failed to open table {table_id}"))]
|
||||
OpenTable {
|
||||
table_id: TableId,
|
||||
location: Location,
|
||||
source: table::error::Error,
|
||||
},
|
||||
|
||||
#[snafu(display("Failed to open table in parallel"))]
|
||||
ParallelOpenTable {
|
||||
#[snafu(source)]
|
||||
error: JoinError,
|
||||
},
|
||||
|
||||
#[snafu(display("Table not found while opening table, table info: {}", table_info))]
|
||||
TableNotFound {
|
||||
table_info: String,
|
||||
#[snafu(display("View info not found: {}", name))]
|
||||
ViewInfoNotFound {
|
||||
name: String,
|
||||
#[snafu(implicit)]
|
||||
location: Location,
|
||||
},
|
||||
|
||||
@@ -170,59 +120,44 @@ pub enum Error {
|
||||
#[snafu(display("Failed to find region routes"))]
|
||||
FindRegionRoutes { source: partition::error::Error },
|
||||
|
||||
#[snafu(display("Failed to read system catalog table records"))]
|
||||
ReadSystemCatalog {
|
||||
location: Location,
|
||||
source: common_recordbatch::error::Error,
|
||||
},
|
||||
|
||||
#[snafu(display("Failed to create recordbatch"))]
|
||||
CreateRecordBatch {
|
||||
#[snafu(implicit)]
|
||||
location: Location,
|
||||
source: common_recordbatch::error::Error,
|
||||
},
|
||||
|
||||
#[snafu(display("Failed to insert table creation record to system catalog"))]
|
||||
InsertCatalogRecord {
|
||||
location: Location,
|
||||
source: table::error::Error,
|
||||
},
|
||||
|
||||
#[snafu(display("Failed to scan system catalog table"))]
|
||||
SystemCatalogTableScan {
|
||||
location: Location,
|
||||
source: table::error::Error,
|
||||
},
|
||||
|
||||
#[snafu(display("Internal error"))]
|
||||
Internal {
|
||||
#[snafu(implicit)]
|
||||
location: Location,
|
||||
source: BoxedError,
|
||||
},
|
||||
|
||||
#[snafu(display("Failed to upgrade weak catalog manager reference"))]
|
||||
UpgradeWeakCatalogManagerRef { location: Location },
|
||||
UpgradeWeakCatalogManagerRef {
|
||||
#[snafu(implicit)]
|
||||
location: Location,
|
||||
},
|
||||
|
||||
#[snafu(display("Failed to execute system catalog table scan"))]
|
||||
SystemCatalogTableScanExec {
|
||||
#[snafu(display("Failed to decode logical plan for view: {}", name))]
|
||||
DecodePlan {
|
||||
name: String,
|
||||
#[snafu(implicit)]
|
||||
location: Location,
|
||||
source: common_query::error::Error,
|
||||
},
|
||||
|
||||
#[snafu(display("Cannot parse catalog value"))]
|
||||
InvalidCatalogValue {
|
||||
location: Location,
|
||||
source: common_catalog::error::Error,
|
||||
},
|
||||
|
||||
#[snafu(display("Failed to perform metasrv operation"))]
|
||||
Metasrv {
|
||||
#[snafu(implicit)]
|
||||
location: Location,
|
||||
source: meta_client::error::Error,
|
||||
},
|
||||
|
||||
#[snafu(display("Invalid table info in catalog"))]
|
||||
InvalidTableInfoInCatalog {
|
||||
#[snafu(implicit)]
|
||||
location: Location,
|
||||
source: datatypes::error::Error,
|
||||
},
|
||||
@@ -234,29 +169,43 @@ pub enum Error {
|
||||
Datafusion {
|
||||
#[snafu(source)]
|
||||
error: DataFusionError,
|
||||
#[snafu(implicit)]
|
||||
location: Location,
|
||||
},
|
||||
|
||||
#[snafu(display("Table schema mismatch"))]
|
||||
TableSchemaMismatch {
|
||||
location: Location,
|
||||
source: table::error::Error,
|
||||
},
|
||||
|
||||
#[snafu(display("A generic error has occurred, msg: {}", msg))]
|
||||
Generic { msg: String, location: Location },
|
||||
|
||||
#[snafu(display("Table metadata manager error"))]
|
||||
TableMetadataManager {
|
||||
source: common_meta::error::Error,
|
||||
#[snafu(implicit)]
|
||||
location: Location,
|
||||
},
|
||||
|
||||
#[snafu(display("Get null from table cache, key: {}", key))]
|
||||
TableCacheNotGet { key: String, location: Location },
|
||||
#[snafu(display("Failed to get table cache"))]
|
||||
GetTableCache {
|
||||
source: common_meta::error::Error,
|
||||
#[snafu(implicit)]
|
||||
location: Location,
|
||||
},
|
||||
|
||||
#[snafu(display("Failed to get table cache, err: {}", err_msg))]
|
||||
GetTableCache { err_msg: String },
|
||||
#[snafu(display("Failed to get view info from cache"))]
|
||||
GetViewCache {
|
||||
source: common_meta::error::Error,
|
||||
#[snafu(implicit)]
|
||||
location: Location,
|
||||
},
|
||||
|
||||
#[snafu(display("Cache not found: {name}"))]
|
||||
CacheNotFound {
|
||||
name: String,
|
||||
#[snafu(implicit)]
|
||||
location: Location,
|
||||
},
|
||||
|
||||
#[snafu(display("Failed to cast the catalog manager"))]
|
||||
CastManager {
|
||||
#[snafu(implicit)]
|
||||
location: Location,
|
||||
},
|
||||
}
|
||||
|
||||
pub type Result<T> = std::result::Result<T, Error>;
|
||||
@@ -264,60 +213,43 @@ pub type Result<T> = std::result::Result<T, Error>;
|
||||
impl ErrorExt for Error {
|
||||
fn status_code(&self) -> StatusCode {
|
||||
match self {
|
||||
Error::InvalidKey { .. }
|
||||
| Error::SchemaNotFound { .. }
|
||||
Error::SchemaNotFound { .. }
|
||||
| Error::CatalogNotFound { .. }
|
||||
| Error::FindPartitions { .. }
|
||||
| Error::FindRegionRoutes { .. }
|
||||
| Error::InvalidEntryType { .. }
|
||||
| Error::ParallelOpenTable { .. } => StatusCode::Unexpected,
|
||||
| Error::CacheNotFound { .. }
|
||||
| Error::CastManager { .. } => StatusCode::Unexpected,
|
||||
|
||||
Error::TableNotFound { .. } => StatusCode::TableNotFound,
|
||||
Error::ViewInfoNotFound { .. } => StatusCode::TableNotFound,
|
||||
|
||||
Error::SystemCatalog { .. }
|
||||
| Error::EmptyValue { .. }
|
||||
| Error::ValueDeserialize { .. } => StatusCode::StorageUnavailable,
|
||||
Error::SystemCatalog { .. } => StatusCode::StorageUnavailable,
|
||||
|
||||
Error::Generic { .. }
|
||||
| Error::SystemCatalogTypeMismatch { .. }
|
||||
| Error::UpgradeWeakCatalogManagerRef { .. } => StatusCode::Internal,
|
||||
|
||||
Error::ReadSystemCatalog { source, .. } | Error::CreateRecordBatch { source, .. } => {
|
||||
source.status_code()
|
||||
}
|
||||
Error::InvalidCatalogValue { source, .. } => source.status_code(),
|
||||
Error::UpgradeWeakCatalogManagerRef { .. } => StatusCode::Internal,
|
||||
|
||||
Error::CreateRecordBatch { source, .. } => source.status_code(),
|
||||
Error::TableExists { .. } => StatusCode::TableAlreadyExists,
|
||||
Error::TableNotExist { .. } => StatusCode::TableNotFound,
|
||||
Error::SchemaExists { .. } | Error::TableEngineNotFound { .. } => {
|
||||
StatusCode::InvalidArguments
|
||||
}
|
||||
|
||||
Error::ListCatalogs { source, .. }
|
||||
| Error::ListNodes { source, .. }
|
||||
| Error::ListSchemas { source, .. }
|
||||
| Error::ListTables { source, .. } => source.status_code(),
|
||||
|
||||
Error::OpenSystemCatalog { source, .. }
|
||||
| Error::CreateSystemCatalog { source, .. }
|
||||
| Error::InsertCatalogRecord { source, .. }
|
||||
| Error::OpenTable { source, .. }
|
||||
| Error::CreateTable { source, .. }
|
||||
| Error::TableSchemaMismatch { source, .. } => source.status_code(),
|
||||
Error::CreateTable { source, .. } => source.status_code(),
|
||||
|
||||
Error::Metasrv { source, .. } => source.status_code(),
|
||||
Error::SystemCatalogTableScan { source, .. } => source.status_code(),
|
||||
Error::SystemCatalogTableScanExec { source, .. } => source.status_code(),
|
||||
Error::DecodePlan { source, .. } => source.status_code(),
|
||||
Error::InvalidTableInfoInCatalog { source, .. } => source.status_code(),
|
||||
|
||||
Error::CompileScriptInternal { source, .. } | Error::Internal { source, .. } => {
|
||||
source.status_code()
|
||||
}
|
||||
|
||||
Error::Unimplemented { .. } | Error::NotSupported { .. } => StatusCode::Unsupported,
|
||||
Error::QueryAccessDenied { .. } => StatusCode::AccessDenied,
|
||||
Error::Datafusion { .. } => StatusCode::EngineExecuteQuery,
|
||||
Error::TableMetadataManager { source, .. } => source.status_code(),
|
||||
Error::TableCacheNotGet { .. } | Error::GetTableCache { .. } => StatusCode::Internal,
|
||||
Error::GetViewCache { source, .. } | Error::GetTableCache { source, .. } => {
|
||||
source.status_code()
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@@ -349,11 +281,6 @@ mod tests {
|
||||
.status_code()
|
||||
);
|
||||
|
||||
assert_eq!(
|
||||
StatusCode::Unexpected,
|
||||
InvalidKeySnafu { key: None }.build().status_code()
|
||||
);
|
||||
|
||||
assert_eq!(
|
||||
StatusCode::StorageUnavailable,
|
||||
Error::SystemCatalog {
|
||||
@@ -362,19 +289,6 @@ mod tests {
|
||||
}
|
||||
.status_code()
|
||||
);
|
||||
|
||||
assert_eq!(
|
||||
StatusCode::Internal,
|
||||
Error::SystemCatalogTypeMismatch {
|
||||
data_type: ConcreteDataType::binary_datatype(),
|
||||
location: Location::generate(),
|
||||
}
|
||||
.status_code()
|
||||
);
|
||||
assert_eq!(
|
||||
StatusCode::StorageUnavailable,
|
||||
EmptyValueSnafu {}.build().status_code()
|
||||
);
|
||||
}
|
||||
|
||||
#[test]
|
||||
|
||||
@@ -12,6 +12,7 @@
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
mod cluster_info;
|
||||
pub mod columns;
|
||||
pub mod key_column_usage;
|
||||
mod memory_table;
|
||||
@@ -23,6 +24,7 @@ pub mod schemata;
|
||||
mod table_constraints;
|
||||
mod table_names;
|
||||
pub mod tables;
|
||||
pub(crate) mod utils;
|
||||
|
||||
use std::collections::HashMap;
|
||||
use std::sync::{Arc, Weak};
|
||||
@@ -47,6 +49,7 @@ pub use table_names::*;
|
||||
|
||||
use self::columns::InformationSchemaColumns;
|
||||
use crate::error::Result;
|
||||
use crate::information_schema::cluster_info::InformationSchemaClusterInfo;
|
||||
use crate::information_schema::key_column_usage::InformationSchemaKeyColumnUsage;
|
||||
use crate::information_schema::memory_table::{get_schema_columns, MemoryTable};
|
||||
use crate::information_schema::partitions::InformationSchemaPartitions;
|
||||
@@ -150,6 +153,7 @@ impl InformationSchemaProvider {
|
||||
fn build_tables(&mut self) {
|
||||
let mut tables = HashMap::new();
|
||||
|
||||
// SECURITY NOTE:
|
||||
// Carefully consider the tables that may expose sensitive cluster configurations,
|
||||
// authentication details, and other critical information.
|
||||
// Only put these tables under `greptime` catalog to prevent info leak.
|
||||
@@ -166,6 +170,10 @@ impl InformationSchemaProvider {
|
||||
REGION_PEERS.to_string(),
|
||||
self.build_table(REGION_PEERS).unwrap(),
|
||||
);
|
||||
tables.insert(
|
||||
CLUSTER_INFO.to_string(),
|
||||
self.build_table(CLUSTER_INFO).unwrap(),
|
||||
);
|
||||
}
|
||||
|
||||
tables.insert(TABLES.to_string(), self.build_table(TABLES).unwrap());
|
||||
@@ -251,6 +259,9 @@ impl InformationSchemaProvider {
|
||||
self.catalog_name.clone(),
|
||||
self.catalog_manager.clone(),
|
||||
)) as _),
|
||||
CLUSTER_INFO => Some(Arc::new(InformationSchemaClusterInfo::new(
|
||||
self.catalog_manager.clone(),
|
||||
)) as _),
|
||||
_ => None,
|
||||
}
|
||||
}
|
||||
|
||||
src/catalog/src/information_schema/cluster_info.rs (new file, 317 lines)
@@ -0,0 +1,317 @@
|
||||
// Copyright 2023 Greptime Team
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
use std::sync::{Arc, Weak};
|
||||
use std::time::Duration;
|
||||
|
||||
use arrow_schema::SchemaRef as ArrowSchemaRef;
|
||||
use common_catalog::consts::INFORMATION_SCHEMA_CLUSTER_INFO_TABLE_ID;
|
||||
use common_config::Mode;
|
||||
use common_error::ext::BoxedError;
|
||||
use common_meta::cluster::{ClusterInfo, NodeInfo, NodeStatus};
|
||||
use common_meta::peer::Peer;
|
||||
use common_recordbatch::adapter::RecordBatchStreamAdapter;
|
||||
use common_recordbatch::{RecordBatch, SendableRecordBatchStream};
|
||||
use common_telemetry::warn;
|
||||
use common_time::timestamp::Timestamp;
|
||||
use datafusion::execution::TaskContext;
|
||||
use datafusion::physical_plan::stream::RecordBatchStreamAdapter as DfRecordBatchStreamAdapter;
|
||||
use datafusion::physical_plan::streaming::PartitionStream as DfPartitionStream;
|
||||
use datafusion::physical_plan::SendableRecordBatchStream as DfSendableRecordBatchStream;
|
||||
use datatypes::prelude::{ConcreteDataType, ScalarVectorBuilder, VectorRef};
|
||||
use datatypes::schema::{ColumnSchema, Schema, SchemaRef};
|
||||
use datatypes::timestamp::TimestampMillisecond;
|
||||
use datatypes::value::Value;
|
||||
use datatypes::vectors::{
|
||||
Int64VectorBuilder, StringVectorBuilder, TimestampMillisecondVectorBuilder,
|
||||
};
|
||||
use snafu::ResultExt;
|
||||
use store_api::storage::{ScanRequest, TableId};
|
||||
|
||||
use super::CLUSTER_INFO;
|
||||
use crate::error::{CreateRecordBatchSnafu, InternalSnafu, ListNodesSnafu, Result};
|
||||
use crate::information_schema::{utils, InformationTable, Predicates};
|
||||
use crate::CatalogManager;
|
||||
|
||||
const PEER_ID: &str = "peer_id";
|
||||
const PEER_TYPE: &str = "peer_type";
|
||||
const PEER_ADDR: &str = "peer_addr";
|
||||
const VERSION: &str = "version";
|
||||
const GIT_COMMIT: &str = "git_commit";
|
||||
const START_TIME: &str = "start_time";
|
||||
const UPTIME: &str = "uptime";
|
||||
const ACTIVE_TIME: &str = "active_time";
|
||||
|
||||
const INIT_CAPACITY: usize = 42;
|
||||
|
||||
/// The `CLUSTER_INFO` table provides the current topology information of the cluster.
|
||||
///
|
||||
/// - `peer_id`: the peer server id.
|
||||
/// - `peer_type`: the peer type, such as `datanode`, `frontend`, `metasrv` etc.
|
||||
/// - `peer_addr`: the peer gRPC address.
|
||||
/// - `version`: the build package version of the peer.
|
||||
/// - `git_commit`: the build git commit hash of the peer.
|
||||
/// - `start_time`: the starting time of the peer.
|
||||
/// - `uptime`: the uptime of the peer.
|
||||
/// - `active_time`: the time since the last activity of the peer.
|
||||
///
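/// In standalone mode the table reports a single row for the local instance,
/// with `peer_id` 0 and an empty `peer_addr` (see `make_cluster_info` below);
/// in distributed mode one row is produced per node returned by the metasrv.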
|
||||
pub(super) struct InformationSchemaClusterInfo {
|
||||
schema: SchemaRef,
|
||||
catalog_manager: Weak<dyn CatalogManager>,
|
||||
start_time_ms: u64,
|
||||
}
|
||||
|
||||
impl InformationSchemaClusterInfo {
|
||||
pub(super) fn new(catalog_manager: Weak<dyn CatalogManager>) -> Self {
|
||||
Self {
|
||||
schema: Self::schema(),
|
||||
catalog_manager,
|
||||
start_time_ms: common_time::util::current_time_millis() as u64,
|
||||
}
|
||||
}
|
||||
|
||||
pub(crate) fn schema() -> SchemaRef {
|
||||
Arc::new(Schema::new(vec![
|
||||
ColumnSchema::new(PEER_ID, ConcreteDataType::int64_datatype(), false),
|
||||
ColumnSchema::new(PEER_TYPE, ConcreteDataType::string_datatype(), false),
|
||||
ColumnSchema::new(PEER_ADDR, ConcreteDataType::string_datatype(), true),
|
||||
ColumnSchema::new(VERSION, ConcreteDataType::string_datatype(), false),
|
||||
ColumnSchema::new(GIT_COMMIT, ConcreteDataType::string_datatype(), false),
|
||||
ColumnSchema::new(
|
||||
START_TIME,
|
||||
ConcreteDataType::timestamp_millisecond_datatype(),
|
||||
true,
|
||||
),
|
||||
ColumnSchema::new(UPTIME, ConcreteDataType::string_datatype(), true),
|
||||
ColumnSchema::new(ACTIVE_TIME, ConcreteDataType::string_datatype(), true),
|
||||
]))
|
||||
}
|
||||
|
||||
fn builder(&self) -> InformationSchemaClusterInfoBuilder {
|
||||
InformationSchemaClusterInfoBuilder::new(
|
||||
self.schema.clone(),
|
||||
self.catalog_manager.clone(),
|
||||
self.start_time_ms,
|
||||
)
|
||||
}
|
||||
}
|
||||
|
||||
impl InformationTable for InformationSchemaClusterInfo {
|
||||
fn table_id(&self) -> TableId {
|
||||
INFORMATION_SCHEMA_CLUSTER_INFO_TABLE_ID
|
||||
}
|
||||
|
||||
fn table_name(&self) -> &'static str {
|
||||
CLUSTER_INFO
|
||||
}
|
||||
|
||||
fn schema(&self) -> SchemaRef {
|
||||
self.schema.clone()
|
||||
}
|
||||
|
||||
fn to_stream(&self, request: ScanRequest) -> Result<SendableRecordBatchStream> {
|
||||
let schema = self.schema.arrow_schema().clone();
|
||||
let mut builder = self.builder();
|
||||
let stream = Box::pin(DfRecordBatchStreamAdapter::new(
|
||||
schema,
|
||||
futures::stream::once(async move {
|
||||
builder
|
||||
.make_cluster_info(Some(request))
|
||||
.await
|
||||
.map(|x| x.into_df_record_batch())
|
||||
.map_err(Into::into)
|
||||
}),
|
||||
));
|
||||
Ok(Box::pin(
|
||||
RecordBatchStreamAdapter::try_new(stream)
|
||||
.map_err(BoxedError::new)
|
||||
.context(InternalSnafu)?,
|
||||
))
|
||||
}
|
||||
}
|
||||
|
||||
struct InformationSchemaClusterInfoBuilder {
|
||||
schema: SchemaRef,
|
||||
start_time_ms: u64,
|
||||
catalog_manager: Weak<dyn CatalogManager>,
|
||||
|
||||
peer_ids: Int64VectorBuilder,
|
||||
peer_types: StringVectorBuilder,
|
||||
peer_addrs: StringVectorBuilder,
|
||||
versions: StringVectorBuilder,
|
||||
git_commits: StringVectorBuilder,
|
||||
start_times: TimestampMillisecondVectorBuilder,
|
||||
uptimes: StringVectorBuilder,
|
||||
active_times: StringVectorBuilder,
|
||||
}
|
||||
|
||||
impl InformationSchemaClusterInfoBuilder {
|
||||
fn new(
|
||||
schema: SchemaRef,
|
||||
catalog_manager: Weak<dyn CatalogManager>,
|
||||
start_time_ms: u64,
|
||||
) -> Self {
|
||||
Self {
|
||||
schema,
|
||||
catalog_manager,
|
||||
peer_ids: Int64VectorBuilder::with_capacity(INIT_CAPACITY),
|
||||
peer_types: StringVectorBuilder::with_capacity(INIT_CAPACITY),
|
||||
peer_addrs: StringVectorBuilder::with_capacity(INIT_CAPACITY),
|
||||
versions: StringVectorBuilder::with_capacity(INIT_CAPACITY),
|
||||
git_commits: StringVectorBuilder::with_capacity(INIT_CAPACITY),
|
||||
start_times: TimestampMillisecondVectorBuilder::with_capacity(INIT_CAPACITY),
|
||||
uptimes: StringVectorBuilder::with_capacity(INIT_CAPACITY),
|
||||
active_times: StringVectorBuilder::with_capacity(INIT_CAPACITY),
|
||||
start_time_ms,
|
||||
}
|
||||
}
|
||||
|
||||
/// Construct the `information_schema.cluster_info` virtual table
|
||||
async fn make_cluster_info(&mut self, request: Option<ScanRequest>) -> Result<RecordBatch> {
|
||||
let predicates = Predicates::from_scan_request(&request);
|
||||
let mode = utils::running_mode(&self.catalog_manager)?.unwrap_or(Mode::Standalone);
|
||||
|
||||
match mode {
|
||||
Mode::Standalone => {
|
||||
let build_info = common_version::build_info();
|
||||
|
||||
self.add_node_info(
|
||||
&predicates,
|
||||
NodeInfo {
|
||||
// For the standalone:
|
||||
// - id always 0
|
||||
// - empty string for peer_addr
|
||||
peer: Peer {
|
||||
id: 0,
|
||||
addr: "".to_string(),
|
||||
},
|
||||
last_activity_ts: -1,
|
||||
status: NodeStatus::Standalone,
|
||||
version: build_info.version.to_string(),
|
||||
git_commit: build_info.commit_short.to_string(),
|
||||
// Use `self.start_time_ms` instead.
|
||||
// It's not precise, but good enough.
|
||||
start_time_ms: self.start_time_ms,
|
||||
},
|
||||
);
|
||||
}
|
||||
Mode::Distributed => {
|
||||
if let Some(meta_client) = utils::meta_client(&self.catalog_manager)? {
|
||||
let node_infos = meta_client
|
||||
.list_nodes(None)
|
||||
.await
|
||||
.map_err(BoxedError::new)
|
||||
.context(ListNodesSnafu)?;
|
||||
|
||||
for node_info in node_infos {
|
||||
self.add_node_info(&predicates, node_info);
|
||||
}
|
||||
} else {
|
||||
warn!("Could not find meta client in distributed mode.");
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
self.finish()
|
||||
}
|
||||
|
||||
fn add_node_info(&mut self, predicates: &Predicates, node_info: NodeInfo) {
|
||||
let peer_type = node_info.status.role_name();
|
||||
|
||||
let row = [
|
||||
(PEER_ID, &Value::from(node_info.peer.id)),
|
||||
(PEER_TYPE, &Value::from(peer_type)),
|
||||
(PEER_ADDR, &Value::from(node_info.peer.addr.as_str())),
|
||||
(VERSION, &Value::from(node_info.version.as_str())),
|
||||
(GIT_COMMIT, &Value::from(node_info.git_commit.as_str())),
|
||||
];
|
||||
|
||||
if !predicates.eval(&row) {
|
||||
return;
|
||||
}
|
||||
|
||||
if peer_type == "FRONTEND" || peer_type == "METASRV" {
|
||||
// Always set peer_id to be -1 for frontends and metasrvs
|
||||
self.peer_ids.push(Some(-1));
|
||||
} else {
|
||||
self.peer_ids.push(Some(node_info.peer.id as i64));
|
||||
}
|
||||
|
||||
self.peer_types.push(Some(peer_type));
|
||||
self.peer_addrs.push(Some(&node_info.peer.addr));
|
||||
self.versions.push(Some(&node_info.version));
|
||||
self.git_commits.push(Some(&node_info.git_commit));
|
||||
if node_info.start_time_ms > 0 {
|
||||
self.start_times
|
||||
.push(Some(TimestampMillisecond(Timestamp::new_millisecond(
|
||||
node_info.start_time_ms as i64,
|
||||
))));
|
||||
self.uptimes.push(Some(
|
||||
Self::format_duration_since(node_info.start_time_ms).as_str(),
|
||||
));
|
||||
} else {
|
||||
self.start_times.push(None);
|
||||
self.uptimes.push(None);
|
||||
}
|
||||
|
||||
if node_info.last_activity_ts > 0 {
|
||||
self.active_times.push(Some(
|
||||
Self::format_duration_since(node_info.last_activity_ts as u64).as_str(),
|
||||
));
|
||||
} else {
|
||||
self.active_times.push(None);
|
||||
}
|
||||
}
|
||||
|
||||
fn format_duration_since(ts: u64) -> String {
|
||||
let now = common_time::util::current_time_millis() as u64;
|
||||
let duration_since = now - ts;
|
||||
humantime::format_duration(Duration::from_millis(duration_since)).to_string()
|
||||
}
|
||||
|
||||
fn finish(&mut self) -> Result<RecordBatch> {
|
||||
let columns: Vec<VectorRef> = vec![
|
||||
Arc::new(self.peer_ids.finish()),
|
||||
Arc::new(self.peer_types.finish()),
|
||||
Arc::new(self.peer_addrs.finish()),
|
||||
Arc::new(self.versions.finish()),
|
||||
Arc::new(self.git_commits.finish()),
|
||||
Arc::new(self.start_times.finish()),
|
||||
Arc::new(self.uptimes.finish()),
|
||||
Arc::new(self.active_times.finish()),
|
||||
];
|
||||
RecordBatch::new(self.schema.clone(), columns).context(CreateRecordBatchSnafu)
|
||||
}
|
||||
}
|
||||
|
||||
impl DfPartitionStream for InformationSchemaClusterInfo {
|
||||
fn schema(&self) -> &ArrowSchemaRef {
|
||||
self.schema.arrow_schema()
|
||||
}
|
||||
|
||||
fn execute(&self, _: Arc<TaskContext>) -> DfSendableRecordBatchStream {
|
||||
let schema = self.schema.arrow_schema().clone();
|
||||
let mut builder = self.builder();
|
||||
Box::pin(DfRecordBatchStreamAdapter::new(
|
||||
schema,
|
||||
futures::stream::once(async move {
|
||||
builder
|
||||
.make_cluster_info(None)
|
||||
.await
|
||||
.map(|x| x.into_df_record_batch())
|
||||
.map_err(Into::into)
|
||||
}),
|
||||
))
|
||||
}
|
||||
}
|
||||
@@ -20,9 +20,9 @@ use common_catalog::consts::{
|
||||
SEMANTIC_TYPE_TIME_INDEX,
|
||||
};
|
||||
use common_error::ext::BoxedError;
|
||||
use common_query::physical_plan::TaskContext;
|
||||
use common_recordbatch::adapter::RecordBatchStreamAdapter;
|
||||
use common_recordbatch::{RecordBatch, SendableRecordBatchStream};
|
||||
use datafusion::execution::TaskContext;
|
||||
use datafusion::physical_plan::stream::RecordBatchStreamAdapter as DfRecordBatchStreamAdapter;
|
||||
use datafusion::physical_plan::streaming::PartitionStream as DfPartitionStream;
|
||||
use datafusion::physical_plan::SendableRecordBatchStream as DfSendableRecordBatchStream;
|
||||
@@ -258,7 +258,7 @@ impl InformationSchemaColumnsBuilder {
|
||||
let predicates = Predicates::from_scan_request(&request);
|
||||
|
||||
for schema_name in catalog_manager.schema_names(&catalog_name).await? {
|
||||
let mut stream = catalog_manager.tables(&catalog_name, &schema_name).await;
|
||||
let mut stream = catalog_manager.tables(&catalog_name, &schema_name);
|
||||
|
||||
while let Some(table) = stream.try_next().await? {
|
||||
let keys = &table.table_info().meta.primary_key_indices;
|
||||
|
||||
@@ -17,9 +17,9 @@ use std::sync::{Arc, Weak};
|
||||
use arrow_schema::SchemaRef as ArrowSchemaRef;
|
||||
use common_catalog::consts::INFORMATION_SCHEMA_KEY_COLUMN_USAGE_TABLE_ID;
|
||||
use common_error::ext::BoxedError;
|
||||
use common_query::physical_plan::TaskContext;
|
||||
use common_recordbatch::adapter::RecordBatchStreamAdapter;
|
||||
use common_recordbatch::{RecordBatch, SendableRecordBatchStream};
|
||||
use datafusion::execution::TaskContext;
|
||||
use datafusion::physical_plan::stream::RecordBatchStreamAdapter as DfRecordBatchStreamAdapter;
|
||||
use datafusion::physical_plan::streaming::PartitionStream as DfPartitionStream;
|
||||
use datafusion::physical_plan::SendableRecordBatchStream as DfSendableRecordBatchStream;
|
||||
|
||||
@@ -17,9 +17,9 @@ use std::sync::Arc;
|
||||
|
||||
use arrow_schema::SchemaRef as ArrowSchemaRef;
|
||||
use common_error::ext::BoxedError;
|
||||
use common_query::physical_plan::TaskContext;
|
||||
use common_recordbatch::adapter::RecordBatchStreamAdapter;
|
||||
use common_recordbatch::{RecordBatch, SendableRecordBatchStream};
|
||||
use datafusion::execution::TaskContext;
|
||||
use datafusion::physical_plan::stream::RecordBatchStreamAdapter as DfRecordBatchStreamAdapter;
|
||||
use datafusion::physical_plan::streaming::PartitionStream as DfPartitionStream;
|
||||
use datafusion::physical_plan::SendableRecordBatchStream as DfSendableRecordBatchStream;
|
||||
|
||||
@@ -18,10 +18,10 @@ use std::sync::{Arc, Weak};
|
||||
use arrow_schema::SchemaRef as ArrowSchemaRef;
|
||||
use common_catalog::consts::INFORMATION_SCHEMA_PARTITIONS_TABLE_ID;
|
||||
use common_error::ext::BoxedError;
|
||||
use common_query::physical_plan::TaskContext;
|
||||
use common_recordbatch::adapter::RecordBatchStreamAdapter;
|
||||
use common_recordbatch::{RecordBatch, SendableRecordBatchStream};
|
||||
use common_time::datetime::DateTime;
|
||||
use datafusion::execution::TaskContext;
|
||||
use datafusion::physical_plan::stream::RecordBatchStreamAdapter as DfRecordBatchStreamAdapter;
|
||||
use datafusion::physical_plan::streaming::PartitionStream as DfPartitionStream;
|
||||
use datafusion::physical_plan::SendableRecordBatchStream as DfSendableRecordBatchStream;
|
||||
@@ -243,7 +243,6 @@ impl InformationSchemaPartitionsBuilder {
|
||||
for schema_name in catalog_manager.schema_names(&catalog_name).await? {
|
||||
let table_info_stream = catalog_manager
|
||||
.tables(&catalog_name, &schema_name)
|
||||
.await
|
||||
.try_filter_map(|t| async move {
|
||||
let table_info = t.table_info();
|
||||
if table_info.table_type == TableType::Temporary {
|
||||
|
||||
@@ -14,10 +14,9 @@
|
||||
|
||||
use arrow::array::StringArray;
|
||||
use arrow::compute::kernels::comparison;
|
||||
use common_query::logical_plan::DfExpr;
|
||||
use datafusion::common::ScalarValue;
|
||||
use datafusion::logical_expr::expr::Like;
|
||||
use datafusion::logical_expr::Operator;
|
||||
use datafusion::logical_expr::{Expr, Operator};
|
||||
use datatypes::value::Value;
|
||||
use store_api::storage::ScanRequest;
|
||||
|
||||
@@ -118,12 +117,12 @@ impl Predicate {
|
||||
}
|
||||
|
||||
/// Try to create a predicate from a datafusion [`Expr`]; returns `None` on failure.
|
||||
fn from_expr(expr: DfExpr) -> Option<Predicate> {
|
||||
fn from_expr(expr: Expr) -> Option<Predicate> {
|
||||
match expr {
|
||||
// NOT expr
|
||||
DfExpr::Not(expr) => Some(Predicate::Not(Box::new(Self::from_expr(*expr)?))),
|
||||
Expr::Not(expr) => Some(Predicate::Not(Box::new(Self::from_expr(*expr)?))),
|
||||
// expr LIKE pattern
|
||||
DfExpr::Like(Like {
|
||||
Expr::Like(Like {
|
||||
negated,
|
||||
expr,
|
||||
pattern,
|
||||
@@ -131,10 +130,10 @@ impl Predicate {
|
||||
..
|
||||
}) if is_column(&expr) && is_string_literal(&pattern) => {
|
||||
// Safety: ensured by guard
|
||||
let DfExpr::Column(c) = *expr else {
|
||||
let Expr::Column(c) = *expr else {
|
||||
unreachable!();
|
||||
};
|
||||
let DfExpr::Literal(ScalarValue::Utf8(Some(pattern))) = *pattern else {
|
||||
let Expr::Literal(ScalarValue::Utf8(Some(pattern))) = *pattern else {
|
||||
unreachable!();
|
||||
};
|
||||
|
||||
@@ -147,10 +146,10 @@ impl Predicate {
|
||||
}
|
||||
}
|
||||
// left OP right
|
||||
DfExpr::BinaryExpr(bin) => match (*bin.left, bin.op, *bin.right) {
|
||||
Expr::BinaryExpr(bin) => match (*bin.left, bin.op, *bin.right) {
|
||||
// left == right
|
||||
(DfExpr::Literal(scalar), Operator::Eq, DfExpr::Column(c))
|
||||
| (DfExpr::Column(c), Operator::Eq, DfExpr::Literal(scalar)) => {
|
||||
(Expr::Literal(scalar), Operator::Eq, Expr::Column(c))
|
||||
| (Expr::Column(c), Operator::Eq, Expr::Literal(scalar)) => {
|
||||
let Ok(v) = Value::try_from(scalar) else {
|
||||
return None;
|
||||
};
|
||||
@@ -158,8 +157,8 @@ impl Predicate {
|
||||
Some(Predicate::Eq(c.name, v))
|
||||
}
|
||||
// left != right
|
||||
(DfExpr::Literal(scalar), Operator::NotEq, DfExpr::Column(c))
|
||||
| (DfExpr::Column(c), Operator::NotEq, DfExpr::Literal(scalar)) => {
|
||||
(Expr::Literal(scalar), Operator::NotEq, Expr::Column(c))
|
||||
| (Expr::Column(c), Operator::NotEq, Expr::Literal(scalar)) => {
|
||||
let Ok(v) = Value::try_from(scalar) else {
|
||||
return None;
|
||||
};
|
||||
@@ -183,14 +182,14 @@ impl Predicate {
|
||||
_ => None,
|
||||
},
|
||||
// [NOT] IN (LIST)
|
||||
DfExpr::InList(list) => {
|
||||
Expr::InList(list) => {
|
||||
match (*list.expr, list.list, list.negated) {
|
||||
// column [NOT] IN (v1, v2, v3, ...)
|
||||
(DfExpr::Column(c), list, negated) if is_all_scalars(&list) => {
|
||||
(Expr::Column(c), list, negated) if is_all_scalars(&list) => {
|
||||
let mut values = Vec::with_capacity(list.len());
|
||||
for scalar in list {
|
||||
// Safety: checked by `is_all_scalars`
|
||||
let DfExpr::Literal(scalar) = scalar else {
|
||||
let Expr::Literal(scalar) = scalar else {
|
||||
unreachable!();
|
||||
};
|
||||
|
||||
@@ -237,12 +236,12 @@ fn like_utf8(s: &str, pattern: &str, case_insensitive: &bool) -> Option<bool> {
|
||||
Some(booleans.value(0))
|
||||
}
|
||||
|
||||
fn is_string_literal(expr: &DfExpr) -> bool {
|
||||
matches!(expr, DfExpr::Literal(ScalarValue::Utf8(Some(_))))
|
||||
fn is_string_literal(expr: &Expr) -> bool {
|
||||
matches!(expr, Expr::Literal(ScalarValue::Utf8(Some(_))))
|
||||
}
|
||||
|
||||
fn is_column(expr: &DfExpr) -> bool {
|
||||
matches!(expr, DfExpr::Column(_))
|
||||
fn is_column(expr: &Expr) -> bool {
|
||||
matches!(expr, Expr::Column(_))
|
||||
}
|
||||
|
||||
/// A list of predicates
|
||||
@@ -257,7 +256,7 @@ impl Predicates {
|
||||
let mut predicates = Vec::with_capacity(request.filters.len());
|
||||
|
||||
for filter in &request.filters {
|
||||
if let Some(predicate) = Predicate::from_expr(filter.df_expr().clone()) {
|
||||
if let Some(predicate) = Predicate::from_expr(filter.clone()) {
|
||||
predicates.push(predicate);
|
||||
}
|
||||
}
|
||||
@@ -286,8 +285,8 @@ impl Predicates {
|
||||
}
|
||||
|
||||
/// Returns true when the values are all [`DfExpr::Literal`].
|
||||
fn is_all_scalars(list: &[DfExpr]) -> bool {
|
||||
list.iter().all(|v| matches!(v, DfExpr::Literal(_)))
|
||||
fn is_all_scalars(list: &[Expr]) -> bool {
|
||||
list.iter().all(|v| matches!(v, Expr::Literal(_)))
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
@@ -376,7 +375,7 @@ mod tests {
|
||||
#[test]
|
||||
fn test_predicate_like() {
|
||||
// case insensitive
|
||||
let expr = DfExpr::Like(Like {
|
||||
let expr = Expr::Like(Like {
|
||||
negated: false,
|
||||
expr: Box::new(column("a")),
|
||||
pattern: Box::new(string_literal("%abc")),
|
||||
@@ -403,7 +402,7 @@ mod tests {
|
||||
assert!(p.eval(&[]).is_none());
|
||||
|
||||
// case sensitive
|
||||
let expr = DfExpr::Like(Like {
|
||||
let expr = Expr::Like(Like {
|
||||
negated: false,
|
||||
expr: Box::new(column("a")),
|
||||
pattern: Box::new(string_literal("%abc")),
|
||||
@@ -423,7 +422,7 @@ mod tests {
|
||||
assert!(p.eval(&[]).is_none());
|
||||
|
||||
// not like
|
||||
let expr = DfExpr::Like(Like {
|
||||
let expr = Expr::Like(Like {
|
||||
negated: true,
|
||||
expr: Box::new(column("a")),
|
||||
pattern: Box::new(string_literal("%abc")),
|
||||
@@ -437,15 +436,15 @@ mod tests {
|
||||
assert!(p.eval(&[]).is_none());
|
||||
}
|
||||
|
||||
fn column(name: &str) -> DfExpr {
|
||||
DfExpr::Column(Column {
|
||||
fn column(name: &str) -> Expr {
|
||||
Expr::Column(Column {
|
||||
relation: None,
|
||||
name: name.to_string(),
|
||||
})
|
||||
}
|
||||
|
||||
fn string_literal(v: &str) -> DfExpr {
|
||||
DfExpr::Literal(ScalarValue::Utf8(Some(v.to_string())))
|
||||
fn string_literal(v: &str) -> Expr {
|
||||
Expr::Literal(ScalarValue::Utf8(Some(v.to_string())))
|
||||
}
|
||||
|
||||
fn match_string_value(v: &Value, expected: &str) -> bool {
|
||||
@@ -463,14 +462,14 @@ mod tests {
|
||||
result
|
||||
}
|
||||
|
||||
fn mock_exprs() -> (DfExpr, DfExpr) {
|
||||
let expr1 = DfExpr::BinaryExpr(BinaryExpr {
|
||||
fn mock_exprs() -> (Expr, Expr) {
|
||||
let expr1 = Expr::BinaryExpr(BinaryExpr {
|
||||
left: Box::new(column("a")),
|
||||
op: Operator::Eq,
|
||||
right: Box::new(string_literal("a_value")),
|
||||
});
|
||||
|
||||
let expr2 = DfExpr::BinaryExpr(BinaryExpr {
|
||||
let expr2 = Expr::BinaryExpr(BinaryExpr {
|
||||
left: Box::new(column("b")),
|
||||
op: Operator::NotEq,
|
||||
right: Box::new(string_literal("b_value")),
|
||||
@@ -491,17 +490,17 @@ mod tests {
|
||||
assert!(matches!(&p2, Predicate::NotEq(column, v) if column == "b"
|
||||
&& match_string_value(v, "b_value")));
|
||||
|
||||
let and_expr = DfExpr::BinaryExpr(BinaryExpr {
|
||||
let and_expr = Expr::BinaryExpr(BinaryExpr {
|
||||
left: Box::new(expr1.clone()),
|
||||
op: Operator::And,
|
||||
right: Box::new(expr2.clone()),
|
||||
});
|
||||
let or_expr = DfExpr::BinaryExpr(BinaryExpr {
|
||||
let or_expr = Expr::BinaryExpr(BinaryExpr {
|
||||
left: Box::new(expr1.clone()),
|
||||
op: Operator::Or,
|
||||
right: Box::new(expr2.clone()),
|
||||
});
|
||||
let not_expr = DfExpr::Not(Box::new(expr1.clone()));
|
||||
let not_expr = Expr::Not(Box::new(expr1.clone()));
|
||||
|
||||
let and_p = Predicate::from_expr(and_expr).unwrap();
|
||||
assert!(matches!(and_p, Predicate::And(left, right) if *left == p1 && *right == p2));
|
||||
@@ -510,7 +509,7 @@ mod tests {
|
||||
let not_p = Predicate::from_expr(not_expr).unwrap();
|
||||
assert!(matches!(not_p, Predicate::Not(p) if *p == p1));
|
||||
|
||||
let inlist_expr = DfExpr::InList(InList {
|
||||
let inlist_expr = Expr::InList(InList {
|
||||
expr: Box::new(column("a")),
|
||||
list: vec![string_literal("a1"), string_literal("a2")],
|
||||
negated: false,
|
||||
@@ -520,7 +519,7 @@ mod tests {
|
||||
assert!(matches!(&inlist_p, Predicate::InList(c, values) if c == "a"
|
||||
&& match_string_values(values, &["a1", "a2"])));
|
||||
|
||||
let inlist_expr = DfExpr::InList(InList {
|
||||
let inlist_expr = Expr::InList(InList {
|
||||
expr: Box::new(column("a")),
|
||||
list: vec![string_literal("a1"), string_literal("a2")],
|
||||
negated: true,
|
||||
@@ -540,7 +539,7 @@ mod tests {
|
||||
let (expr1, expr2) = mock_exprs();
|
||||
|
||||
let request = ScanRequest {
|
||||
filters: vec![expr1.into(), expr2.into()],
|
||||
filters: vec![expr1, expr2],
|
||||
..Default::default()
|
||||
};
|
||||
let predicates = Predicates::from_scan_request(&Some(request));
|
||||
@@ -578,7 +577,7 @@ mod tests {
|
||||
|
||||
let (expr1, expr2) = mock_exprs();
|
||||
let request = ScanRequest {
|
||||
filters: vec![expr1.into(), expr2.into()],
|
||||
filters: vec![expr1, expr2],
|
||||
..Default::default()
|
||||
};
|
||||
let predicates = Predicates::from_scan_request(&Some(request));
|
||||
|
||||
@@ -19,9 +19,9 @@ use arrow_schema::SchemaRef as ArrowSchemaRef;
|
||||
use common_catalog::consts::INFORMATION_SCHEMA_REGION_PEERS_TABLE_ID;
|
||||
use common_error::ext::BoxedError;
|
||||
use common_meta::rpc::router::RegionRoute;
|
||||
use common_query::physical_plan::TaskContext;
|
||||
use common_recordbatch::adapter::RecordBatchStreamAdapter;
|
||||
use common_recordbatch::{RecordBatch, SendableRecordBatchStream};
|
||||
use datafusion::execution::TaskContext;
|
||||
use datafusion::physical_plan::stream::RecordBatchStreamAdapter as DfRecordBatchStreamAdapter;
|
||||
use datafusion::physical_plan::streaming::PartitionStream as DfPartitionStream;
|
||||
use datafusion::physical_plan::SendableRecordBatchStream as DfSendableRecordBatchStream;
|
||||
@@ -55,7 +55,7 @@ const INIT_CAPACITY: usize = 42;
|
||||
///
|
||||
/// - `region_id`: the region id
|
||||
/// - `peer_id`: the region storage datanode peer id
|
||||
/// - `peer_addr`: the region storage datanode peer address
|
||||
/// - `peer_addr`: the region storage datanode gRPC peer address
|
||||
/// - `is_leader`: whether the peer is the leader
|
||||
/// - `status`: the region status, `ALIVE` or `DOWNGRADED`.
|
||||
/// - `down_seconds`: the duration of being offline, in seconds.
|
||||
@@ -179,7 +179,6 @@ impl InformationSchemaRegionPeersBuilder {
|
||||
for schema_name in catalog_manager.schema_names(&catalog_name).await? {
|
||||
let table_id_stream = catalog_manager
|
||||
.tables(&catalog_name, &schema_name)
|
||||
.await
|
||||
.try_filter_map(|t| async move {
|
||||
let table_info = t.table_info();
|
||||
if table_info.table_type == TableType::Temporary {
|
||||
|
||||
@@ -17,10 +17,10 @@ use std::sync::Arc;
use arrow_schema::SchemaRef as ArrowSchemaRef;
use common_catalog::consts::INFORMATION_SCHEMA_RUNTIME_METRICS_TABLE_ID;
use common_error::ext::BoxedError;
use common_query::physical_plan::TaskContext;
use common_recordbatch::adapter::RecordBatchStreamAdapter;
use common_recordbatch::{RecordBatch, SendableRecordBatchStream};
use common_time::util::current_time_millis;
use datafusion::execution::TaskContext;
use datafusion::physical_plan::stream::RecordBatchStreamAdapter as DfRecordBatchStreamAdapter;
use datafusion::physical_plan::streaming::PartitionStream as DfPartitionStream;
use datafusion::physical_plan::SendableRecordBatchStream as DfSendableRecordBatchStream;
@@ -28,8 +28,8 @@ use datatypes::prelude::{ConcreteDataType, MutableVector};
use datatypes::scalars::ScalarVectorBuilder;
use datatypes::schema::{ColumnSchema, Schema, SchemaRef};
use datatypes::vectors::{
ConstantVector, Float64VectorBuilder, StringVector, StringVectorBuilder,
TimestampMillisecondVector, VectorRef,
ConstantVector, Float64VectorBuilder, StringVectorBuilder, TimestampMillisecondVector,
VectorRef,
};
use itertools::Itertools;
use snafu::ResultExt;
@@ -45,8 +45,8 @@ pub(super) struct InformationSchemaMetrics {
const METRIC_NAME: &str = "metric_name";
const METRIC_VALUE: &str = "value";
const METRIC_LABELS: &str = "labels";
const NODE: &str = "node";
const NODE_TYPE: &str = "node_type";
const PEER_ADDR: &str = "peer_addr";
const PEER_TYPE: &str = "peer_type";
const TIMESTAMP: &str = "timestamp";

/// The `information_schema.runtime_metrics` virtual table.
@@ -63,8 +63,8 @@ impl InformationSchemaMetrics {
ColumnSchema::new(METRIC_NAME, ConcreteDataType::string_datatype(), false),
ColumnSchema::new(METRIC_VALUE, ConcreteDataType::float64_datatype(), false),
ColumnSchema::new(METRIC_LABELS, ConcreteDataType::string_datatype(), true),
ColumnSchema::new(NODE, ConcreteDataType::string_datatype(), false),
ColumnSchema::new(NODE_TYPE, ConcreteDataType::string_datatype(), false),
ColumnSchema::new(PEER_ADDR, ConcreteDataType::string_datatype(), true),
ColumnSchema::new(PEER_TYPE, ConcreteDataType::string_datatype(), false),
ColumnSchema::new(
TIMESTAMP,
ConcreteDataType::timestamp_millisecond_datatype(),
@@ -104,6 +104,7 @@ impl InformationTable for InformationSchemaMetrics {
.map_err(Into::into)
}),
));

Ok(Box::pin(
RecordBatchStreamAdapter::try_new(stream)
.map_err(BoxedError::new)
@@ -118,6 +119,8 @@ struct InformationSchemaMetricsBuilder {
metric_names: StringVectorBuilder,
metric_values: Float64VectorBuilder,
metric_labels: StringVectorBuilder,
peer_addrs: StringVectorBuilder,
peer_types: StringVectorBuilder,
}

impl InformationSchemaMetricsBuilder {
@@ -127,13 +130,24 @@ impl InformationSchemaMetricsBuilder {
metric_names: StringVectorBuilder::with_capacity(42),
metric_values: Float64VectorBuilder::with_capacity(42),
metric_labels: StringVectorBuilder::with_capacity(42),
peer_addrs: StringVectorBuilder::with_capacity(42),
peer_types: StringVectorBuilder::with_capacity(42),
}
}

fn add_metric(&mut self, metric_name: &str, labels: String, metric_value: f64) {
fn add_metric(
&mut self,
metric_name: &str,
labels: String,
metric_value: f64,
peer: Option<&str>,
peer_type: &str,
) {
self.metric_names.push(Some(metric_name));
self.metric_values.push(Some(metric_value));
self.metric_labels.push(Some(&labels));
self.peer_addrs.push(peer);
self.peer_types.push(Some(peer_type));
}

async fn make_metrics(&mut self, _request: Option<ScanRequest>) -> Result<RecordBatch> {
@@ -170,18 +184,19 @@ impl InformationSchemaMetricsBuilder {
.join(", "),
// Safety: always has a sample
ts.samples[0].value,
// The peer column is always `None` for standalone
None,
"STANDALONE",
);
}

// FIXME(dennis): fetching other peers metrics
self.finish()
}

fn finish(&mut self) -> Result<RecordBatch> {
let rows_num = self.metric_names.len();
let unknowns = Arc::new(ConstantVector::new(
Arc::new(StringVector::from(vec!["unknown"])),
rows_num,
));

let timestamps = Arc::new(ConstantVector::new(
Arc::new(TimestampMillisecondVector::from_slice([
current_time_millis(),
@@ -193,9 +208,8 @@ impl InformationSchemaMetricsBuilder {
Arc::new(self.metric_names.finish()),
Arc::new(self.metric_values.finish()),
Arc::new(self.metric_labels.finish()),
// TODO(dennis): supports node and node_type for cluster
unknowns.clone(),
unknowns,
Arc::new(self.peer_addrs.finish()),
Arc::new(self.peer_types.finish()),
timestamps,
];

@@ -243,8 +257,8 @@ mod tests {
assert!(result_literal.contains(METRIC_NAME));
assert!(result_literal.contains(METRIC_VALUE));
assert!(result_literal.contains(METRIC_LABELS));
assert!(result_literal.contains(NODE));
assert!(result_literal.contains(NODE_TYPE));
assert!(result_literal.contains(PEER_ADDR));
assert!(result_literal.contains(PEER_TYPE));
assert!(result_literal.contains(TIMESTAMP));
}
}

@@ -17,9 +17,9 @@ use std::sync::{Arc, Weak};
use arrow_schema::SchemaRef as ArrowSchemaRef;
use common_catalog::consts::INFORMATION_SCHEMA_SCHEMATA_TABLE_ID;
use common_error::ext::BoxedError;
use common_query::physical_plan::TaskContext;
use common_recordbatch::adapter::RecordBatchStreamAdapter;
use common_recordbatch::{RecordBatch, SendableRecordBatchStream};
use datafusion::execution::TaskContext;
use datafusion::physical_plan::stream::RecordBatchStreamAdapter as DfRecordBatchStreamAdapter;
use datafusion::physical_plan::streaming::PartitionStream as DfPartitionStream;
use datafusion::physical_plan::SendableRecordBatchStream as DfSendableRecordBatchStream;

@@ -17,9 +17,9 @@ use std::sync::{Arc, Weak};
use arrow_schema::SchemaRef as ArrowSchemaRef;
use common_catalog::consts::INFORMATION_SCHEMA_TABLE_CONSTRAINTS_TABLE_ID;
use common_error::ext::BoxedError;
use common_query::physical_plan::TaskContext;
use common_recordbatch::adapter::RecordBatchStreamAdapter;
use common_recordbatch::{RecordBatch, SendableRecordBatchStream};
use datafusion::execution::TaskContext;
use datafusion::physical_plan::stream::RecordBatchStreamAdapter as DfRecordBatchStreamAdapter;
use datafusion::physical_plan::streaming::PartitionStream as DfPartitionStream;
use datafusion::physical_plan::SendableRecordBatchStream as DfSendableRecordBatchStream;
@@ -177,7 +177,7 @@ impl InformationSchemaTableConstraintsBuilder {
let predicates = Predicates::from_scan_request(&request);

for schema_name in catalog_manager.schema_names(&catalog_name).await? {
let mut stream = catalog_manager.tables(&catalog_name, &schema_name).await;
let mut stream = catalog_manager.tables(&catalog_name, &schema_name);

while let Some(table) = stream.try_next().await? {
let keys = &table.table_info().meta.primary_key_indices;

@@ -40,5 +40,6 @@ pub const GLOBAL_STATUS: &str = "global_status";
pub const SESSION_STATUS: &str = "session_status";
pub const RUNTIME_METRICS: &str = "runtime_metrics";
pub const PARTITIONS: &str = "partitions";
pub const REGION_PEERS: &str = "greptime_region_peers";
pub const REGION_PEERS: &str = "region_peers";
pub const TABLE_CONSTRAINTS: &str = "table_constraints";
pub const CLUSTER_INFO: &str = "cluster_info";

@@ -17,9 +17,9 @@ use std::sync::{Arc, Weak};
use arrow_schema::SchemaRef as ArrowSchemaRef;
use common_catalog::consts::INFORMATION_SCHEMA_TABLES_TABLE_ID;
use common_error::ext::BoxedError;
use common_query::physical_plan::TaskContext;
use common_recordbatch::adapter::RecordBatchStreamAdapter;
use common_recordbatch::{RecordBatch, SendableRecordBatchStream};
use datafusion::execution::TaskContext;
use datafusion::physical_plan::stream::RecordBatchStreamAdapter as DfRecordBatchStreamAdapter;
use datafusion::physical_plan::streaming::PartitionStream as DfPartitionStream;
use datafusion::physical_plan::SendableRecordBatchStream as DfSendableRecordBatchStream;
@@ -161,7 +161,7 @@ impl InformationSchemaTablesBuilder {
let predicates = Predicates::from_scan_request(&request);

for schema_name in catalog_manager.schema_names(&catalog_name).await? {
let mut stream = catalog_manager.tables(&catalog_name, &schema_name).await;
let mut stream = catalog_manager.tables(&catalog_name, &schema_name);

while let Some(table) = stream.try_next().await? {
let table_info = table.table_info();

src/catalog/src/information_schema/utils.rs (new file, 53 lines)
@@ -0,0 +1,53 @@
// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

use std::sync::{Arc, Weak};

use common_config::Mode;
use meta_client::client::MetaClient;
use snafu::OptionExt;

use crate::error::{Result, UpgradeWeakCatalogManagerRefSnafu};
use crate::kvbackend::KvBackendCatalogManager;
use crate::CatalogManager;

/// Try to get the server running mode from `[CatalogManager]` weak reference.
pub fn running_mode(catalog_manager: &Weak<dyn CatalogManager>) -> Result<Option<Mode>> {
let catalog_manager = catalog_manager
.upgrade()
.context(UpgradeWeakCatalogManagerRefSnafu)?;

Ok(catalog_manager
.as_any()
.downcast_ref::<KvBackendCatalogManager>()
.map(|manager| manager.running_mode())
.copied())
}

/// Try to get the `[MetaClient]` from `[CatalogManager]` weak reference.
pub fn meta_client(catalog_manager: &Weak<dyn CatalogManager>) -> Result<Option<Arc<MetaClient>>> {
let catalog_manager = catalog_manager
.upgrade()
.context(UpgradeWeakCatalogManagerRefSnafu)?;

let meta_client = match catalog_manager
.as_any()
.downcast_ref::<KvBackendCatalogManager>()
{
None => None,
Some(manager) => manager.meta_client(),
};

Ok(meta_client)
}
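
These two helpers recover the server running mode and the `MetaClient` from a weak `CatalogManager` reference, falling back to `None` when the manager is not the KV-backend implementation. A minimal usage sketch follows; `report_mode` is illustrative and not part of this change, and the module path plus the `Mode::Standalone`/`Mode::Distributed` variant names are assumptions.

use std::sync::Weak;

use common_config::Mode;

use crate::error::Result;
use crate::information_schema::utils::running_mode;
use crate::CatalogManager;

// Illustrative helper: maps the detected mode to a label. `running_mode`
// returns `None` for managers other than KvBackendCatalogManager (for
// example, the in-memory catalog used in tests).
fn report_mode(catalog_manager: &Weak<dyn CatalogManager>) -> Result<&'static str> {
    Ok(match running_mode(catalog_manager)? {
        Some(Mode::Distributed) => "distributed",
        Some(Mode::Standalone) => "standalone",
        None => "unknown",
    })
}
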
@@ -16,5 +16,7 @@ pub use client::{CachedMetaKvBackend, CachedMetaKvBackendBuilder, MetaKvBackend}

mod client;
mod manager;
mod table_cache;

pub use manager::KvBackendCatalogManager;
pub use table_cache::{new_table_cache, TableCache, TableCacheRef};

@@ -350,6 +350,13 @@ pub struct MetaKvBackend {
pub client: Arc<MetaClient>,
}

impl MetaKvBackend {
/// Constructs a [MetaKvBackend].
pub fn new(client: Arc<MetaClient>) -> MetaKvBackend {
MetaKvBackend { client }
}
}

impl TxnService for MetaKvBackend {
type Error = Error;
}
@@ -450,9 +457,8 @@ mod tests {
use common_meta::kv_backend::{KvBackend, TxnService};
use common_meta::rpc::store::{
BatchDeleteRequest, BatchDeleteResponse, BatchGetRequest, BatchGetResponse,
BatchPutRequest, BatchPutResponse, CompareAndPutRequest, CompareAndPutResponse,
DeleteRangeRequest, DeleteRangeResponse, PutRequest, PutResponse, RangeRequest,
RangeResponse,
BatchPutRequest, BatchPutResponse, DeleteRangeRequest, DeleteRangeResponse, PutRequest,
PutResponse, RangeRequest, RangeResponse,
};
use common_meta::rpc::KeyValue;
use dashmap::DashMap;
@@ -512,13 +518,6 @@ mod tests {
unimplemented!()
}

async fn compare_and_put(
&self,
_req: CompareAndPutRequest,
) -> Result<CompareAndPutResponse, Self::Error> {
unimplemented!()
}

async fn delete_range(
&self,
_req: DeleteRangeRequest,

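
The new `MetaKvBackend::new` constructor simply wraps an already-built `MetaClient`. A short sketch, assuming a connected client is available and that `MetaKvBackend` stays re-exported from `catalog::kvbackend` as shown above:

use std::sync::Arc;

use catalog::kvbackend::MetaKvBackend;
use meta_client::client::MetaClient;

// Sketch only: the client is assumed to be constructed and connected elsewhere.
fn kv_backend_from(client: MetaClient) -> MetaKvBackend {
    MetaKvBackend::new(Arc::new(client))
}
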
@@ -15,16 +15,14 @@
use std::any::Any;
use std::collections::BTreeSet;
use std::sync::{Arc, Weak};
use std::time::Duration;

use async_stream::try_stream;
use common_catalog::consts::{
DEFAULT_CATALOG_NAME, DEFAULT_SCHEMA_NAME, INFORMATION_SCHEMA_NAME, NUMBERS_TABLE_ID,
};
use common_catalog::format_full_table_name;
use common_config::Mode;
use common_error::ext::BoxedError;
use common_meta::cache_invalidator::{CacheInvalidator, Context, MultiCacheInvalidator};
use common_meta::instruction::CacheIdent;
use common_meta::cache::{LayeredCacheRegistryRef, ViewInfoCacheRef};
use common_meta::key::catalog_name::CatalogNameKey;
use common_meta::key::schema_name::SchemaNameKey;
use common_meta::key::table_info::TableInfoValue;
@@ -33,20 +31,21 @@ use common_meta::key::{TableMetadataManager, TableMetadataManagerRef};
use common_meta::kv_backend::KvBackendRef;
use futures_util::stream::BoxStream;
use futures_util::{StreamExt, TryStreamExt};
use moka::future::{Cache as AsyncCache, CacheBuilder};
use meta_client::client::MetaClient;
use moka::sync::Cache;
use partition::manager::{PartitionRuleManager, PartitionRuleManagerRef};
use snafu::prelude::*;
use table::dist_table::DistTable;
use table::table::numbers::{NumbersTable, NUMBERS_TABLE_NAME};
use table::table_name::TableName;
use table::TableRef;

use crate::error::Error::{GetTableCache, TableCacheNotGet};
use crate::error::{
InvalidTableInfoInCatalogSnafu, ListCatalogsSnafu, ListSchemasSnafu, ListTablesSnafu, Result,
TableCacheNotGetSnafu, TableMetadataManagerSnafu,
CacheNotFoundSnafu, GetTableCacheSnafu, InvalidTableInfoInCatalogSnafu, ListCatalogsSnafu,
ListSchemasSnafu, ListTablesSnafu, Result, TableMetadataManagerSnafu,
};
use crate::information_schema::InformationSchemaProvider;
use crate::kvbackend::TableCacheRef;
use crate::CatalogManager;

/// Access all existing catalog, schema and tables.
@@ -56,64 +55,33 @@ use crate::CatalogManager;
/// comes from `SystemCatalog`, which is static and read-only.
#[derive(Clone)]
pub struct KvBackendCatalogManager {
mode: Mode,
meta_client: Option<Arc<MetaClient>>,
partition_manager: PartitionRuleManagerRef,
table_metadata_manager: TableMetadataManagerRef,
/// A sub-CatalogManager that handles system tables
system_catalog: SystemCatalog,
table_cache: AsyncCache<String, TableRef>,
}

struct TableCacheInvalidator {
table_cache: AsyncCache<String, TableRef>,
}

impl TableCacheInvalidator {
pub fn new(table_cache: AsyncCache<String, TableRef>) -> Self {
Self { table_cache }
}
}

#[async_trait::async_trait]
impl CacheInvalidator for TableCacheInvalidator {
async fn invalidate(
&self,
_ctx: &Context,
caches: Vec<CacheIdent>,
) -> common_meta::error::Result<()> {
for cache in caches {
if let CacheIdent::TableName(table_name) = cache {
let table_cache_key = format_full_table_name(
&table_name.catalog_name,
&table_name.schema_name,
&table_name.table_name,
);
self.table_cache.invalidate(&table_cache_key).await;
}
}
Ok(())
}
cache_registry: LayeredCacheRegistryRef,
}

const CATALOG_CACHE_MAX_CAPACITY: u64 = 128;
const TABLE_CACHE_MAX_CAPACITY: u64 = 65536;
const TABLE_CACHE_TTL: Duration = Duration::from_secs(10 * 60);
const TABLE_CACHE_TTI: Duration = Duration::from_secs(5 * 60);

impl KvBackendCatalogManager {
pub async fn new(
pub fn new(
mode: Mode,
meta_client: Option<Arc<MetaClient>>,
backend: KvBackendRef,
multi_cache_invalidator: Arc<MultiCacheInvalidator>,
cache_registry: LayeredCacheRegistryRef,
) -> Arc<Self> {
let table_cache: AsyncCache<String, TableRef> = CacheBuilder::new(TABLE_CACHE_MAX_CAPACITY)
.time_to_live(TABLE_CACHE_TTL)
.time_to_idle(TABLE_CACHE_TTI)
.build();
multi_cache_invalidator
.add_invalidator(Arc::new(TableCacheInvalidator::new(table_cache.clone())))
.await;

Arc::new_cyclic(|me| Self {
partition_manager: Arc::new(PartitionRuleManager::new(backend.clone())),
mode,
meta_client,
partition_manager: Arc::new(PartitionRuleManager::new(
backend.clone(),
cache_registry
.get()
.expect("Failed to get table_route_cache"),
)),
table_metadata_manager: Arc::new(TableMetadataManager::new(backend)),
system_catalog: SystemCatalog {
catalog_manager: me.clone(),
@@ -123,10 +91,26 @@ impl KvBackendCatalogManager {
me.clone(),
)),
},
table_cache,
cache_registry,
})
}

/// Returns the server running mode.
pub fn running_mode(&self) -> &Mode {
&self.mode
}

pub fn view_info_cache(&self) -> Result<ViewInfoCacheRef> {
self.cache_registry.get().context(CacheNotFoundSnafu {
name: "view_info_cache",
})
}

/// Returns the `[MetaClient]`.
pub fn meta_client(&self) -> Option<Arc<MetaClient>> {
self.meta_client.clone()
}

pub fn partition_manager(&self) -> PartitionRuleManagerRef {
self.partition_manager.clone()
}
@@ -200,7 +184,7 @@ impl CatalogManager for KvBackendCatalogManager {
}

async fn schema_exists(&self, catalog: &str, schema: &str) -> Result<bool> {
if self.system_catalog.schema_exist(schema) {
if self.system_catalog.schema_exists(schema) {
return Ok(true);
}

@@ -212,7 +196,7 @@ impl CatalogManager for KvBackendCatalogManager {
}

async fn table_exists(&self, catalog: &str, schema: &str, table: &str) -> Result<bool> {
if self.system_catalog.table_exist(schema, table) {
if self.system_catalog.table_exists(schema, table) {
return Ok(true);
}

@@ -227,67 +211,32 @@ impl CatalogManager for KvBackendCatalogManager {

async fn table(
&self,
catalog: &str,
schema: &str,
catalog_name: &str,
schema_name: &str,
table_name: &str,
) -> Result<Option<TableRef>> {
if let Some(table) = self.system_catalog.table(catalog, schema, table_name) {
if let Some(table) = self
.system_catalog
.table(catalog_name, schema_name, table_name)
{
return Ok(Some(table));
}

let init = async {
let table_name_key = TableNameKey::new(catalog, schema, table_name);
let Some(table_name_value) = self
.table_metadata_manager
.table_name_manager()
.get(table_name_key)
.await
.context(TableMetadataManagerSnafu)?
else {
return TableCacheNotGetSnafu {
key: table_name_key.to_string(),
}
.fail();
};
let table_id = table_name_value.table_id();
let table_cache: TableCacheRef = self.cache_registry.get().context(CacheNotFoundSnafu {
name: "table_cache",
})?;

let Some(table_info_value) = self
.table_metadata_manager
.table_info_manager()
.get(table_id)
.await
.context(TableMetadataManagerSnafu)?
.map(|v| v.into_inner())
else {
return TableCacheNotGetSnafu {
key: table_name_key.to_string(),
}
.fail();
};
build_table(table_info_value)
};

match self
.table_cache
.try_get_with_by_ref(&format_full_table_name(catalog, schema, table_name), init)
table_cache
.get_by_ref(&TableName {
catalog_name: catalog_name.to_string(),
schema_name: schema_name.to_string(),
table_name: table_name.to_string(),
})
.await
{
Ok(table) => Ok(Some(table)),
Err(err) => match err.as_ref() {
TableCacheNotGet { .. } => Ok(None),
_ => Err(err),
},
}
.map_err(|err| GetTableCache {
err_msg: err.to_string(),
})
.context(GetTableCacheSnafu)
}

async fn tables<'a>(
&'a self,
catalog: &'a str,
schema: &'a str,
) -> BoxStream<'a, Result<TableRef>> {
fn tables<'a>(&'a self, catalog: &'a str, schema: &'a str) -> BoxStream<'a, Result<TableRef>> {
let sys_tables = try_stream!({
// System tables
let sys_table_names = self.system_catalog.table_names(schema);
@@ -368,11 +317,11 @@ impl SystemCatalog {
}
}

fn schema_exist(&self, schema: &str) -> bool {
fn schema_exists(&self, schema: &str) -> bool {
schema == INFORMATION_SCHEMA_NAME
}

fn table_exist(&self, schema: &str, table: &str) -> bool {
fn table_exists(&self, schema: &str, table: &str) -> bool {
if schema == INFORMATION_SCHEMA_NAME {
self.information_schema_provider.table(table).is_some()
} else if schema == DEFAULT_SCHEMA_NAME {

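
With this change `KvBackendCatalogManager::new` is no longer async and takes a `LayeredCacheRegistryRef` instead of a `MultiCacheInvalidator`. A construction sketch under those assumptions follows; the helper name and the `Mode::Distributed` variant are illustrative, and the registry and backend are assumed to be built elsewhere.

use std::sync::Arc;

use catalog::kvbackend::KvBackendCatalogManager;
use common_config::Mode;
use common_meta::cache::LayeredCacheRegistryRef;
use common_meta::kv_backend::KvBackendRef;
use meta_client::client::MetaClient;

// Sketch only: demonstrates the new, synchronous constructor signature.
fn build_catalog_manager(
    meta_client: Arc<MetaClient>,
    backend: KvBackendRef,
    cache_registry: LayeredCacheRegistryRef,
) -> Arc<KvBackendCatalogManager> {
    // No `.await` is needed any more; table-cache invalidation is wired
    // through the layered cache registry rather than a MultiCacheInvalidator.
    KvBackendCatalogManager::new(Mode::Distributed, Some(meta_client), backend, cache_registry)
}
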
src/catalog/src/kvbackend/table_cache.rs (new file, 80 lines)
@@ -0,0 +1,80 @@
// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

use std::sync::Arc;

use common_meta::cache::{CacheContainer, Initializer, TableInfoCacheRef, TableNameCacheRef};
use common_meta::error::{Result as MetaResult, ValueNotExistSnafu};
use common_meta::instruction::CacheIdent;
use futures::future::BoxFuture;
use moka::future::Cache;
use snafu::OptionExt;
use table::dist_table::DistTable;
use table::table_name::TableName;
use table::TableRef;

pub type TableCacheRef = Arc<TableCache>;

/// [TableCache] caches the [TableName] to [TableRef] mapping.
pub type TableCache = CacheContainer<TableName, TableRef, CacheIdent>;

/// Constructs a [TableCache].
pub fn new_table_cache(
name: String,
cache: Cache<TableName, TableRef>,
table_info_cache: TableInfoCacheRef,
table_name_cache: TableNameCacheRef,
) -> TableCache {
let init = init_factory(table_info_cache, table_name_cache);

CacheContainer::new(name, cache, Box::new(invalidator), init, Box::new(filter))
}

fn init_factory(
table_info_cache: TableInfoCacheRef,
table_name_cache: TableNameCacheRef,
) -> Initializer<TableName, TableRef> {
Arc::new(move |table_name| {
let table_info_cache = table_info_cache.clone();
let table_name_cache = table_name_cache.clone();
Box::pin(async move {
let table_id = table_name_cache
.get_by_ref(table_name)
.await?
.context(ValueNotExistSnafu)?;
let table_info = table_info_cache
.get_by_ref(&table_id)
.await?
.context(ValueNotExistSnafu)?;

Ok(Some(DistTable::table(table_info)))
})
})
}

fn invalidator<'a>(
cache: &'a Cache<TableName, TableRef>,
ident: &'a CacheIdent,
) -> BoxFuture<'a, MetaResult<()>> {
Box::pin(async move {
if let CacheIdent::TableName(table_name) = ident {
cache.invalidate(table_name).await
}
Ok(())
})
}

fn filter(ident: &CacheIdent) -> bool {
matches!(ident, CacheIdent::TableName(_))
}
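
A lookup sketch against the cache defined above; the `lookup` helper and its error handling are illustrative, while the `get_by_ref` call and the `TableName` key mirror the `table()` implementation in the manager diff.

use catalog::kvbackend::TableCacheRef;
use table::table_name::TableName;
use table::TableRef;

// Sketch only: errors (including "value not exist") are collapsed to `None`
// here for brevity; the real caller maps them to catalog errors instead.
async fn lookup(cache: &TableCacheRef, catalog: &str, schema: &str, table: &str) -> Option<TableRef> {
    cache
        .get_by_ref(&TableName {
            catalog_name: catalog.to_string(),
            schema_name: schema.to_string(),
            table_name: table.to_string(),
        })
        .await
        .ok()
        .flatten()
}
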
@@ -59,11 +59,7 @@ pub trait CatalogManager: Send + Sync {
) -> Result<Option<TableRef>>;

/// Returns all tables with a stream by catalog and schema.
async fn tables<'a>(
&'a self,
catalog: &'a str,
schema: &'a str,
) -> BoxStream<'a, Result<TableRef>>;
fn tables<'a>(&'a self, catalog: &'a str, schema: &'a str) -> BoxStream<'a, Result<TableRef>>;
}

pub type CatalogManagerRef = Arc<dyn CatalogManager>;

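
`tables` is now a plain method returning a stream, so callers poll it directly instead of awaiting it first. A small consumer sketch; `count_tables` is illustrative, and the import paths assume the public `catalog` crate API.

use catalog::error::Result;
use catalog::CatalogManagerRef;
use futures_util::TryStreamExt;

// Sketch only: counts the tables in one schema by draining the stream.
async fn count_tables(manager: &CatalogManagerRef, catalog: &str, schema: &str) -> Result<usize> {
    // `tables()` no longer needs an `.await` before the stream can be polled.
    let mut stream = manager.tables(catalog, schema);
    let mut count = 0;
    while stream.try_next().await?.is_some() {
        count += 1;
    }
    Ok(count)
}
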
@@ -117,11 +117,7 @@ impl CatalogManager for MemoryCatalogManager {
Ok(result)
}

async fn tables<'a>(
&'a self,
catalog: &'a str,
schema: &'a str,
) -> BoxStream<'a, Result<TableRef>> {
fn tables<'a>(&'a self, catalog: &'a str, schema: &'a str) -> BoxStream<'a, Result<TableRef>> {
let catalogs = self.catalogs.read().unwrap();

let Some(schemas) = catalogs.get(catalog) else {
@@ -141,11 +137,11 @@ impl CatalogManager for MemoryCatalogManager {

let tables = tables.values().cloned().collect::<Vec<_>>();

return Box::pin(try_stream!({
Box::pin(try_stream!({
for table in tables {
yield table;
}
}));
}))
}
}

@@ -368,9 +364,7 @@ mod tests {
.await
.unwrap()
.unwrap();
let stream = catalog_list
.tables(DEFAULT_CATALOG_NAME, DEFAULT_SCHEMA_NAME)
.await;
let stream = catalog_list.tables(DEFAULT_CATALOG_NAME, DEFAULT_SCHEMA_NAME);
let tables = stream.try_collect::<Vec<_>>().await.unwrap();
assert_eq!(tables.len(), 1);
assert_eq!(

Some files were not shown because too many files have changed in this diff.