Compare commits

2 Commits

Author        SHA1        Message                                                             Date
Ruihang Xia   94409967be  Merge branch 'main' into create-view                                2024-04-22 21:08:22 +08:00
Ruihang Xia   7503992d61  add statement (Signed-off-by: Ruihang Xia <waynestxia@gmail.com>)   2024-04-17 19:13:54 +08:00
962 changed files with 19802 additions and 62994 deletions


@@ -24,7 +24,3 @@ GT_KAFKA_ENDPOINTS = localhost:9092
# Setting for fuzz tests # Setting for fuzz tests
GT_MYSQL_ADDR = localhost:4002 GT_MYSQL_ADDR = localhost:4002
# Setting for unstable fuzz tests
GT_FUZZ_BINARY_PATH=/path/to/
GT_FUZZ_INSTANCE_ROOT_DIR=/tmp/unstable_greptime


@@ -1,7 +1,7 @@
--- ---
name: Bug report name: Bug report
description: Is something not working? Help us fix it! description: Is something not working? Help us fix it!
labels: [ "C-bug" ] labels: [ "bug" ]
body: body:
- type: markdown - type: markdown
attributes: attributes:


@@ -4,5 +4,5 @@ contact_links:
url: https://greptime.com/slack url: https://greptime.com/slack
about: Get free help from the Greptime community about: Get free help from the Greptime community
- name: Greptime Community Discussion - name: Greptime Community Discussion
url: https://github.com/greptimeTeam/discussions url: https://github.com/greptimeTeam/greptimedb/discussions
about: Get free help from the Greptime community about: Get free help from the Greptime community


@@ -1,7 +1,7 @@
--- ---
name: Enhancement name: Enhancement
description: Suggest an enhancement to existing functionality description: Suggest an enhancement to existing functionality
labels: [ "C-enhancement" ] labels: [ "enhancement" ]
body: body:
- type: dropdown - type: dropdown
id: type id: type


@@ -1,7 +1,7 @@
--- ---
name: New Feature name: Feature request
description: Suggest a new feature for GreptimeDB description: Suggest a new feature for GreptimeDB
labels: [ "C-feature" ] labels: [ "feature request" ]
body: body:
- type: markdown - type: markdown
id: info id: info


@@ -1,18 +0,0 @@
name: Build and push CI Docker image
description: Build and push CI Docker image to local registry
inputs:
binary_path:
default: "./bin"
description: "Binary path"
runs:
using: composite
steps:
- name: Build and push to local registry
uses: docker/build-push-action@v5
with:
context: .
file: ./docker/ci/ubuntu/Dockerfile.fuzztests
push: true
tags: localhost:5001/greptime/greptimedb:latest
build-args: |
BINARY_PATH=${{ inputs.binary_path }}


@@ -22,15 +22,15 @@ inputs:
build-dev-builder-ubuntu: build-dev-builder-ubuntu:
description: Build dev-builder-ubuntu image description: Build dev-builder-ubuntu image
required: false required: false
default: "true" default: 'true'
build-dev-builder-centos: build-dev-builder-centos:
description: Build dev-builder-centos image description: Build dev-builder-centos image
required: false required: false
default: "true" default: 'true'
build-dev-builder-android: build-dev-builder-android:
description: Build dev-builder-android image description: Build dev-builder-android image
required: false required: false
default: "true" default: 'true'
runs: runs:
using: composite using: composite
steps: steps:
@@ -47,7 +47,7 @@ runs:
run: | run: |
make dev-builder \ make dev-builder \
BASE_IMAGE=ubuntu \ BASE_IMAGE=ubuntu \
BUILDX_MULTI_PLATFORM_BUILD=all \ BUILDX_MULTI_PLATFORM_BUILD=true \
IMAGE_REGISTRY=${{ inputs.dockerhub-image-registry }} \ IMAGE_REGISTRY=${{ inputs.dockerhub-image-registry }} \
IMAGE_NAMESPACE=${{ inputs.dockerhub-image-namespace }} \ IMAGE_NAMESPACE=${{ inputs.dockerhub-image-namespace }} \
IMAGE_TAG=${{ inputs.version }} IMAGE_TAG=${{ inputs.version }}
@@ -58,7 +58,7 @@ runs:
run: | run: |
make dev-builder \ make dev-builder \
BASE_IMAGE=centos \ BASE_IMAGE=centos \
BUILDX_MULTI_PLATFORM_BUILD=amd64 \ BUILDX_MULTI_PLATFORM_BUILD=true \
IMAGE_REGISTRY=${{ inputs.dockerhub-image-registry }} \ IMAGE_REGISTRY=${{ inputs.dockerhub-image-registry }} \
IMAGE_NAMESPACE=${{ inputs.dockerhub-image-namespace }} \ IMAGE_NAMESPACE=${{ inputs.dockerhub-image-namespace }} \
IMAGE_TAG=${{ inputs.version }} IMAGE_TAG=${{ inputs.version }}
@@ -72,5 +72,5 @@ runs:
IMAGE_REGISTRY=${{ inputs.dockerhub-image-registry }} \ IMAGE_REGISTRY=${{ inputs.dockerhub-image-registry }} \
IMAGE_NAMESPACE=${{ inputs.dockerhub-image-namespace }} \ IMAGE_NAMESPACE=${{ inputs.dockerhub-image-namespace }} \
IMAGE_TAG=${{ inputs.version }} && \ IMAGE_TAG=${{ inputs.version }} && \
docker push ${{ inputs.dockerhub-image-registry }}/${{ inputs.dockerhub-image-namespace }}/dev-builder-android:${{ inputs.version }} docker push ${{ inputs.dockerhub-image-registry }}/${{ inputs.dockerhub-image-namespace }}/dev-builder-android:${{ inputs.version }}


@@ -24,14 +24,6 @@ inputs:
description: Build android artifacts description: Build android artifacts
required: false required: false
default: 'false' default: 'false'
image-namespace:
description: Image Namespace
required: false
default: 'greptime'
image-registry:
description: Image Registry
required: false
default: 'docker.io'
runs: runs:
using: composite using: composite
steps: steps:
@@ -43,9 +35,7 @@ runs:
make build-by-dev-builder \ make build-by-dev-builder \
CARGO_PROFILE=${{ inputs.cargo-profile }} \ CARGO_PROFILE=${{ inputs.cargo-profile }} \
FEATURES=${{ inputs.features }} \ FEATURES=${{ inputs.features }} \
BASE_IMAGE=${{ inputs.base-image }} \ BASE_IMAGE=${{ inputs.base-image }}
IMAGE_NAMESPACE=${{ inputs.image-namespace }} \
IMAGE_REGISTRY=${{ inputs.image-registry }}
- name: Upload artifacts - name: Upload artifacts
uses: ./.github/actions/upload-artifacts uses: ./.github/actions/upload-artifacts
@@ -63,9 +53,7 @@ runs:
shell: bash shell: bash
if: ${{ inputs.build-android-artifacts == 'true' }} if: ${{ inputs.build-android-artifacts == 'true' }}
run: | run: |
cd ${{ inputs.working-dir }} && make strip-android-bin \ cd ${{ inputs.working-dir }} && make strip-android-bin
IMAGE_NAMESPACE=${{ inputs.image-namespace }} \
IMAGE_REGISTRY=${{ inputs.image-registry }}
- name: Upload android artifacts - name: Upload android artifacts
uses: ./.github/actions/upload-artifacts uses: ./.github/actions/upload-artifacts


@@ -16,7 +16,7 @@ inputs:
dev-mode: dev-mode:
description: Enable dev mode, only build standard greptime description: Enable dev mode, only build standard greptime
required: false required: false
default: "false" default: 'false'
working-dir: working-dir:
description: Working directory to build the artifacts description: Working directory to build the artifacts
required: false required: false
@@ -30,9 +30,7 @@ runs:
# NOTE: If the BUILD_JOBS > 4, it's always OOM in EC2 instance. # NOTE: If the BUILD_JOBS > 4, it's always OOM in EC2 instance.
run: | run: |
cd ${{ inputs.working-dir }} && \ cd ${{ inputs.working-dir }} && \
make run-it-in-container BUILD_JOBS=4 \ make run-it-in-container BUILD_JOBS=4
IMAGE_NAMESPACE=i8k6a5e1/greptime \
IMAGE_REGISTRY=public.ecr.aws
- name: Upload sqlness logs - name: Upload sqlness logs
if: ${{ failure() && inputs.disable-run-tests == 'false' }} # Only upload logs when the integration tests failed. if: ${{ failure() && inputs.disable-run-tests == 'false' }} # Only upload logs when the integration tests failed.
@@ -51,8 +49,6 @@ runs:
artifacts-dir: greptime-linux-${{ inputs.arch }}-pyo3-${{ inputs.version }} artifacts-dir: greptime-linux-${{ inputs.arch }}-pyo3-${{ inputs.version }}
version: ${{ inputs.version }} version: ${{ inputs.version }}
working-dir: ${{ inputs.working-dir }} working-dir: ${{ inputs.working-dir }}
image-registry: public.ecr.aws
image-namespace: i8k6a5e1/greptime
- name: Build greptime without pyo3 - name: Build greptime without pyo3
if: ${{ inputs.dev-mode == 'false' }} if: ${{ inputs.dev-mode == 'false' }}
@@ -64,8 +60,6 @@ runs:
artifacts-dir: greptime-linux-${{ inputs.arch }}-${{ inputs.version }} artifacts-dir: greptime-linux-${{ inputs.arch }}-${{ inputs.version }}
version: ${{ inputs.version }} version: ${{ inputs.version }}
working-dir: ${{ inputs.working-dir }} working-dir: ${{ inputs.working-dir }}
image-registry: public.ecr.aws
image-namespace: i8k6a5e1/greptime
- name: Clean up the target directory # Clean up the target directory for the centos7 base image, or it will still use the objects of last build. - name: Clean up the target directory # Clean up the target directory for the centos7 base image, or it will still use the objects of last build.
shell: bash shell: bash
@@ -74,7 +68,7 @@ runs:
- name: Build greptime on centos base image - name: Build greptime on centos base image
uses: ./.github/actions/build-greptime-binary uses: ./.github/actions/build-greptime-binary
if: ${{ inputs.arch == 'amd64' && inputs.dev-mode == 'false' }} # Builds greptime for centos if the host machine is amd64. if: ${{ inputs.arch == 'amd64' && inputs.dev-mode == 'false' }} # Only build centos7 base image for amd64.
with: with:
base-image: centos base-image: centos
features: servers/dashboard features: servers/dashboard
@@ -82,17 +76,13 @@ runs:
artifacts-dir: greptime-linux-${{ inputs.arch }}-centos-${{ inputs.version }} artifacts-dir: greptime-linux-${{ inputs.arch }}-centos-${{ inputs.version }}
version: ${{ inputs.version }} version: ${{ inputs.version }}
working-dir: ${{ inputs.working-dir }} working-dir: ${{ inputs.working-dir }}
image-registry: public.ecr.aws
image-namespace: i8k6a5e1/greptime
- name: Build greptime on android base image - name: Build greptime on android base image
uses: ./.github/actions/build-greptime-binary uses: ./.github/actions/build-greptime-binary
if: ${{ inputs.arch == 'amd64' && inputs.dev-mode == 'false' }} # Builds arm64 greptime binary for android if the host machine amd64. if: ${{ inputs.arch == 'amd64' && inputs.dev-mode == 'false' }} # Only build android base image on amd64.
with: with:
base-image: android base-image: android
artifacts-dir: greptime-android-arm64-${{ inputs.version }} artifacts-dir: greptime-android-arm64-${{ inputs.version }}
version: ${{ inputs.version }} version: ${{ inputs.version }}
working-dir: ${{ inputs.working-dir }} working-dir: ${{ inputs.working-dir }}
build-android-artifacts: true build-android-artifacts: true
image-registry: public.ecr.aws
image-namespace: i8k6a5e1/greptime


@@ -59,15 +59,9 @@ runs:
if: ${{ inputs.disable-run-tests == 'false' }} if: ${{ inputs.disable-run-tests == 'false' }}
uses: taiki-e/install-action@nextest uses: taiki-e/install-action@nextest
# Get proper backtraces in mac Sonoma. Currently there's an issue with the new
# linker that prevents backtraces from getting printed correctly.
#
# <https://github.com/rust-lang/rust/issues/113783>
- name: Run integration tests - name: Run integration tests
if: ${{ inputs.disable-run-tests == 'false' }} if: ${{ inputs.disable-run-tests == 'false' }}
shell: bash shell: bash
env:
CARGO_BUILD_RUSTFLAGS: "-Clink-arg=-Wl,-ld_classic"
run: | run: |
make test sqlness-test make test sqlness-test
@@ -81,8 +75,6 @@ runs:
- name: Build greptime binary - name: Build greptime binary
shell: bash shell: bash
env:
CARGO_BUILD_RUSTFLAGS: "-Clink-arg=-Wl,-ld_classic"
run: | run: |
make build \ make build \
CARGO_PROFILE=${{ inputs.cargo-profile }} \ CARGO_PROFILE=${{ inputs.cargo-profile }} \


@@ -26,6 +26,8 @@ runs:
using: composite using: composite
steps: steps:
- uses: arduino/setup-protoc@v3 - uses: arduino/setup-protoc@v3
with:
repo-token: ${{ secrets.GITHUB_TOKEN }}
- name: Install rust toolchain - name: Install rust toolchain
uses: dtolnay/rust-toolchain@master uses: dtolnay/rust-toolchain@master
@@ -59,9 +61,6 @@ runs:
if: ${{ inputs.disable-run-tests == 'false' }} if: ${{ inputs.disable-run-tests == 'false' }}
shell: pwsh shell: pwsh
run: make test sqlness-test run: make test sqlness-test
env:
RUSTUP_WINDOWS_PATH_ADD_BIN: 1 # Workaround for https://github.com/nextest-rs/nextest/issues/1493
RUST_BACKTRACE: 1
- name: Upload sqlness logs - name: Upload sqlness logs
if: ${{ failure() }} # Only upload logs when the integration tests failed. if: ${{ failure() }} # Only upload logs when the integration tests failed.


@@ -3,17 +3,11 @@ description: 'Fuzz test given setup and service'
inputs: inputs:
target: target:
description: "The fuzz target to test" description: "The fuzz target to test"
required: true
max-total-time:
description: "Max total time(secs)"
required: true
unstable:
default: 'false'
description: "Enable unstable feature"
runs: runs:
using: composite using: composite
steps: steps:
- name: Run Fuzz Test - name: Run Fuzz Test
shell: bash shell: bash
run: cargo fuzz run ${{ inputs.target }} --fuzz-dir tests-fuzz -D -s none ${{ inputs.unstable == 'true' && '--features=unstable' || '' }} -- -max_total_time=${{ inputs.max-total-time }} run: cargo fuzz run ${{ inputs.target }} --fuzz-dir tests-fuzz -D -s none -- -max_total_time=120
env:
GT_MYSQL_ADDR: 127.0.0.1:4002


@@ -1,16 +0,0 @@
name: Setup cyborg environment
description: Setup cyborg environment
runs:
using: composite
steps:
- uses: actions/setup-node@v4
with:
node-version: 22
- uses: pnpm/action-setup@v3
with:
package_json_file: 'cyborg/package.json'
run_install: true
- name: Describe the Environment
working-directory: cyborg
shell: bash
run: pnpm tsx -v


@@ -1,25 +0,0 @@
name: Setup Etcd cluster
description: Deploy Etcd cluster on Kubernetes
inputs:
etcd-replicas:
default: 3
description: "Etcd replicas"
namespace:
default: "etcd-cluster"
runs:
using: composite
steps:
- name: Install Etcd cluster
shell: bash
run: |
helm upgrade \
--install etcd oci://registry-1.docker.io/bitnamicharts/etcd \
--set replicaCount=${{ inputs.etcd-replicas }} \
--set resources.requests.cpu=50m \
--set resources.requests.memory=128Mi \
--set auth.rbac.create=false \
--set auth.rbac.token.enabled=false \
--set persistence.size=2Gi \
--create-namespace \
-n ${{ inputs.namespace }}


@@ -1,89 +0,0 @@
name: Setup GreptimeDB cluster
description: Deploy GreptimeDB cluster on Kubernetes
inputs:
frontend-replicas:
default: 2
description: "Number of Frontend replicas"
datanode-replicas:
default: 2
description: "Number of Datanode replicas"
meta-replicas:
default: 3
description: "Number of Metasrv replicas"
image-registry:
default: "docker.io"
description: "Image registry"
image-repository:
default: "greptime/greptimedb"
description: "Image repository"
image-tag:
default: "latest"
description: 'Image tag'
etcd-endpoints:
default: "etcd.etcd-cluster.svc.cluster.local:2379"
description: "Etcd endpoints"
values-filename:
default: "with-minio.yaml"
runs:
using: composite
steps:
- name: Install GreptimeDB operator
shell: bash
run: |
helm repo add greptime https://greptimeteam.github.io/helm-charts/
helm repo update
helm upgrade \
--install \
--create-namespace \
greptimedb-operator greptime/greptimedb-operator \
-n greptimedb-admin \
--wait \
--wait-for-jobs
- name: Install GreptimeDB cluster
shell: bash
run: |
helm upgrade \
--install my-greptimedb \
--set meta.etcdEndpoints=${{ inputs.etcd-endpoints }} \
--set image.registry=${{ inputs.image-registry }} \
--set image.repository=${{ inputs.image-repository }} \
--set image.tag=${{ inputs.image-tag }} \
--set base.podTemplate.main.resources.requests.cpu=50m \
--set base.podTemplate.main.resources.requests.memory=256Mi \
--set base.podTemplate.main.resources.limits.cpu=1000m \
--set base.podTemplate.main.resources.limits.memory=2Gi \
--set frontend.replicas=${{ inputs.frontend-replicas }} \
--set datanode.replicas=${{ inputs.datanode-replicas }} \
--set meta.replicas=${{ inputs.meta-replicas }} \
greptime/greptimedb-cluster \
--create-namespace \
-n my-greptimedb \
--values ./.github/actions/setup-greptimedb-cluster/${{ inputs.values-filename }} \
--wait \
--wait-for-jobs
- name: Wait for GreptimeDB
shell: bash
run: |
while true; do
PHASE=$(kubectl -n my-greptimedb get gtc my-greptimedb -o jsonpath='{.status.clusterPhase}')
if [ "$PHASE" == "Running" ]; then
echo "Cluster is ready"
break
else
echo "Cluster is not ready yet: Current phase: $PHASE"
kubectl get pods -n my-greptimedb
sleep 5 # wait for 5 seconds before check again.
fi
done
- name: Print GreptimeDB info
if: always()
shell: bash
run: |
kubectl get all --show-labels -n my-greptimedb
- name: Describe Nodes
if: always()
shell: bash
run: |
kubectl describe nodes


@@ -1,18 +0,0 @@
meta:
config: |-
[runtime]
read_rt_size = 8
write_rt_size = 8
bg_rt_size = 8
datanode:
config: |-
[runtime]
read_rt_size = 8
write_rt_size = 8
bg_rt_size = 8
frontend:
config: |-
[runtime]
read_rt_size = 8
write_rt_size = 8
bg_rt_size = 8


@@ -1,38 +0,0 @@
meta:
config: |-
[runtime]
read_rt_size = 8
write_rt_size = 8
bg_rt_size = 8
[datanode]
[datanode.client]
timeout = "60s"
datanode:
config: |-
[runtime]
read_rt_size = 8
write_rt_size = 8
bg_rt_size = 8
[storage]
cache_path = "/data/greptimedb/s3cache"
cache_capacity = "256MB"
frontend:
config: |-
[runtime]
read_rt_size = 8
write_rt_size = 8
bg_rt_size = 8
[meta_client]
ddl_timeout = "60s"
objectStorage:
s3:
bucket: default
region: us-west-2
root: test-root
endpoint: http://minio.minio.svc.cluster.local
credentials:
accessKeyId: rootuser
secretAccessKey: rootpass123


@@ -1,34 +0,0 @@
meta:
config: |-
[runtime]
read_rt_size = 8
write_rt_size = 8
bg_rt_size = 8
[datanode]
[datanode.client]
timeout = "60s"
datanode:
config: |-
[runtime]
read_rt_size = 8
write_rt_size = 8
bg_rt_size = 8
frontend:
config: |-
[runtime]
read_rt_size = 8
write_rt_size = 8
bg_rt_size = 8
[meta_client]
ddl_timeout = "60s"
objectStorage:
s3:
bucket: default
region: us-west-2
root: test-root
endpoint: http://minio.minio.svc.cluster.local
credentials:
accessKeyId: rootuser
secretAccessKey: rootpass123


@@ -1,45 +0,0 @@
meta:
config: |-
[runtime]
read_rt_size = 8
write_rt_size = 8
bg_rt_size = 8
[wal]
provider = "kafka"
broker_endpoints = ["kafka.kafka-cluster.svc.cluster.local:9092"]
num_topics = 3
[datanode]
[datanode.client]
timeout = "60s"
datanode:
config: |-
[runtime]
read_rt_size = 8
write_rt_size = 8
bg_rt_size = 8
[wal]
provider = "kafka"
broker_endpoints = ["kafka.kafka-cluster.svc.cluster.local:9092"]
linger = "2ms"
frontend:
config: |-
[runtime]
read_rt_size = 8
write_rt_size = 8
bg_rt_size = 8
[meta_client]
ddl_timeout = "60s"
objectStorage:
s3:
bucket: default
region: us-west-2
root: test-root
endpoint: http://minio.minio.svc.cluster.local
credentials:
accessKeyId: rootuser
secretAccessKey: rootpass123


@@ -1,24 +0,0 @@
name: Setup Kafka cluster
description: Deploy Kafka cluster on Kubernetes
inputs:
controller-replicas:
default: 3
description: "Kafka controller replicas"
namespace:
default: "kafka-cluster"
runs:
using: composite
steps:
- name: Install Kafka cluster
shell: bash
run: |
helm upgrade \
--install kafka oci://registry-1.docker.io/bitnamicharts/kafka \
--set controller.replicaCount=${{ inputs.controller-replicas }} \
--set controller.resources.requests.cpu=50m \
--set controller.resources.requests.memory=128Mi \
--set listeners.controller.protocol=PLAINTEXT \
--set listeners.client.protocol=PLAINTEXT \
--create-namespace \
-n ${{ inputs.namespace }}


@@ -1,10 +0,0 @@
name: Setup Kind
description: Deploy Kind
runs:
using: composite
steps:
- uses: actions/checkout@v4
- name: Create kind cluster
shell: bash
run: |
./.github/scripts/kind-with-registry.sh


@@ -1,24 +0,0 @@
name: Setup Minio cluster
description: Deploy Minio cluster on Kubernetes
inputs:
replicas:
default: 1
description: "replicas"
runs:
using: composite
steps:
- name: Install Etcd cluster
shell: bash
run: |
helm repo add minio https://charts.min.io/
helm upgrade --install minio \
--set resources.requests.memory=128Mi \
--set replicas=${{ inputs.replicas }} \
--set mode=standalone \
--set rootUser=rootuser,rootPassword=rootpass123 \
--set buckets[0].name=default \
--set service.port=80,service.targetPort=9000 \
minio/minio \
--create-namespace \
-n minio


@@ -57,14 +57,3 @@ runs:
AWS_SECRET_ACCESS_KEY: ${{ inputs.aws-secret-access-key }} AWS_SECRET_ACCESS_KEY: ${{ inputs.aws-secret-access-key }}
run: | run: |
aws s3 rm s3://${{ inputs.aws-ci-test-bucket }}/${{ inputs.data-root }} --recursive aws s3 rm s3://${{ inputs.aws-ci-test-bucket }}/${{ inputs.data-root }} --recursive
- name: Export kind logs
if: failure()
shell: bash
run: kind export logs -n greptimedb-operator-e2e /tmp/kind
- name: Upload logs
if: failure()
uses: actions/upload-artifact@v4
with:
name: kind-logs
path: /tmp/kind
retention-days: 3

.github/doc-label-config.yml (new file)

@@ -0,0 +1,4 @@
Doc not needed:
- '- \[x\] This PR does not require documentation updates.'
Doc update required:
- '- \[ \] This PR does not require documentation updates.'
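
These two patterns key off the checklist line that the updated pull request template (changed later in this compare) puts into every PR body. As a rough illustration, a small Python sketch of the mapping follows; it assumes the labeler used by doc-label.yml (github/issue-labeler with versioned regexes disabled) applies a label whenever its pattern matches the PR body, and the sample bodies are invented.

import re

# Patterns copied from doc-label-config.yml above; a label is applied when its
# regex matches the PR body (assumed github/issue-labeler behaviour).
RULES = {
    "Doc not needed":      r"- \[x\] This PR does not require documentation updates.",
    "Doc update required": r"- \[ \] This PR does not require documentation updates.",
}

def doc_labels(pr_body: str) -> list[str]:
    """Return the doc labels whose pattern appears somewhere in the PR body."""
    return [label for label, pattern in RULES.items() if re.search(pattern, pr_body)]

# Hypothetical PR bodies containing the checklist line from the PR template:
print(doc_labels("- [x] This PR does not require documentation updates."))  # ['Doc not needed']
print(doc_labels("- [ ] This PR does not require documentation updates."))  # ['Doc update required']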


@@ -0,0 +1,13 @@
{
"LABEL": {
"name": "breaking change",
"color": "D93F0B"
},
"CHECKS": {
"regexp": "^(?:(?!!:).)*$",
"ignoreLabels": [
"ignore-title"
],
"alwaysPassCI": true
}
}

.github/pr-title-checker-config.json (new file)

@@ -0,0 +1,12 @@
{
"LABEL": {
"name": "Invalid PR Title",
"color": "B60205"
},
"CHECKS": {
"regexp": "^(feat|fix|test|refactor|chore|style|docs|perf|build|ci|revert)(\\(.*\\))?\\!?:.*",
"ignoreLabels": [
"ignore-title"
]
}
}
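
Together with the breaking-change config above, this regexp drives the new pr-title-checker workflow added later in this compare. The following Python sketch shows what the two expressions accept; the sample titles are invented, and it assumes thehanimo/pr-title-checker applies the configured label exactly when a title fails CHECKS.regexp (with alwaysPassCI keeping the breaking-change check non-blocking).

import re

# Regexes copied from the two checker configs above.
CONVENTIONAL = re.compile(
    r"^(feat|fix|test|refactor|chore|style|docs|perf|build|ci|revert)(\(.*\))?\!?:.*"
)
NON_BREAKING = re.compile(r"^(?:(?!!:).)*$")  # matches only titles that contain no "!:"

# Invented example titles:
for title in [
    "feat: add CREATE VIEW statement",    # conventional, non-breaking
    "feat(query)!: change plan output",   # conventional, would get the "breaking change" label
    "Add create view",                    # would get the "Invalid PR Title" label
]:
    valid = bool(CONVENTIONAL.match(title))
    breaking = NON_BREAKING.match(title) is None
    print(f"{title!r}: valid={valid}, breaking={breaking}")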


@@ -15,6 +15,6 @@ Please explain IN DETAIL what the changes are in this PR and why they are needed
## Checklist ## Checklist
- [ ] I have written the necessary rustdoc comments. - [ ] I have written the necessary rustdoc comments.
- [ ] I have added the necessary unit tests and integration tests. - [ ] I have added the necessary unit tests and integration tests.
- [ ] This PR requires documentation updates. - [x] This PR does not require documentation updates.


@@ -1,66 +0,0 @@
#!/usr/bin/env bash
set -e
set -o pipefail
# 1. Create registry container unless it already exists
reg_name='kind-registry'
reg_port='5001'
if [ "$(docker inspect -f '{{.State.Running}}' "${reg_name}" 2>/dev/null || true)" != 'true' ]; then
docker run \
-d --restart=always -p "127.0.0.1:${reg_port}:5000" --network bridge --name "${reg_name}" \
registry:2
fi
# 2. Create kind cluster with containerd registry config dir enabled
# TODO: kind will eventually enable this by default and this patch will
# be unnecessary.
#
# See:
# https://github.com/kubernetes-sigs/kind/issues/2875
# https://github.com/containerd/containerd/blob/main/docs/cri/config.md#registry-configuration
# See: https://github.com/containerd/containerd/blob/main/docs/hosts.md
cat <<EOF | kind create cluster --wait 2m --config=-
kind: Cluster
apiVersion: kind.x-k8s.io/v1alpha4
containerdConfigPatches:
- |-
[plugins."io.containerd.grpc.v1.cri".registry]
config_path = "/etc/containerd/certs.d"
EOF
# 3. Add the registry config to the nodes
#
# This is necessary because localhost resolves to loopback addresses that are
# network-namespace local.
# In other words: localhost in the container is not localhost on the host.
#
# We want a consistent name that works from both ends, so we tell containerd to
# alias localhost:${reg_port} to the registry container when pulling images
REGISTRY_DIR="/etc/containerd/certs.d/localhost:${reg_port}"
for node in $(kind get nodes); do
docker exec "${node}" mkdir -p "${REGISTRY_DIR}"
cat <<EOF | docker exec -i "${node}" cp /dev/stdin "${REGISTRY_DIR}/hosts.toml"
[host."http://${reg_name}:5000"]
EOF
done
# 4. Connect the registry to the cluster network if not already connected
# This allows kind to bootstrap the network but ensures they're on the same network
if [ "$(docker inspect -f='{{json .NetworkSettings.Networks.kind}}' "${reg_name}")" = 'null' ]; then
docker network connect "kind" "${reg_name}"
fi
# 5. Document the local registry
# https://github.com/kubernetes/enhancements/tree/master/keps/sig-cluster-lifecycle/generic/1755-communicating-a-local-registry
cat <<EOF | kubectl apply -f -
apiVersion: v1
kind: ConfigMap
metadata:
name: local-registry-hosting
namespace: kube-public
data:
localRegistryHosting.v1: |
host: "localhost:${reg_port}"
help: "https://kind.sigs.k8s.io/docs/user/local-registry/"
EOF


@@ -13,7 +13,7 @@ on:
name: Build API docs name: Build API docs
env: env:
RUST_TOOLCHAIN: nightly-2024-04-20 RUST_TOOLCHAIN: nightly-2024-04-18
jobs: jobs:
apidoc: apidoc:


@@ -82,9 +82,6 @@ env:
# The source code will check out in the following path: '${WORKING_DIR}/dev/greptime'. # The source code will check out in the following path: '${WORKING_DIR}/dev/greptime'.
CHECKOUT_GREPTIMEDB_PATH: dev/greptimedb CHECKOUT_GREPTIMEDB_PATH: dev/greptimedb
permissions:
issues: write
jobs: jobs:
allocate-runners: allocate-runners:
name: Allocate runners name: Allocate runners
@@ -324,7 +321,7 @@ jobs:
github-token: ${{ secrets.GH_PERSONAL_ACCESS_TOKEN }} github-token: ${{ secrets.GH_PERSONAL_ACCESS_TOKEN }}
notification: notification:
if: ${{ github.repository == 'GreptimeTeam/greptimedb' && always() }} # Not requiring successful dependent jobs, always run. if: ${{ always() }} # Not requiring successful dependent jobs, always run.
name: Send notification to Greptime team name: Send notification to Greptime team
needs: [ needs: [
release-images-to-dockerhub release-images-to-dockerhub
@@ -333,25 +330,16 @@ jobs:
env: env:
SLACK_WEBHOOK_URL: ${{ secrets.SLACK_WEBHOOK_URL_DEVELOP_CHANNEL }} SLACK_WEBHOOK_URL: ${{ secrets.SLACK_WEBHOOK_URL_DEVELOP_CHANNEL }}
steps: steps:
- uses: actions/checkout@v4 - name: Notifiy dev build successful result
- uses: ./.github/actions/setup-cyborg
- name: Report CI status
id: report-ci-status
working-directory: cyborg
run: pnpm tsx bin/report-ci-failure.ts
env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
CI_REPORT_STATUS: ${{ needs.release-images-to-dockerhub.outputs.build-result == 'success' }}
- name: Notify dev build successful result
uses: slackapi/slack-github-action@v1.23.0 uses: slackapi/slack-github-action@v1.23.0
if: ${{ needs.release-images-to-dockerhub.outputs.build-result == 'success' }} if: ${{ needs.release-images-to-dockerhub.outputs.build-result == 'success' }}
with: with:
payload: | payload: |
{"text": "GreptimeDB's ${{ env.NEXT_RELEASE_VERSION }} build has completed successfully."} {"text": "GreptimeDB's ${{ env.NEXT_RELEASE_VERSION }} build has completed successfully."}
- name: Notify dev build failed result - name: Notifiy dev build failed result
uses: slackapi/slack-github-action@v1.23.0 uses: slackapi/slack-github-action@v1.23.0
if: ${{ needs.release-images-to-dockerhub.outputs.build-result != 'success' }} if: ${{ needs.release-images-to-dockerhub.outputs.build-result != 'success' }}
with: with:
payload: | payload: |
{"text": "GreptimeDB's ${{ env.NEXT_RELEASE_VERSION }} build has failed, please check ${{ steps.report-ci-status.outputs.html_url }}."} {"text": "GreptimeDB's ${{ env.NEXT_RELEASE_VERSION }} build has failed, please check 'https://github.com/GreptimeTeam/greptimedb/actions/workflows/${{ env.NEXT_RELEASE_VERSION }}-build.yml'."}


@@ -30,7 +30,7 @@ concurrency:
cancel-in-progress: true cancel-in-progress: true
env: env:
RUST_TOOLCHAIN: nightly-2024-04-20 RUST_TOOLCHAIN: nightly-2024-04-18
jobs: jobs:
check-typos-and-docs: check-typos-and-docs:
@@ -38,26 +38,19 @@ jobs:
runs-on: ubuntu-20.04 runs-on: ubuntu-20.04
steps: steps:
- uses: actions/checkout@v4 - uses: actions/checkout@v4
- uses: crate-ci/typos@master - uses: crate-ci/typos@v1.13.10
- name: Check the config docs - name: Check the config docs
run: | run: |
make config-docs && \ make config-docs && \
git diff --name-only --exit-code ./config/config.md \ git diff --name-only --exit-code ./config/config.md \
|| (echo "'config/config.md' is not up-to-date, please run 'make config-docs'." && exit 1) || (echo "'config/config.md' is not up-to-date, please run 'make config-docs'." && exit 1)
license-header-check:
runs-on: ubuntu-20.04
name: Check License Header
steps:
- uses: actions/checkout@v4
- uses: korandoru/hawkeye@v5
check: check:
name: Check name: Check
runs-on: ${{ matrix.os }} runs-on: ${{ matrix.os }}
strategy: strategy:
matrix: matrix:
os: [ windows-2022, ubuntu-20.04 ] os: [ windows-latest, ubuntu-20.04 ]
timeout-minutes: 60 timeout-minutes: 60
steps: steps:
- uses: actions/checkout@v4 - uses: actions/checkout@v4
@@ -114,13 +107,9 @@ jobs:
with: with:
# Shares across multiple jobs # Shares across multiple jobs
shared-key: "build-binaries" shared-key: "build-binaries"
- name: Install cargo-gc-bin
shell: bash
run: cargo install cargo-gc-bin
- name: Build greptime binaries - name: Build greptime binaries
shell: bash shell: bash
# `cargo gc` will invoke `cargo build` with specified args run: cargo build --bin greptime --bin sqlness-runner
run: cargo gc -- --bin greptime --bin sqlness-runner
- name: Pack greptime binaries - name: Pack greptime binaries
shell: bash shell: bash
run: | run: |
@@ -141,54 +130,7 @@ jobs:
runs-on: ubuntu-latest runs-on: ubuntu-latest
strategy: strategy:
matrix: matrix:
target: [ "fuzz_create_table", "fuzz_alter_table", "fuzz_create_database", "fuzz_create_logical_table", "fuzz_alter_logical_table", "fuzz_insert", "fuzz_insert_logical_table" ] target: [ "fuzz_create_table", "fuzz_alter_table", "fuzz_create_database" ]
steps:
- uses: actions/checkout@v4
- uses: arduino/setup-protoc@v3
with:
repo-token: ${{ secrets.GITHUB_TOKEN }}
- uses: dtolnay/rust-toolchain@master
with:
toolchain: ${{ env.RUST_TOOLCHAIN }}
- name: Rust Cache
uses: Swatinem/rust-cache@v2
with:
# Shares across multiple jobs
shared-key: "fuzz-test-targets"
- name: Set Rust Fuzz
shell: bash
run: |
sudo apt-get install -y libfuzzer-14-dev
rustup install nightly
cargo +nightly install cargo-fuzz cargo-gc-bin
- name: Download pre-built binaries
uses: actions/download-artifact@v4
with:
name: bins
path: .
- name: Unzip binaries
run: |
tar -xvf ./bins.tar.gz
rm ./bins.tar.gz
- name: Run GreptimeDB
run: |
./bins/greptime standalone start&
- name: Fuzz Test
uses: ./.github/actions/fuzz-test
env:
CUSTOM_LIBFUZZER_PATH: /usr/lib/llvm-14/lib/libFuzzer.a
GT_MYSQL_ADDR: 127.0.0.1:4002
with:
target: ${{ matrix.target }}
max-total-time: 120
unstable-fuzztest:
name: Unstable Fuzz Test
needs: build-greptime-ci
runs-on: ubuntu-latest
strategy:
matrix:
target: [ "unstable_fuzz_create_table_standalone" ]
steps: steps:
- uses: actions/checkout@v4 - uses: actions/checkout@v4
- uses: arduino/setup-protoc@v3 - uses: arduino/setup-protoc@v3
@@ -206,223 +148,31 @@ jobs:
shell: bash shell: bash
run: | run: |
sudo apt update && sudo apt install -y libfuzzer-14-dev sudo apt update && sudo apt install -y libfuzzer-14-dev
cargo install cargo-fuzz cargo-gc-bin cargo install cargo-fuzz
- name: Download pre-built binariy - name: Download pre-built binaries
uses: actions/download-artifact@v4 uses: actions/download-artifact@v4
with: with:
name: bin name: bins
path: . path: .
- name: Unzip bianry - name: Unzip binaries
run: tar -xvf ./bins.tar.gz
- name: Run GreptimeDB
run: | run: |
tar -xvf ./bin.tar.gz ./bins/greptime standalone start&
rm ./bin.tar.gz
- name: Run Fuzz Test
uses: ./.github/actions/fuzz-test
env:
CUSTOM_LIBFUZZER_PATH: /usr/lib/llvm-14/lib/libFuzzer.a
GT_MYSQL_ADDR: 127.0.0.1:4002
GT_FUZZ_BINARY_PATH: ./bin/greptime
GT_FUZZ_INSTANCE_ROOT_DIR: /tmp/unstable-greptime/
with:
target: ${{ matrix.target }}
max-total-time: 120
unstable: 'true'
- name: Upload unstable fuzz test logs
if: failure()
uses: actions/upload-artifact@v4
with:
name: unstable-fuzz-logs
path: /tmp/unstable-greptime/
retention-days: 3
build-greptime-ci:
name: Build GreptimeDB binary (profile-CI)
runs-on: ${{ matrix.os }}
strategy:
matrix:
os: [ ubuntu-20.04 ]
timeout-minutes: 60
steps:
- uses: actions/checkout@v4
- uses: arduino/setup-protoc@v3
with:
repo-token: ${{ secrets.GITHUB_TOKEN }}
- uses: dtolnay/rust-toolchain@master
with:
toolchain: ${{ env.RUST_TOOLCHAIN }}
- uses: Swatinem/rust-cache@v2
with:
# Shares across multiple jobs
shared-key: "build-greptime-ci"
- name: Install cargo-gc-bin
shell: bash
run: cargo install cargo-gc-bin
- name: Build greptime bianry
shell: bash
# `cargo gc` will invoke `cargo build` with specified args
run: cargo gc --profile ci -- --bin greptime
- name: Pack greptime binary
shell: bash
run: |
mkdir bin && \
mv ./target/ci/greptime bin
- name: Print greptime binaries info
run: ls -lh bin
- name: Upload artifacts
uses: ./.github/actions/upload-artifacts
with:
artifacts-dir: bin
version: current
distributed-fuzztest:
name: Fuzz Test (Distributed, ${{ matrix.mode.name }}, ${{ matrix.target }})
runs-on: ubuntu-latest
needs: build-greptime-ci
strategy:
matrix:
target: [ "fuzz_create_table", "fuzz_alter_table", "fuzz_create_database", "fuzz_create_logical_table", "fuzz_alter_logical_table", "fuzz_insert", "fuzz_insert_logical_table" ]
mode:
- name: "Disk"
minio: false
kafka: false
values: "with-disk.yaml"
- name: "Minio"
minio: true
kafka: false
values: "with-minio.yaml"
- name: "Minio with Cache"
minio: true
kafka: false
values: "with-minio-and-cache.yaml"
- name: "Remote WAL"
minio: true
kafka: true
values: "with-remote-wal.yaml"
steps:
- uses: actions/checkout@v4
- name: Setup Kind
uses: ./.github/actions/setup-kind
- if: matrix.mode.minio
name: Setup Minio
uses: ./.github/actions/setup-minio
- if: matrix.mode.kafka
name: Setup Kafka cluser
uses: ./.github/actions/setup-kafka-cluster
- name: Setup Etcd cluser
uses: ./.github/actions/setup-etcd-cluster
# Prepares for fuzz tests
- uses: arduino/setup-protoc@v3
with:
repo-token: ${{ secrets.GITHUB_TOKEN }}
- uses: dtolnay/rust-toolchain@master
with:
toolchain: ${{ env.RUST_TOOLCHAIN }}
- name: Rust Cache
uses: Swatinem/rust-cache@v2
with:
# Shares across multiple jobs
shared-key: "fuzz-test-targets"
- name: Set Rust Fuzz
shell: bash
run: |
sudo apt-get install -y libfuzzer-14-dev
rustup install nightly
cargo +nightly install cargo-fuzz cargo-gc-bin
# Downloads ci image
- name: Download pre-built binariy
uses: actions/download-artifact@v4
with:
name: bin
path: .
- name: Unzip binary
run: |
tar -xvf ./bin.tar.gz
rm ./bin.tar.gz
- name: Build and push GreptimeDB image
uses: ./.github/actions/build-and-push-ci-image
- name: Wait for etcd
run: |
kubectl wait \
--for=condition=Ready \
pod -l app.kubernetes.io/instance=etcd \
--timeout=120s \
-n etcd-cluster
- if: matrix.mode.minio
name: Wait for minio
run: |
kubectl wait \
--for=condition=Ready \
pod -l app=minio \
--timeout=120s \
-n minio
- if: matrix.mode.kafka
name: Wait for kafka
run: |
kubectl wait \
--for=condition=Ready \
pod -l app.kubernetes.io/instance=kafka \
--timeout=120s \
-n kafka-cluster
- name: Print etcd info
shell: bash
run: kubectl get all --show-labels -n etcd-cluster
# Setup cluster for test
- name: Setup GreptimeDB cluster
uses: ./.github/actions/setup-greptimedb-cluster
with:
image-registry: localhost:5001
values-filename: ${{ matrix.mode.values }}
- name: Port forward (mysql)
run: |
kubectl port-forward service/my-greptimedb-frontend 4002:4002 -n my-greptimedb&
- name: Fuzz Test - name: Fuzz Test
uses: ./.github/actions/fuzz-test uses: ./.github/actions/fuzz-test
env: env:
CUSTOM_LIBFUZZER_PATH: /usr/lib/llvm-14/lib/libFuzzer.a CUSTOM_LIBFUZZER_PATH: /usr/lib/llvm-14/lib/libFuzzer.a
GT_MYSQL_ADDR: 127.0.0.1:4002
with: with:
target: ${{ matrix.target }} target: ${{ matrix.target }}
max-total-time: 120
- name: Describe Nodes
if: failure()
shell: bash
run: |
kubectl describe nodes
- name: Export kind logs
if: failure()
shell: bash
run: |
kind export logs /tmp/kind
- name: Upload logs
if: failure()
uses: actions/upload-artifact@v4
with:
name: fuzz-tests-kind-logs-${{ matrix.mode.name }}-${{ matrix.target }}
path: /tmp/kind
retention-days: 3
- name: Delete cluster
if: success()
shell: bash
run: |
kind delete cluster
docker stop $(docker ps -a -q)
docker rm $(docker ps -a -q)
docker system prune -f
sqlness: sqlness:
name: Sqlness Test (${{ matrix.mode.name }}) name: Sqlness Test
needs: build needs: build
runs-on: ${{ matrix.os }} runs-on: ${{ matrix.os }}
strategy: strategy:
matrix: matrix:
os: [ ubuntu-20.04 ] os: [ ubuntu-20.04 ]
mode:
- name: "Basic"
opts: ""
kafka: false
- name: "Remote WAL"
opts: "-w kafka -k 127.0.0.1:9092"
kafka: true
timeout-minutes: 60 timeout-minutes: 60
steps: steps:
- uses: actions/checkout@v4 - uses: actions/checkout@v4
@@ -433,18 +183,44 @@ jobs:
path: . path: .
- name: Unzip binaries - name: Unzip binaries
run: tar -xvf ./bins.tar.gz run: tar -xvf ./bins.tar.gz
- if: matrix.mode.kafka - name: Run sqlness
name: Setup kafka server run: RUST_BACKTRACE=1 ./bins/sqlness-runner -c ./tests/cases --bins-dir ./bins
- name: Upload sqlness logs
if: always()
uses: actions/upload-artifact@v4
with:
name: sqlness-logs
path: /tmp/greptime-*.log
retention-days: 3
sqlness-kafka-wal:
name: Sqlness Test with Kafka Wal
needs: build
runs-on: ${{ matrix.os }}
strategy:
matrix:
os: [ ubuntu-20.04 ]
timeout-minutes: 60
steps:
- uses: actions/checkout@v4
- name: Download pre-built binaries
uses: actions/download-artifact@v4
with:
name: bins
path: .
- name: Unzip binaries
run: tar -xvf ./bins.tar.gz
- name: Setup kafka server
working-directory: tests-integration/fixtures/kafka working-directory: tests-integration/fixtures/kafka
run: docker compose -f docker-compose-standalone.yml up -d --wait run: docker compose -f docker-compose-standalone.yml up -d --wait
- name: Run sqlness - name: Run sqlness
run: RUST_BACKTRACE=1 ./bins/sqlness-runner ${{ matrix.mode.opts }} -c ./tests/cases --bins-dir ./bins --preserve-state run: RUST_BACKTRACE=1 ./bins/sqlness-runner -w kafka -k 127.0.0.1:9092 -c ./tests/cases --bins-dir ./bins
- name: Upload sqlness logs - name: Upload sqlness logs
if: failure() if: always()
uses: actions/upload-artifact@v4 uses: actions/upload-artifact@v4
with: with:
name: sqlness-logs-${{ matrix.mode.name }} name: sqlness-logs-with-kafka-wal
path: /tmp/sqlness* path: /tmp/greptime-*.log
retention-days: 3 retention-days: 3
fmt: fmt:
@@ -488,7 +264,7 @@ jobs:
# Shares with `Check` job # Shares with `Check` job
shared-key: "check-lint" shared-key: "check-lint"
- name: Run cargo clippy - name: Run cargo clippy
run: make clippy run: cargo clippy --workspace --all-targets -- -D warnings
coverage: coverage:
if: github.event.pull_request.draft == false if: github.event.pull_request.draft == false
@@ -532,9 +308,6 @@ jobs:
- name: Setup kafka server - name: Setup kafka server
working-directory: tests-integration/fixtures/kafka working-directory: tests-integration/fixtures/kafka
run: docker compose -f docker-compose-standalone.yml up -d --wait run: docker compose -f docker-compose-standalone.yml up -d --wait
- name: Setup minio
working-directory: tests-integration/fixtures/minio
run: docker compose -f docker-compose-standalone.yml up -d --wait
- name: Run nextest cases - name: Run nextest cases
run: cargo llvm-cov nextest --workspace --lcov --output-path lcov.info -F pyo3_backend -F dashboard run: cargo llvm-cov nextest --workspace --lcov --output-path lcov.info -F pyo3_backend -F dashboard
env: env:
@@ -545,11 +318,6 @@ jobs:
GT_S3_ACCESS_KEY_ID: ${{ secrets.AWS_CI_TEST_ACCESS_KEY_ID }} GT_S3_ACCESS_KEY_ID: ${{ secrets.AWS_CI_TEST_ACCESS_KEY_ID }}
GT_S3_ACCESS_KEY: ${{ secrets.AWS_CI_TEST_SECRET_ACCESS_KEY }} GT_S3_ACCESS_KEY: ${{ secrets.AWS_CI_TEST_SECRET_ACCESS_KEY }}
GT_S3_REGION: ${{ vars.AWS_CI_TEST_BUCKET_REGION }} GT_S3_REGION: ${{ vars.AWS_CI_TEST_BUCKET_REGION }}
GT_MINIO_BUCKET: greptime
GT_MINIO_ACCESS_KEY_ID: superpower_ci_user
GT_MINIO_ACCESS_KEY: superpower_password
GT_MINIO_REGION: us-west-2
GT_MINIO_ENDPOINT_URL: http://127.0.0.1:9000
GT_ETCD_ENDPOINTS: http://127.0.0.1:2379 GT_ETCD_ENDPOINTS: http://127.0.0.1:2379
GT_KAFKA_ENDPOINTS: 127.0.0.1:9092 GT_KAFKA_ENDPOINTS: 127.0.0.1:9092
UNITTEST_LOG_DIR: "__unittest_logs" UNITTEST_LOG_DIR: "__unittest_logs"
@@ -562,20 +330,20 @@ jobs:
fail_ci_if_error: false fail_ci_if_error: false
verbose: true verbose: true
# compat: compat:
# name: Compatibility Test name: Compatibility Test
# needs: build needs: build
# runs-on: ubuntu-20.04 runs-on: ubuntu-20.04
# timeout-minutes: 60 timeout-minutes: 60
# steps: steps:
# - uses: actions/checkout@v4 - uses: actions/checkout@v4
# - name: Download pre-built binaries - name: Download pre-built binaries
# uses: actions/download-artifact@v4 uses: actions/download-artifact@v4
# with: with:
# name: bins name: bins
# path: . path: .
# - name: Unzip binaries - name: Unzip binaries
# run: | run: |
# mkdir -p ./bins/current mkdir -p ./bins/current
# tar -xvf ./bins.tar.gz --strip-components=1 -C ./bins/current tar -xvf ./bins.tar.gz --strip-components=1 -C ./bins/current
# - run: ./tests/compat/test-compat.sh 0.6.0 - run: ./tests/compat/test-compat.sh 0.6.0

.github/workflows/doc-issue.yml (new file)

@@ -0,0 +1,39 @@
name: Create Issue in downstream repos
on:
issues:
types:
- labeled
pull_request_target:
types:
- labeled
jobs:
doc_issue:
if: github.event.label.name == 'doc update required'
runs-on: ubuntu-20.04
steps:
- name: create an issue in doc repo
uses: dacbd/create-issue-action@v1.2.1
with:
owner: GreptimeTeam
repo: docs
token: ${{ secrets.DOCS_REPO_TOKEN }}
title: Update docs for ${{ github.event.issue.title || github.event.pull_request.title }}
body: |
A document change request is generated from
${{ github.event.issue.html_url || github.event.pull_request.html_url }}
cloud_issue:
if: github.event.label.name == 'cloud followup required'
runs-on: ubuntu-20.04
steps:
- name: create an issue in cloud repo
uses: dacbd/create-issue-action@v1.2.1
with:
owner: GreptimeTeam
repo: greptimedb-cloud
token: ${{ secrets.DOCS_REPO_TOKEN }}
title: Followup changes in ${{ github.event.issue.title || github.event.pull_request.title }}
body: |
A followup request is generated from
${{ github.event.issue.html_url || github.event.pull_request.html_url }}

.github/workflows/doc-label.yml (new file)

@@ -0,0 +1,36 @@
name: "PR Doc Labeler"
on:
pull_request_target:
types: [opened, edited, synchronize, ready_for_review, auto_merge_enabled, labeled, unlabeled]
permissions:
pull-requests: write
contents: read
jobs:
triage:
if: ${{ github.repository == 'GreptimeTeam/greptimedb' }}
runs-on: ubuntu-latest
steps:
- uses: github/issue-labeler@v3.4
with:
configuration-path: .github/doc-label-config.yml
enable-versioned-regex: false
repo-token: ${{ secrets.GITHUB_TOKEN }}
sync-labels: 1
- name: create an issue in doc repo
uses: dacbd/create-issue-action@v1.2.1
if: ${{ github.event.action == 'opened' && contains(github.event.pull_request.body, '- [ ] This PR does not require documentation updates.') }}
with:
owner: GreptimeTeam
repo: docs
token: ${{ secrets.DOCS_REPO_TOKEN }}
title: Update docs for ${{ github.event.issue.title || github.event.pull_request.title }}
body: |
A document change request is generated from
${{ github.event.issue.html_url || github.event.pull_request.html_url }}
- name: Check doc labels
uses: docker://agilepathway/pull-request-label-checker:latest
with:
one_of: Doc update required,Doc not needed
repo_token: ${{ secrets.GITHUB_TOKEN }}


@@ -1,22 +0,0 @@
name: Follow Up Docs
on:
pull_request_target:
types: [opened, edited]
permissions:
pull-requests: write
contents: read
jobs:
docbot:
runs-on: ubuntu-20.04
timeout-minutes: 10
steps:
- uses: actions/checkout@v4
- uses: ./.github/actions/setup-cyborg
- name: Maybe Follow Up Docs Issue
working-directory: cyborg
run: pnpm tsx bin/follow-up-docs-issue.ts
env:
DOCS_REPO_TOKEN: ${{ secrets.DOCS_REPO_TOKEN }}
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}


@@ -34,14 +34,7 @@ jobs:
runs-on: ubuntu-20.04 runs-on: ubuntu-20.04
steps: steps:
- uses: actions/checkout@v4 - uses: actions/checkout@v4
- uses: crate-ci/typos@master - uses: crate-ci/typos@v1.13.10
license-header-check:
runs-on: ubuntu-20.04
name: Check License Header
steps:
- uses: actions/checkout@v4
- uses: korandoru/hawkeye@v5
check: check:
name: Check name: Check

.github/workflows/license.yaml (new file)

@@ -0,0 +1,16 @@
name: License checker
on:
push:
branches:
- main
pull_request:
types: [opened, synchronize, reopened, ready_for_review]
jobs:
license-header-check:
runs-on: ubuntu-20.04
name: license-header-check
steps:
- uses: actions/checkout@v4
- name: Check License Header
uses: korandoru/hawkeye@v5


@@ -66,13 +66,6 @@ env:
NIGHTLY_RELEASE_PREFIX: nightly NIGHTLY_RELEASE_PREFIX: nightly
# Use the different image name to avoid conflict with the release images.
# The DockerHub image will be greptime/greptimedb-nightly.
IMAGE_NAME: greptimedb-nightly
permissions:
issues: write
jobs: jobs:
allocate-runners: allocate-runners:
name: Allocate runners name: Allocate runners
@@ -195,7 +188,6 @@ jobs:
with: with:
image-registry: docker.io image-registry: docker.io
image-namespace: ${{ vars.IMAGE_NAMESPACE }} image-namespace: ${{ vars.IMAGE_NAMESPACE }}
image-name: ${{ env.IMAGE_NAME }}
image-registry-username: ${{ secrets.DOCKERHUB_USERNAME }} image-registry-username: ${{ secrets.DOCKERHUB_USERNAME }}
image-registry-password: ${{ secrets.DOCKERHUB_TOKEN }} image-registry-password: ${{ secrets.DOCKERHUB_TOKEN }}
version: ${{ needs.allocate-runners.outputs.version }} version: ${{ needs.allocate-runners.outputs.version }}
@@ -228,7 +220,7 @@ jobs:
with: with:
src-image-registry: docker.io src-image-registry: docker.io
src-image-namespace: ${{ vars.IMAGE_NAMESPACE }} src-image-namespace: ${{ vars.IMAGE_NAMESPACE }}
src-image-name: ${{ env.IMAGE_NAME }} src-image-name: greptimedb
dst-image-registry-username: ${{ secrets.ALICLOUD_USERNAME }} dst-image-registry-username: ${{ secrets.ALICLOUD_USERNAME }}
dst-image-registry-password: ${{ secrets.ALICLOUD_PASSWORD }} dst-image-registry-password: ${{ secrets.ALICLOUD_PASSWORD }}
dst-image-registry: ${{ vars.ACR_IMAGE_REGISTRY }} dst-image-registry: ${{ vars.ACR_IMAGE_REGISTRY }}
@@ -293,7 +285,7 @@ jobs:
github-token: ${{ secrets.GH_PERSONAL_ACCESS_TOKEN }} github-token: ${{ secrets.GH_PERSONAL_ACCESS_TOKEN }}
notification: notification:
if: ${{ github.repository == 'GreptimeTeam/greptimedb' && always() }} # Not requiring successful dependent jobs, always run. if: ${{ always() }} # Not requiring successful dependent jobs, always run.
name: Send notification to Greptime team name: Send notification to Greptime team
needs: [ needs: [
release-images-to-dockerhub release-images-to-dockerhub
@@ -302,25 +294,16 @@ jobs:
env: env:
SLACK_WEBHOOK_URL: ${{ secrets.SLACK_WEBHOOK_URL_DEVELOP_CHANNEL }} SLACK_WEBHOOK_URL: ${{ secrets.SLACK_WEBHOOK_URL_DEVELOP_CHANNEL }}
steps: steps:
- uses: actions/checkout@v4 - name: Notifiy nightly build successful result
- uses: ./.github/actions/setup-cyborg
- name: Report CI status
id: report-ci-status
working-directory: cyborg
run: pnpm tsx bin/report-ci-failure.ts
env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
CI_REPORT_STATUS: ${{ needs.release-images-to-dockerhub.outputs.nightly-build-result == 'success' }}
- name: Notify nightly build successful result
uses: slackapi/slack-github-action@v1.23.0 uses: slackapi/slack-github-action@v1.23.0
if: ${{ needs.release-images-to-dockerhub.outputs.nightly-build-result == 'success' }} if: ${{ needs.release-images-to-dockerhub.outputs.nightly-build-result == 'success' }}
with: with:
payload: | payload: |
{"text": "GreptimeDB's ${{ env.NEXT_RELEASE_VERSION }} build has completed successfully."} {"text": "GreptimeDB's ${{ env.NEXT_RELEASE_VERSION }} build has completed successfully."}
- name: Notify nightly build failed result - name: Notifiy nightly build failed result
uses: slackapi/slack-github-action@v1.23.0 uses: slackapi/slack-github-action@v1.23.0
if: ${{ needs.release-images-to-dockerhub.outputs.nightly-build-result != 'success' }} if: ${{ needs.release-images-to-dockerhub.outputs.nightly-build-result != 'success' }}
with: with:
payload: | payload: |
{"text": "GreptimeDB's ${{ env.NEXT_RELEASE_VERSION }} build has failed, please check ${{ steps.report-ci-status.outputs.html_url }}."} {"text": "GreptimeDB's ${{ env.NEXT_RELEASE_VERSION }} build has failed, please check 'https://github.com/GreptimeTeam/greptimedb/actions/workflows/${{ env.NEXT_RELEASE_VERSION }}-build.yml'."}


@@ -1,6 +1,8 @@
# Nightly CI: runs tests every night for our second tier plaforms (Windows)
on: on:
schedule: schedule:
- cron: "0 23 * * 1-5" - cron: '0 23 * * 1-5'
workflow_dispatch: workflow_dispatch:
name: Nightly CI name: Nightly CI
@@ -10,38 +12,19 @@ concurrency:
cancel-in-progress: true cancel-in-progress: true
env: env:
RUST_TOOLCHAIN: nightly-2024-04-20 RUST_TOOLCHAIN: nightly-2024-04-18
permissions:
issues: write
jobs: jobs:
sqlness-test: sqlness:
name: Run sqlness test name: Sqlness Test
if: ${{ github.repository == 'GreptimeTeam/greptimedb' }} if: ${{ github.repository == 'GreptimeTeam/greptimedb' }}
runs-on: ubuntu-22.04 runs-on: ${{ matrix.os }}
steps: strategy:
- name: Checkout matrix:
uses: actions/checkout@v4 os: [ windows-latest-8-cores ]
with:
fetch-depth: 0
- name: Run sqlness test
uses: ./.github/actions/sqlness-test
with:
data-root: sqlness-test
aws-ci-test-bucket: ${{ vars.AWS_CI_TEST_BUCKET }}
aws-region: ${{ vars.AWS_CI_TEST_BUCKET_REGION }}
aws-access-key-id: ${{ secrets.AWS_CI_TEST_ACCESS_KEY_ID }}
aws-secret-access-key: ${{ secrets.AWS_CI_TEST_SECRET_ACCESS_KEY }}
sqlness-windows:
name: Sqlness tests on Windows
if: ${{ github.repository == 'GreptimeTeam/greptimedb' }}
runs-on: windows-2022-8-cores
timeout-minutes: 60 timeout-minutes: 60
steps: steps:
- uses: actions/checkout@v4 - uses: actions/checkout@v4
- uses: ./.github/actions/setup-cyborg
- uses: arduino/setup-protoc@v3 - uses: arduino/setup-protoc@v3
with: with:
repo-token: ${{ secrets.GITHUB_TOKEN }} repo-token: ${{ secrets.GITHUB_TOKEN }}
@@ -52,6 +35,14 @@ jobs:
uses: Swatinem/rust-cache@v2 uses: Swatinem/rust-cache@v2
- name: Run sqlness - name: Run sqlness
run: cargo sqlness run: cargo sqlness
- name: Notify slack if failed
if: failure()
uses: slackapi/slack-github-action@v1.23.0
env:
SLACK_WEBHOOK_URL: ${{ secrets.SLACK_WEBHOOK_URL_DEVELOP_CHANNEL }}
with:
payload: |
{"text": "Nightly CI failed for sqlness tests"}
- name: Upload sqlness logs - name: Upload sqlness logs
if: always() if: always()
uses: actions/upload-artifact@v4 uses: actions/upload-artifact@v4
@@ -61,20 +52,15 @@ jobs:
retention-days: 3 retention-days: 3
test-on-windows: test-on-windows:
name: Run tests on Windows
if: ${{ github.repository == 'GreptimeTeam/greptimedb' }} if: ${{ github.repository == 'GreptimeTeam/greptimedb' }}
runs-on: windows-2022-8-cores runs-on: windows-latest-8-cores
timeout-minutes: 60 timeout-minutes: 60
steps: steps:
- run: git config --global core.autocrlf false - run: git config --global core.autocrlf false
- uses: actions/checkout@v4 - uses: actions/checkout@v4
- uses: ./.github/actions/setup-cyborg
- uses: arduino/setup-protoc@v3 - uses: arduino/setup-protoc@v3
with: with:
repo-token: ${{ secrets.GITHUB_TOKEN }} repo-token: ${{ secrets.GITHUB_TOKEN }}
- uses: KyleMayes/install-llvm-action@v1
with:
version: "14.0"
- name: Install Rust toolchain - name: Install Rust toolchain
uses: dtolnay/rust-toolchain@master uses: dtolnay/rust-toolchain@master
with: with:
@@ -87,7 +73,7 @@ jobs:
- name: Install Python - name: Install Python
uses: actions/setup-python@v5 uses: actions/setup-python@v5
with: with:
python-version: "3.10" python-version: '3.10'
- name: Install PyArrow Package - name: Install PyArrow Package
run: pip install pyarrow run: pip install pyarrow
- name: Install WSL distribution - name: Install WSL distribution
@@ -97,62 +83,18 @@ jobs:
- name: Running tests - name: Running tests
run: cargo nextest run -F pyo3_backend,dashboard run: cargo nextest run -F pyo3_backend,dashboard
env: env:
CARGO_BUILD_RUSTFLAGS: "-C linker=lld-link"
RUST_BACKTRACE: 1 RUST_BACKTRACE: 1
CARGO_INCREMENTAL: 0 CARGO_INCREMENTAL: 0
RUSTUP_WINDOWS_PATH_ADD_BIN: 1 # Workaround for https://github.com/nextest-rs/nextest/issues/1493
GT_S3_BUCKET: ${{ vars.AWS_CI_TEST_BUCKET }} GT_S3_BUCKET: ${{ vars.AWS_CI_TEST_BUCKET }}
GT_S3_ACCESS_KEY_ID: ${{ secrets.AWS_CI_TEST_ACCESS_KEY_ID }} GT_S3_ACCESS_KEY_ID: ${{ secrets.AWS_CI_TEST_ACCESS_KEY_ID }}
GT_S3_ACCESS_KEY: ${{ secrets.AWS_CI_TEST_SECRET_ACCESS_KEY }} GT_S3_ACCESS_KEY: ${{ secrets.AWS_CI_TEST_SECRET_ACCESS_KEY }}
GT_S3_REGION: ${{ vars.AWS_CI_TEST_BUCKET_REGION }} GT_S3_REGION: ${{ vars.AWS_CI_TEST_BUCKET_REGION }}
UNITTEST_LOG_DIR: "__unittest_logs" UNITTEST_LOG_DIR: "__unittest_logs"
- name: Notify slack if failed
check-status: if: failure()
name: Check status uses: slackapi/slack-github-action@v1.23.0
needs: [
sqlness-test,
sqlness-windows,
test-on-windows,
]
if: ${{ github.repository == 'GreptimeTeam/greptimedb' }}
runs-on: ubuntu-20.04
outputs:
check-result: ${{ steps.set-check-result.outputs.check-result }}
steps:
- name: Set check result
id: set-check-result
run: |
echo "check-result=success" >> $GITHUB_OUTPUT
notification:
if: ${{ github.repository == 'GreptimeTeam/greptimedb' && always() }} # Not requiring successful dependent jobs, always run.
name: Send notification to Greptime team
needs: [
check-status
]
runs-on: ubuntu-20.04
env:
SLACK_WEBHOOK_URL: ${{ secrets.SLACK_WEBHOOK_URL_DEVELOP_CHANNEL }}
steps:
- uses: actions/checkout@v4
- uses: ./.github/actions/setup-cyborg
- name: Report CI status
id: report-ci-status
working-directory: cyborg
run: pnpm tsx bin/report-ci-failure.ts
env: env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} SLACK_WEBHOOK_URL: ${{ secrets.SLACK_WEBHOOK_URL_DEVELOP_CHANNEL }}
CI_REPORT_STATUS: ${{ needs.check-status.outputs.check-result == 'success' }}
- name: Notify dev build successful result
uses: slackapi/slack-github-action@v1.23.0
if: ${{ needs.check-status.outputs.check-result == 'success' }}
with: with:
payload: | payload: |
{"text": "Nightly CI has completed successfully."} {"text": "Nightly CI failed for cargo test"}
- name: Notify dev build failed result
uses: slackapi/slack-github-action@v1.23.0
if: ${{ needs.check-status.outputs.check-result != 'success' }}
with:
payload: |
{"text": "Nightly CI failed has failed, please check ${{ steps.report-ci-status.outputs.html_url }}."}


@@ -0,0 +1,27 @@
name: Nightly functional tests
on:
schedule:
# At 00:00 on Tuesday.
- cron: '0 0 * * 2'
workflow_dispatch:
jobs:
sqlness-test:
name: Run sqlness test
if: ${{ github.repository == 'GreptimeTeam/greptimedb' }}
runs-on: ubuntu-22.04
steps:
- name: Checkout
uses: actions/checkout@v4
with:
fetch-depth: 0
- name: Run sqlness test
uses: ./.github/actions/sqlness-test
with:
data-root: sqlness-test
aws-ci-test-bucket: ${{ vars.AWS_CI_TEST_BUCKET }}
aws-region: ${{ vars.AWS_CI_TEST_BUCKET_REGION }}
aws-access-key-id: ${{ secrets.AWS_CI_TEST_ACCESS_KEY_ID }}
aws-secret-access-key: ${{ secrets.AWS_CI_TEST_SECRET_ACCESS_KEY }}

.github/workflows/pr-title-checker.yml (new file)

@@ -0,0 +1,29 @@
name: "PR Title Checker"
on:
pull_request_target:
types:
- opened
- edited
- synchronize
- labeled
- unlabeled
jobs:
check:
runs-on: ubuntu-20.04
timeout-minutes: 10
steps:
- uses: thehanimo/pr-title-checker@v1.4.2
with:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
pass_on_octokit_error: false
configuration_path: ".github/pr-title-checker-config.json"
breaking:
runs-on: ubuntu-20.04
timeout-minutes: 10
steps:
- uses: thehanimo/pr-title-checker@v1.4.2
with:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
pass_on_octokit_error: false
configuration_path: ".github/pr-title-breaking-change-label-config.json"


@@ -82,7 +82,7 @@ on:
# Use env variables to control all the release process. # Use env variables to control all the release process.
env: env:
# The arguments of building greptime. # The arguments of building greptime.
RUST_TOOLCHAIN: nightly-2024-04-20 RUST_TOOLCHAIN: nightly-2024-04-18
CARGO_PROFILE: nightly CARGO_PROFILE: nightly
# Controls whether to run tests, include unit-test, integration-test and sqlness. # Controls whether to run tests, include unit-test, integration-test and sqlness.
@@ -91,12 +91,7 @@ env:
# The scheduled version is '${{ env.NEXT_RELEASE_VERSION }}-nightly-YYYYMMDD', like v0.2.0-nigthly-20230313; # The scheduled version is '${{ env.NEXT_RELEASE_VERSION }}-nightly-YYYYMMDD', like v0.2.0-nigthly-20230313;
NIGHTLY_RELEASE_PREFIX: nightly NIGHTLY_RELEASE_PREFIX: nightly
# Note: The NEXT_RELEASE_VERSION should be modified manually by every formal release. # Note: The NEXT_RELEASE_VERSION should be modified manually by every formal release.
NEXT_RELEASE_VERSION: v0.9.0 NEXT_RELEASE_VERSION: v0.8.0
# Permission reference: https://docs.github.com/en/actions/using-jobs/assigning-permissions-to-jobs
permissions:
issues: write # Allows the action to create issues for cyborg.
contents: write # Allows the action to create a release.
jobs: jobs:
allocate-runners: allocate-runners:
@@ -107,7 +102,7 @@ jobs:
linux-amd64-runner: ${{ steps.start-linux-amd64-runner.outputs.label }} linux-amd64-runner: ${{ steps.start-linux-amd64-runner.outputs.label }}
linux-arm64-runner: ${{ steps.start-linux-arm64-runner.outputs.label }} linux-arm64-runner: ${{ steps.start-linux-arm64-runner.outputs.label }}
macos-runner: ${{ inputs.macos_runner || vars.DEFAULT_MACOS_RUNNER }} macos-runner: ${{ inputs.macos_runner || vars.DEFAULT_MACOS_RUNNER }}
windows-runner: windows-2022-8-cores windows-runner: windows-latest-8-cores
# The following EC2 resource id will be used for resource releasing. # The following EC2 resource id will be used for resource releasing.
linux-amd64-ec2-runner-label: ${{ steps.start-linux-amd64-runner.outputs.label }} linux-amd64-ec2-runner-label: ${{ steps.start-linux-amd64-runner.outputs.label }}
@@ -250,7 +245,7 @@ jobs:
- name: Set build macos result - name: Set build macos result
id: set-build-macos-result id: set-build-macos-result
run: | run: |
echo "build-macos-result=success" >> $GITHUB_OUTPUT echo "build-macos-result=success" >> $GITHUB_OUTPUT
build-windows-artifacts: build-windows-artifacts:
name: Build Windows artifacts name: Build Windows artifacts
@@ -323,7 +318,7 @@ jobs:
- name: Set build image result - name: Set build image result
id: set-build-image-result id: set-build-image-result
run: | run: |
echo "build-image-result=success" >> $GITHUB_OUTPUT echo "build-image-result=success" >> $GITHUB_OUTPUT
release-cn-artifacts: release-cn-artifacts:
name: Release artifacts to CN region name: Release artifacts to CN region
@@ -441,7 +436,7 @@ jobs:
github-token: ${{ secrets.GH_PERSONAL_ACCESS_TOKEN }} github-token: ${{ secrets.GH_PERSONAL_ACCESS_TOKEN }}
notification: notification:
if: ${{ github.repository == 'GreptimeTeam/greptimedb' && (github.event_name == 'push' || github.event_name == 'schedule') && always() }} if: ${{ always() }} # Not requiring successful dependent jobs, always run.
name: Send notification to Greptime team name: Send notification to Greptime team
needs: [ needs: [
release-images-to-dockerhub, release-images-to-dockerhub,
@@ -452,25 +447,16 @@ jobs:
env: env:
SLACK_WEBHOOK_URL: ${{ secrets.SLACK_WEBHOOK_URL_DEVELOP_CHANNEL }} SLACK_WEBHOOK_URL: ${{ secrets.SLACK_WEBHOOK_URL_DEVELOP_CHANNEL }}
steps: steps:
- uses: actions/checkout@v4 - name: Notifiy release successful result
- uses: ./.github/actions/setup-cyborg
- name: Report CI status
id: report-ci-status
working-directory: cyborg
run: pnpm tsx bin/report-ci-failure.ts
env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
CI_REPORT_STATUS: ${{ needs.release-images-to-dockerhub.outputs.build-image-result == 'success' && needs.build-windows-artifacts.outputs.build-windows-result == 'success' && needs.build-macos-artifacts.outputs.build-macos-result == 'success' }}
- name: Notify release successful result
uses: slackapi/slack-github-action@v1.25.0 uses: slackapi/slack-github-action@v1.25.0
if: ${{ needs.release-images-to-dockerhub.outputs.build-image-result == 'success' && needs.build-windows-artifacts.outputs.build-windows-result == 'success' && needs.build-macos-artifacts.outputs.build-macos-result == 'success' }} if: ${{ needs.release-images-to-dockerhub.outputs.build-image-result == 'success' && needs.build-windows-artifacts.outputs.build-windows-result == 'success' && needs.build-macos-artifacts.outputs.build-macos-result == 'success' }}
with: with:
payload: | payload: |
{"text": "GreptimeDB's release version has completed successfully."} {"text": "GreptimeDB's release version has completed successfully."}
- name: Notify release failed result - name: Notifiy release failed result
uses: slackapi/slack-github-action@v1.25.0 uses: slackapi/slack-github-action@v1.25.0
if: ${{ needs.release-images-to-dockerhub.outputs.build-image-result != 'success' || needs.build-windows-artifacts.outputs.build-windows-result != 'success' || needs.build-macos-artifacts.outputs.build-macos-result != 'success' }} if: ${{ needs.release-images-to-dockerhub.outputs.build-image-result != 'success' || needs.build-windows-artifacts.outputs.build-windows-result != 'success' || needs.build-macos-artifacts.outputs.build-macos-result != 'success' }}
with: with:
payload: | payload: |
{"text": "GreptimeDB's release version has failed, please check ${{ steps.report-ci-status.outputs.html_url }}."} {"text": "GreptimeDB's release version has failed, please check 'https://github.com/GreptimeTeam/greptimedb/actions/workflows/release.yml'."}

View File

@@ -1,24 +0,0 @@
name: Schedule Management
on:
schedule:
- cron: '4 2 * * *'
workflow_dispatch:
permissions:
contents: read
issues: write
pull-requests: write
jobs:
maintenance:
name: Periodic Maintenance
runs-on: ubuntu-latest
if: ${{ github.repository == 'GreptimeTeam/greptimedb' }}
steps:
- uses: actions/checkout@v4
- uses: ./.github/actions/setup-cyborg
- name: Do Maintenance
working-directory: cyborg
run: pnpm tsx bin/schedule.ts
env:
GITHUB_TOKEN: ${{ secrets.GH_PERSONAL_ACCESS_TOKEN }}

View File

@@ -1,21 +0,0 @@
name: "Semantic Pull Request"
on:
pull_request_target:
types:
- opened
- reopened
- edited
jobs:
check:
runs-on: ubuntu-20.04
timeout-minutes: 10
steps:
- uses: actions/checkout@v4
- uses: ./.github/actions/setup-cyborg
- name: Check Pull Request
working-directory: cyborg
run: pnpm tsx bin/check-pull-request.ts
env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}

21 .github/workflows/unassign.yml vendored Normal file
View File

@@ -0,0 +1,21 @@
name: Auto Unassign
on:
schedule:
- cron: '4 2 * * *'
workflow_dispatch:
permissions:
contents: read
issues: write
pull-requests: write
jobs:
auto-unassign:
name: Auto Unassign
runs-on: ubuntu-latest
steps:
- name: Auto Unassign
uses: tisonspieces/auto-unassign@main
with:
token: ${{ secrets.GH_PERSONAL_ACCESS_TOKEN }}
repository: ${{ github.repository }}

View File

@@ -50,7 +50,7 @@ GreptimeDB uses the [Apache 2.0 license](https://github.com/GreptimeTeam/greptim
- To ensure that community is free and confident in its ability to use your contributions, please sign the Contributor License Agreement (CLA) which will be incorporated in the pull request process. - To ensure that community is free and confident in its ability to use your contributions, please sign the Contributor License Agreement (CLA) which will be incorporated in the pull request process.
- Make sure all files have proper license header (running `docker run --rm -v $(pwd):/github/workspace ghcr.io/korandoru/hawkeye-native:v3 format` from the project root). - Make sure all files have proper license header (running `docker run --rm -v $(pwd):/github/workspace ghcr.io/korandoru/hawkeye-native:v3 format` from the project root).
- Make sure all your codes are formatted and follow the [coding style](https://pingcap.github.io/style-guide/rust/) and [style guide](docs/style-guide.md). - Make sure all your codes are formatted and follow the [coding style](https://pingcap.github.io/style-guide/rust/) and [style guide](http://github.com/greptimeTeam/docs/style-guide.md).
- Make sure all unit tests are passed (using `cargo test --workspace` or [nextest](https://nexte.st/index.html) `cargo nextest run`). - Make sure all unit tests are passed (using `cargo test --workspace` or [nextest](https://nexte.st/index.html) `cargo nextest run`).
- Make sure all clippy warnings are fixed (you can check it locally by running `cargo clippy --workspace --all-targets -- -D warnings`). - Make sure all clippy warnings are fixed (you can check it locally by running `cargo clippy --workspace --all-targets -- -D warnings`).
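Taken together, a reasonable local pre-submission check based on the guidelines above would be to run `cargo clippy --workspace --all-targets -- -D warnings` followed by `cargo nextest run` (or `cargo test --workspace`), plus the hawkeye Docker command for license headers; this combination is a suggestion, not a step mandated by the contributing guide itself.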

2506 Cargo.lock generated

File diff suppressed because it is too large

View File

@@ -4,7 +4,6 @@ members = [
"src/api", "src/api",
"src/auth", "src/auth",
"src/catalog", "src/catalog",
"src/cache",
"src/client", "src/client",
"src/cmd", "src/cmd",
"src/common/base", "src/common/base",
@@ -12,7 +11,6 @@ members = [
"src/common/config", "src/common/config",
"src/common/datasource", "src/common/datasource",
"src/common/error", "src/common/error",
"src/common/frontend",
"src/common/function", "src/common/function",
"src/common/macro", "src/common/macro",
"src/common/greptimedb-telemetry", "src/common/greptimedb-telemetry",
@@ -64,14 +62,13 @@ members = [
resolver = "2" resolver = "2"
[workspace.package] [workspace.package]
version = "0.8.2" version = "0.7.2"
edition = "2021" edition = "2021"
license = "Apache-2.0" license = "Apache-2.0"
[workspace.lints] [workspace.lints]
clippy.print_stdout = "warn" clippy.print_stdout = "warn"
clippy.print_stderr = "warn" clippy.print_stderr = "warn"
clippy.dbg_macro = "warn"
clippy.implicit_clone = "warn" clippy.implicit_clone = "warn"
clippy.readonly_write_lock = "allow" clippy.readonly_write_lock = "allow"
rust.unknown_lints = "deny" rust.unknown_lints = "deny"
@@ -101,18 +98,15 @@ bytemuck = "1.12"
bytes = { version = "1.5", features = ["serde"] } bytes = { version = "1.5", features = ["serde"] }
chrono = { version = "0.4", features = ["serde"] } chrono = { version = "0.4", features = ["serde"] }
clap = { version = "4.4", features = ["derive"] } clap = { version = "4.4", features = ["derive"] }
config = "0.13.0"
crossbeam-utils = "0.8"
dashmap = "5.4" dashmap = "5.4"
datafusion = { git = "https://github.com/apache/datafusion.git", rev = "08e19f4956d32164be6fc66eb5a4c080eb0023d1" } datafusion = { git = "https://github.com/apache/arrow-datafusion.git", rev = "34eda15b73a9e278af8844b30ed2f1c21c10359c" }
datafusion-common = { git = "https://github.com/apache/datafusion.git", rev = "08e19f4956d32164be6fc66eb5a4c080eb0023d1" } datafusion-common = { git = "https://github.com/apache/arrow-datafusion.git", rev = "34eda15b73a9e278af8844b30ed2f1c21c10359c" }
datafusion-expr = { git = "https://github.com/apache/datafusion.git", rev = "08e19f4956d32164be6fc66eb5a4c080eb0023d1" } datafusion-expr = { git = "https://github.com/apache/arrow-datafusion.git", rev = "34eda15b73a9e278af8844b30ed2f1c21c10359c" }
datafusion-functions = { git = "https://github.com/apache/datafusion.git", rev = "08e19f4956d32164be6fc66eb5a4c080eb0023d1" } datafusion-functions = { git = "https://github.com/apache/arrow-datafusion.git", rev = "34eda15b73a9e278af8844b30ed2f1c21c10359c" }
datafusion-optimizer = { git = "https://github.com/apache/datafusion.git", rev = "08e19f4956d32164be6fc66eb5a4c080eb0023d1" } datafusion-optimizer = { git = "https://github.com/apache/arrow-datafusion.git", rev = "34eda15b73a9e278af8844b30ed2f1c21c10359c" }
datafusion-physical-expr = { git = "https://github.com/apache/datafusion.git", rev = "08e19f4956d32164be6fc66eb5a4c080eb0023d1" } datafusion-physical-expr = { git = "https://github.com/apache/arrow-datafusion.git", rev = "34eda15b73a9e278af8844b30ed2f1c21c10359c" }
datafusion-physical-plan = { git = "https://github.com/apache/datafusion.git", rev = "08e19f4956d32164be6fc66eb5a4c080eb0023d1" } datafusion-sql = { git = "https://github.com/apache/arrow-datafusion.git", rev = "34eda15b73a9e278af8844b30ed2f1c21c10359c" }
datafusion-sql = { git = "https://github.com/apache/datafusion.git", rev = "08e19f4956d32164be6fc66eb5a4c080eb0023d1" } datafusion-substrait = { git = "https://github.com/apache/arrow-datafusion.git", rev = "34eda15b73a9e278af8844b30ed2f1c21c10359c" }
datafusion-substrait = { git = "https://github.com/apache/datafusion.git", rev = "08e19f4956d32164be6fc66eb5a4c080eb0023d1" }
derive_builder = "0.12" derive_builder = "0.12"
dotenv = "0.15" dotenv = "0.15"
# TODO(LFC): Wait for https://github.com/etcdv3/etcd-client/pull/76 # TODO(LFC): Wait for https://github.com/etcdv3/etcd-client/pull/76
@@ -120,7 +114,7 @@ etcd-client = { git = "https://github.com/MichaelScofield/etcd-client.git", rev
fst = "0.4.7" fst = "0.4.7"
futures = "0.3" futures = "0.3"
futures-util = "0.3" futures-util = "0.3"
greptime-proto = { git = "https://github.com/GreptimeTeam/greptime-proto.git", rev = "ae26136accd82fbdf8be540cd502f2e94951077e" } greptime-proto = { git = "https://github.com/GreptimeTeam/greptime-proto.git", rev = "73ac0207ab71dfea48f30259ffdb611501b5ecb8" }
humantime = "2.1" humantime = "2.1"
humantime-serde = "1.1" humantime-serde = "1.1"
itertools = "0.10" itertools = "0.10"
@@ -140,31 +134,28 @@ parquet = { version = "51.0.0", default-features = false, features = ["arrow", "
paste = "1.0" paste = "1.0"
pin-project = "1.0" pin-project = "1.0"
prometheus = { version = "0.13.3", features = ["process"] } prometheus = { version = "0.13.3", features = ["process"] }
promql-parser = { version = "0.4" }
prost = "0.12" prost = "0.12"
raft-engine = { version = "0.4.1", default-features = false } raft-engine = { version = "0.4.1", default-features = false }
rand = "0.8" rand = "0.8"
regex = "1.8" regex = "1.8"
regex-automata = { version = "0.4" } regex-automata = { version = "0.4" }
reqwest = { version = "0.12", default-features = false, features = [ reqwest = { version = "0.11", default-features = false, features = [
"json", "json",
"rustls-tls-native-roots", "rustls-tls-native-roots",
"stream", "stream",
"multipart", "multipart",
] } ] }
rskafka = "0.5" rskafka = "0.5"
rstest = "0.21"
rstest_reuse = "0.7"
rust_decimal = "1.33" rust_decimal = "1.33"
schemars = "0.8" schemars = "0.8"
serde = { version = "1.0", features = ["derive"] } serde = { version = "1.0", features = ["derive"] }
serde_json = { version = "1.0", features = ["float_roundtrip"] } serde_json = { version = "1.0", features = ["float_roundtrip"] }
serde_with = "3" serde_with = "3"
smallvec = { version = "1", features = ["serde"] } smallvec = { version = "1", features = ["serde"] }
snafu = "0.8" snafu = "0.7"
sysinfo = "0.30" sysinfo = "0.30"
# on branch v0.44.x # on branch v0.44.x
sqlparser = { git = "https://github.com/GreptimeTeam/sqlparser-rs.git", rev = "54a267ac89c09b11c0c88934690530807185d3e7", features = [ sqlparser = { git = "https://github.com/GreptimeTeam/sqlparser-rs.git", rev = "c919990bf62ad38d2b0c0a3bc90b26ad919d51b0", features = [
"visitor", "visitor",
] } ] }
strum = { version = "0.25", features = ["derive"] } strum = { version = "0.25", features = ["derive"] }
@@ -173,15 +164,13 @@ tokio = { version = "1.36", features = ["full"] }
tokio-stream = { version = "0.1" } tokio-stream = { version = "0.1" }
tokio-util = { version = "0.7", features = ["io-util", "compat"] } tokio-util = { version = "0.7", features = ["io-util", "compat"] }
toml = "0.8.8" toml = "0.8.8"
tonic = { version = "0.11", features = ["tls", "gzip", "zstd"] } tonic = { version = "0.11", features = ["tls"] }
tower = { version = "0.4" }
uuid = { version = "1.7", features = ["serde", "v4", "fast-rng"] } uuid = { version = "1.7", features = ["serde", "v4", "fast-rng"] }
zstd = "0.13" zstd = "0.13"
## workspaces members ## workspaces members
api = { path = "src/api" } api = { path = "src/api" }
auth = { path = "src/auth" } auth = { path = "src/auth" }
cache = { path = "src/cache" }
catalog = { path = "src/catalog" } catalog = { path = "src/catalog" }
client = { path = "src/client" } client = { path = "src/client" }
cmd = { path = "src/cmd" } cmd = { path = "src/cmd" }
@@ -191,7 +180,6 @@ common-config = { path = "src/common/config" }
common-datasource = { path = "src/common/datasource" } common-datasource = { path = "src/common/datasource" }
common-decimal = { path = "src/common/decimal" } common-decimal = { path = "src/common/decimal" }
common-error = { path = "src/common/error" } common-error = { path = "src/common/error" }
common-frontend = { path = "src/common/frontend" }
common-function = { path = "src/common/function" } common-function = { path = "src/common/function" }
common-greptimedb-telemetry = { path = "src/common/greptimedb-telemetry" } common-greptimedb-telemetry = { path = "src/common/greptimedb-telemetry" }
common-grpc = { path = "src/common/grpc" } common-grpc = { path = "src/common/grpc" }
@@ -213,7 +201,6 @@ common-wal = { path = "src/common/wal" }
datanode = { path = "src/datanode" } datanode = { path = "src/datanode" }
datatypes = { path = "src/datatypes" } datatypes = { path = "src/datatypes" }
file-engine = { path = "src/file-engine" } file-engine = { path = "src/file-engine" }
flow = { path = "src/flow" }
frontend = { path = "src/frontend" } frontend = { path = "src/frontend" }
index = { path = "src/index" } index = { path = "src/index" }
log-store = { path = "src/log-store" } log-store = { path = "src/log-store" }
@@ -249,15 +236,3 @@ strip = true
lto = "thin" lto = "thin"
debug = false debug = false
incremental = false incremental = false
[profile.ci]
inherits = "dev"
strip = true
[profile.dev.package.sqlness-runner]
debug = false
strip = true
[profile.dev.package.tests-fuzz]
debug = false
strip = true

View File

@@ -54,10 +54,8 @@ ifneq ($(strip $(RELEASE)),)
CARGO_BUILD_OPTS += --release CARGO_BUILD_OPTS += --release
endif endif
ifeq ($(BUILDX_MULTI_PLATFORM_BUILD), all) ifeq ($(BUILDX_MULTI_PLATFORM_BUILD), true)
BUILDX_MULTI_PLATFORM_BUILD_OPTS := --platform linux/amd64,linux/arm64 --push BUILDX_MULTI_PLATFORM_BUILD_OPTS := --platform linux/amd64,linux/arm64 --push
else ifeq ($(BUILDX_MULTI_PLATFORM_BUILD), amd64)
BUILDX_MULTI_PLATFORM_BUILD_OPTS := --platform linux/amd64 --push
else else
BUILDX_MULTI_PLATFORM_BUILD_OPTS := -o type=docker BUILDX_MULTI_PLATFORM_BUILD_OPTS := -o type=docker
endif endif
@@ -163,13 +161,6 @@ nextest: ## Install nextest tools.
sqlness-test: ## Run sqlness test. sqlness-test: ## Run sqlness test.
cargo sqlness cargo sqlness
# Run fuzz test ${FUZZ_TARGET}.
RUNS ?= 1
FUZZ_TARGET ?= fuzz_alter_table
.PHONY: fuzz
fuzz:
cargo fuzz run ${FUZZ_TARGET} --fuzz-dir tests-fuzz -D -s none -- -runs=${RUNS}
.PHONY: check .PHONY: check
check: ## Cargo check all the targets. check: ## Cargo check all the targets.
cargo check --workspace --all-targets --all-features cargo check --workspace --all-targets --all-features
@@ -201,16 +192,12 @@ run-it-in-container: start-etcd ## Run integration tests in dev-builder.
-w /greptimedb ${IMAGE_REGISTRY}/${IMAGE_NAMESPACE}/dev-builder-${BASE_IMAGE}:latest \ -w /greptimedb ${IMAGE_REGISTRY}/${IMAGE_NAMESPACE}/dev-builder-${BASE_IMAGE}:latest \
make test sqlness-test BUILD_JOBS=${BUILD_JOBS} make test sqlness-test BUILD_JOBS=${BUILD_JOBS}
.PHONY: run-cluster-with-etcd
run-cluster-with-etcd: ## Run greptime cluster with etcd in docker-compose.
docker compose -f ./docker/docker-compose/cluster-with-etcd.yaml up
##@ Docs ##@ Docs
config-docs: ## Generate configuration documentation from toml files. config-docs: ## Generate configuration documentation from toml files.
docker run --rm \ docker run --rm \
-v ${PWD}:/greptimedb \ -v ${PWD}:/greptimedb \
-w /greptimedb/config \ -w /greptimedb/config \
toml2docs/toml2docs:v0.1.1 \ toml2docs/toml2docs:latest \
-p '##' \ -p '##' \
-t ./config-docs-template.md \ -t ./config-docs-template.md \
-o ./config.md -o ./config.md

View File

@@ -12,6 +12,7 @@ api.workspace = true
arrow.workspace = true arrow.workspace = true
chrono.workspace = true chrono.workspace = true
clap.workspace = true clap.workspace = true
client.workspace = true
common-base.workspace = true common-base.workspace = true
common-telemetry.workspace = true common-telemetry.workspace = true
common-wal.workspace = true common-wal.workspace = true

View File

@@ -0,0 +1,513 @@
// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//! Use the taxi trip records from New York City dataset to bench. You can download the dataset from
//! [here](https://www1.nyc.gov/site/tlc/about/tlc-trip-record-data.page).
#![allow(clippy::print_stdout)]
use std::collections::HashMap;
use std::path::{Path, PathBuf};
use std::time::Instant;
use arrow::array::{ArrayRef, PrimitiveArray, StringArray, TimestampMicrosecondArray};
use arrow::datatypes::{DataType, Float64Type, Int64Type};
use arrow::record_batch::RecordBatch;
use clap::Parser;
use client::api::v1::column::Values;
use client::api::v1::{
Column, ColumnDataType, ColumnDef, CreateTableExpr, InsertRequest, InsertRequests, SemanticType,
};
use client::{Client, Database, OutputData, DEFAULT_CATALOG_NAME, DEFAULT_SCHEMA_NAME};
use futures_util::TryStreamExt;
use indicatif::{MultiProgress, ProgressBar, ProgressStyle};
use parquet::arrow::arrow_reader::ParquetRecordBatchReaderBuilder;
use tokio::task::JoinSet;
const CATALOG_NAME: &str = "greptime";
const SCHEMA_NAME: &str = "public";
#[derive(Parser)]
#[command(name = "NYC benchmark runner")]
struct Args {
/// Path to the dataset
#[arg(short, long)]
path: Option<String>,
/// Batch size of insert request.
#[arg(short = 's', long = "batch-size", default_value_t = 4096)]
batch_size: usize,
/// Number of client threads on write (parallel on file level)
#[arg(short = 't', long = "thread-num", default_value_t = 4)]
thread_num: usize,
/// Number of query iteration
#[arg(short = 'i', long = "iter-num", default_value_t = 3)]
iter_num: usize,
#[arg(long = "skip-write")]
skip_write: bool,
#[arg(long = "skip-read")]
skip_read: bool,
#[arg(short, long, default_value_t = String::from("127.0.0.1:4001"))]
endpoint: String,
}
fn get_file_list<P: AsRef<Path>>(path: P) -> Vec<PathBuf> {
std::fs::read_dir(path)
.unwrap()
.map(|dir| dir.unwrap().path().canonicalize().unwrap())
.collect()
}
fn new_table_name() -> String {
format!("nyc_taxi_{}", chrono::Utc::now().timestamp())
}
async fn write_data(
table_name: &str,
batch_size: usize,
db: &Database,
path: PathBuf,
mpb: MultiProgress,
pb_style: ProgressStyle,
) -> u128 {
let file = std::fs::File::open(&path).unwrap();
let record_batch_reader_builder = ParquetRecordBatchReaderBuilder::try_new(file).unwrap();
let row_num = record_batch_reader_builder
.metadata()
.file_metadata()
.num_rows();
let record_batch_reader = record_batch_reader_builder
.with_batch_size(batch_size)
.build()
.unwrap();
let progress_bar = mpb.add(ProgressBar::new(row_num as _));
progress_bar.set_style(pb_style);
progress_bar.set_message(format!("{path:?}"));
let mut total_rpc_elapsed_ms = 0;
for record_batch in record_batch_reader {
let record_batch = record_batch.unwrap();
if !is_record_batch_full(&record_batch) {
continue;
}
let (columns, row_count) = convert_record_batch(record_batch);
let request = InsertRequest {
table_name: table_name.to_string(),
columns,
row_count,
};
let requests = InsertRequests {
inserts: vec![request],
};
let now = Instant::now();
db.insert(requests).await.unwrap();
let elapsed = now.elapsed();
total_rpc_elapsed_ms += elapsed.as_millis();
progress_bar.inc(row_count as _);
}
progress_bar.finish_with_message(format!("file {path:?} done in {total_rpc_elapsed_ms}ms",));
total_rpc_elapsed_ms
}
fn convert_record_batch(record_batch: RecordBatch) -> (Vec<Column>, u32) {
let schema = record_batch.schema();
let fields = schema.fields();
let row_count = record_batch.num_rows();
let mut columns = vec![];
for (array, field) in record_batch.columns().iter().zip(fields.iter()) {
let (values, datatype) = build_values(array);
let semantic_type = match field.name().as_str() {
"VendorID" => SemanticType::Tag,
"tpep_pickup_datetime" => SemanticType::Timestamp,
_ => SemanticType::Field,
};
let column = Column {
column_name: field.name().clone(),
values: Some(values),
null_mask: array
.to_data()
.nulls()
.map(|bitmap| bitmap.buffer().as_slice().to_vec())
.unwrap_or_default(),
datatype: datatype.into(),
semantic_type: semantic_type as i32,
..Default::default()
};
columns.push(column);
}
(columns, row_count as _)
}
fn build_values(column: &ArrayRef) -> (Values, ColumnDataType) {
match column.data_type() {
DataType::Int64 => {
let array = column
.as_any()
.downcast_ref::<PrimitiveArray<Int64Type>>()
.unwrap();
let values = array.values();
(
Values {
i64_values: values.to_vec(),
..Default::default()
},
ColumnDataType::Int64,
)
}
DataType::Float64 => {
let array = column
.as_any()
.downcast_ref::<PrimitiveArray<Float64Type>>()
.unwrap();
let values = array.values();
(
Values {
f64_values: values.to_vec(),
..Default::default()
},
ColumnDataType::Float64,
)
}
DataType::Timestamp(_, _) => {
let array = column
.as_any()
.downcast_ref::<TimestampMicrosecondArray>()
.unwrap();
let values = array.values();
(
Values {
timestamp_microsecond_values: values.to_vec(),
..Default::default()
},
ColumnDataType::TimestampMicrosecond,
)
}
DataType::Utf8 => {
let array = column.as_any().downcast_ref::<StringArray>().unwrap();
let values = array.iter().filter_map(|s| s.map(String::from)).collect();
(
Values {
string_values: values,
..Default::default()
},
ColumnDataType::String,
)
}
_ => unimplemented!(),
}
}
fn is_record_batch_full(batch: &RecordBatch) -> bool {
batch.columns().iter().all(|col| col.null_count() == 0)
}
fn create_table_expr(table_name: &str) -> CreateTableExpr {
CreateTableExpr {
catalog_name: CATALOG_NAME.to_string(),
schema_name: SCHEMA_NAME.to_string(),
table_name: table_name.to_string(),
desc: String::default(),
column_defs: vec![
ColumnDef {
name: "VendorID".to_string(),
data_type: ColumnDataType::Int64 as i32,
is_nullable: true,
default_constraint: vec![],
semantic_type: SemanticType::Tag as i32,
comment: String::new(),
..Default::default()
},
ColumnDef {
name: "tpep_pickup_datetime".to_string(),
data_type: ColumnDataType::TimestampMicrosecond as i32,
is_nullable: false,
default_constraint: vec![],
semantic_type: SemanticType::Timestamp as i32,
comment: String::new(),
..Default::default()
},
ColumnDef {
name: "tpep_dropoff_datetime".to_string(),
data_type: ColumnDataType::TimestampMicrosecond as i32,
is_nullable: true,
default_constraint: vec![],
semantic_type: SemanticType::Field as i32,
comment: String::new(),
..Default::default()
},
ColumnDef {
name: "passenger_count".to_string(),
data_type: ColumnDataType::Float64 as i32,
is_nullable: true,
default_constraint: vec![],
semantic_type: SemanticType::Field as i32,
comment: String::new(),
..Default::default()
},
ColumnDef {
name: "trip_distance".to_string(),
data_type: ColumnDataType::Float64 as i32,
is_nullable: true,
default_constraint: vec![],
semantic_type: SemanticType::Field as i32,
comment: String::new(),
..Default::default()
},
ColumnDef {
name: "RatecodeID".to_string(),
data_type: ColumnDataType::Float64 as i32,
is_nullable: true,
default_constraint: vec![],
semantic_type: SemanticType::Field as i32,
comment: String::new(),
..Default::default()
},
ColumnDef {
name: "store_and_fwd_flag".to_string(),
data_type: ColumnDataType::String as i32,
is_nullable: true,
default_constraint: vec![],
semantic_type: SemanticType::Field as i32,
comment: String::new(),
..Default::default()
},
ColumnDef {
name: "PULocationID".to_string(),
data_type: ColumnDataType::Int64 as i32,
is_nullable: true,
default_constraint: vec![],
semantic_type: SemanticType::Field as i32,
comment: String::new(),
..Default::default()
},
ColumnDef {
name: "DOLocationID".to_string(),
data_type: ColumnDataType::Int64 as i32,
is_nullable: true,
default_constraint: vec![],
semantic_type: SemanticType::Field as i32,
comment: String::new(),
..Default::default()
},
ColumnDef {
name: "payment_type".to_string(),
data_type: ColumnDataType::Int64 as i32,
is_nullable: true,
default_constraint: vec![],
semantic_type: SemanticType::Field as i32,
comment: String::new(),
..Default::default()
},
ColumnDef {
name: "fare_amount".to_string(),
data_type: ColumnDataType::Float64 as i32,
is_nullable: true,
default_constraint: vec![],
semantic_type: SemanticType::Field as i32,
comment: String::new(),
..Default::default()
},
ColumnDef {
name: "extra".to_string(),
data_type: ColumnDataType::Float64 as i32,
is_nullable: true,
default_constraint: vec![],
semantic_type: SemanticType::Field as i32,
comment: String::new(),
..Default::default()
},
ColumnDef {
name: "mta_tax".to_string(),
data_type: ColumnDataType::Float64 as i32,
is_nullable: true,
default_constraint: vec![],
semantic_type: SemanticType::Field as i32,
comment: String::new(),
..Default::default()
},
ColumnDef {
name: "tip_amount".to_string(),
data_type: ColumnDataType::Float64 as i32,
is_nullable: true,
default_constraint: vec![],
semantic_type: SemanticType::Field as i32,
comment: String::new(),
..Default::default()
},
ColumnDef {
name: "tolls_amount".to_string(),
data_type: ColumnDataType::Float64 as i32,
is_nullable: true,
default_constraint: vec![],
semantic_type: SemanticType::Field as i32,
comment: String::new(),
..Default::default()
},
ColumnDef {
name: "improvement_surcharge".to_string(),
data_type: ColumnDataType::Float64 as i32,
is_nullable: true,
default_constraint: vec![],
semantic_type: SemanticType::Field as i32,
comment: String::new(),
..Default::default()
},
ColumnDef {
name: "total_amount".to_string(),
data_type: ColumnDataType::Float64 as i32,
is_nullable: true,
default_constraint: vec![],
semantic_type: SemanticType::Field as i32,
comment: String::new(),
..Default::default()
},
ColumnDef {
name: "congestion_surcharge".to_string(),
data_type: ColumnDataType::Float64 as i32,
is_nullable: true,
default_constraint: vec![],
semantic_type: SemanticType::Field as i32,
comment: String::new(),
..Default::default()
},
ColumnDef {
name: "airport_fee".to_string(),
data_type: ColumnDataType::Float64 as i32,
is_nullable: true,
default_constraint: vec![],
semantic_type: SemanticType::Field as i32,
comment: String::new(),
..Default::default()
},
],
time_index: "tpep_pickup_datetime".to_string(),
primary_keys: vec!["VendorID".to_string()],
create_if_not_exists: true,
table_options: Default::default(),
table_id: None,
engine: "mito".to_string(),
}
}
fn query_set(table_name: &str) -> HashMap<String, String> {
HashMap::from([
(
"count_all".to_string(),
format!("SELECT COUNT(*) FROM {table_name};"),
),
(
"fare_amt_by_passenger".to_string(),
format!("SELECT passenger_count, MIN(fare_amount), MAX(fare_amount), SUM(fare_amount) FROM {table_name} GROUP BY passenger_count"),
)
])
}
async fn do_write(args: &Args, db: &Database, table_name: &str) {
let mut file_list = get_file_list(args.path.clone().expect("Specify data path in argument"));
let mut write_jobs = JoinSet::new();
let create_table_result = db.create(create_table_expr(table_name)).await;
println!("Create table result: {create_table_result:?}");
let progress_bar_style = ProgressStyle::with_template(
"[{elapsed_precise}] {bar:60.cyan/blue} {pos:>7}/{len:7} {msg}",
)
.unwrap()
.progress_chars("##-");
let multi_progress_bar = MultiProgress::new();
let file_progress = multi_progress_bar.add(ProgressBar::new(file_list.len() as _));
file_progress.inc(0);
let batch_size = args.batch_size;
for _ in 0..args.thread_num {
if let Some(path) = file_list.pop() {
let db = db.clone();
let mpb = multi_progress_bar.clone();
let pb_style = progress_bar_style.clone();
let table_name = table_name.to_string();
let _ = write_jobs.spawn(async move {
write_data(&table_name, batch_size, &db, path, mpb, pb_style).await
});
}
}
while write_jobs.join_next().await.is_some() {
file_progress.inc(1);
if let Some(path) = file_list.pop() {
let db = db.clone();
let mpb = multi_progress_bar.clone();
let pb_style = progress_bar_style.clone();
let table_name = table_name.to_string();
let _ = write_jobs.spawn(async move {
write_data(&table_name, batch_size, &db, path, mpb, pb_style).await
});
}
}
}
async fn do_query(num_iter: usize, db: &Database, table_name: &str) {
for (query_name, query) in query_set(table_name) {
println!("Running query: {query}");
for i in 0..num_iter {
let now = Instant::now();
let res = db.sql(&query).await.unwrap();
match res.data {
OutputData::AffectedRows(_) | OutputData::RecordBatches(_) => (),
OutputData::Stream(stream) => {
stream.try_collect::<Vec<_>>().await.unwrap();
}
}
let elapsed = now.elapsed();
println!(
"query {}, iteration {}: {}ms",
query_name,
i,
elapsed.as_millis(),
);
}
}
}
fn main() {
let args = Args::parse();
tokio::runtime::Builder::new_multi_thread()
.worker_threads(args.thread_num)
.enable_all()
.build()
.unwrap()
.block_on(async {
let client = Client::with_urls(vec![&args.endpoint]);
let db = Database::new(DEFAULT_CATALOG_NAME, DEFAULT_SCHEMA_NAME, client);
let table_name = new_table_name();
if !args.skip_write {
do_write(&args, &db, &table_name).await;
}
if !args.skip_read {
do_query(args.iter_num, &db, &table_name).await;
}
})
}
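As a usage sketch (the binary name and dataset path below are assumptions for illustration; the remaining flags are the defaults defined in `Args`), this benchmark could be launched with `cargo run --release --bin nyc-taxi -- --path /path/to/tripdata-parquet --batch-size 4096 --thread-num 4 --iter-num 3 --endpoint 127.0.0.1:4001`; adding `--skip-write` skips the ingestion phase and `--skip-read` skips the query phase.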

View File

@@ -28,7 +28,6 @@ use rand::distributions::{Alphanumeric, DistString, Uniform};
use rand::rngs::SmallRng; use rand::rngs::SmallRng;
use rand::{Rng, SeedableRng}; use rand::{Rng, SeedableRng};
use serde::{Deserialize, Serialize}; use serde::{Deserialize, Serialize};
use store_api::logstore::provider::Provider;
use store_api::logstore::LogStore; use store_api::logstore::LogStore;
use store_api::storage::RegionId; use store_api::storage::RegionId;
@@ -211,7 +210,7 @@ impl From<Args> for Config {
pub struct Region { pub struct Region {
id: RegionId, id: RegionId,
schema: Vec<ColumnSchema>, schema: Vec<ColumnSchema>,
provider: Provider, wal_options: WalOptions,
next_sequence: AtomicU64, next_sequence: AtomicU64,
next_entry_id: AtomicU64, next_entry_id: AtomicU64,
next_timestamp: AtomicI64, next_timestamp: AtomicI64,
@@ -228,14 +227,10 @@ impl Region {
num_rows: u32, num_rows: u32,
rng_seed: u64, rng_seed: u64,
) -> Self { ) -> Self {
let provider = match wal_options {
WalOptions::RaftEngine => Provider::raft_engine_provider(id.as_u64()),
WalOptions::Kafka(opts) => Provider::kafka_provider(opts.topic),
};
Self { Self {
id, id,
schema, schema,
provider, wal_options,
next_sequence: AtomicU64::new(1), next_sequence: AtomicU64::new(1),
next_entry_id: AtomicU64::new(1), next_entry_id: AtomicU64::new(1),
next_timestamp: AtomicI64::new(1655276557000), next_timestamp: AtomicI64::new(1655276557000),
@@ -263,14 +258,14 @@ impl Region {
self.id, self.id,
self.next_entry_id.fetch_add(1, Ordering::Relaxed), self.next_entry_id.fetch_add(1, Ordering::Relaxed),
&entry, &entry,
&self.provider, &self.wal_options,
) )
.unwrap(); .unwrap();
} }
/// Replays the region. /// Replays the region.
pub async fn replay<S: LogStore>(&self, wal: &Arc<Wal<S>>) { pub async fn replay<S: LogStore>(&self, wal: &Arc<Wal<S>>) {
let mut wal_stream = wal.scan(self.id, 0, &self.provider).unwrap(); let mut wal_stream = wal.scan(self.id, 0, &self.wal_options).unwrap();
while let Some(res) = wal_stream.next().await { while let Some(res) = wal_stream.next().await {
let (_, entry) = res.unwrap(); let (_, entry) = res.unwrap();
metrics::METRIC_WAL_READ_BYTES_TOTAL.inc_by(Self::entry_estimated_size(&entry) as u64); metrics::METRIC_WAL_READ_BYTES_TOTAL.inc_by(Self::entry_estimated_size(&entry) as u64);

View File

@@ -1,16 +1,10 @@
# Configurations # Configurations
- [Standalone Mode](#standalone-mode)
- [Distributed Mode](#distributed-mode)
- [Frontend](#frontend)
- [Metasrv](#metasrv)
- [Datanode](#datanode)
## Standalone Mode ## Standalone Mode
{{ toml2docs "./standalone.example.toml" }} {{ toml2docs "./standalone.example.toml" }}
## Distributed Mode ## Cluster Mode
### Frontend ### Frontend

View File

@@ -1,11 +1,5 @@
# Configurations # Configurations
- [Standalone Mode](#standalone-mode)
- [Distributed Mode](#distributed-mode)
- [Frontend](#frontend)
- [Metasrv](#metasrv)
- [Datanode](#datanode)
## Standalone Mode ## Standalone Mode
| Key | Type | Default | Descriptions | | Key | Type | Default | Descriptions |
@@ -13,10 +7,6 @@
| `mode` | String | `standalone` | The running mode of the datanode. It can be `standalone` or `distributed`. | | `mode` | String | `standalone` | The running mode of the datanode. It can be `standalone` or `distributed`. |
| `enable_telemetry` | Bool | `true` | Enable telemetry to collect anonymous usage data. | | `enable_telemetry` | Bool | `true` | Enable telemetry to collect anonymous usage data. |
| `default_timezone` | String | `None` | The default timezone of the server. | | `default_timezone` | String | `None` | The default timezone of the server. |
| `runtime` | -- | -- | The runtime options. |
| `runtime.read_rt_size` | Integer | `8` | The number of threads to execute the runtime for global read operations. |
| `runtime.write_rt_size` | Integer | `8` | The number of threads to execute the runtime for global write operations. |
| `runtime.bg_rt_size` | Integer | `8` | The number of threads to execute the runtime for global background operations. |
| `http` | -- | -- | The HTTP server options. | | `http` | -- | -- | The HTTP server options. |
| `http.addr` | String | `127.0.0.1:4000` | The address to bind the HTTP server. | | `http.addr` | String | `127.0.0.1:4000` | The address to bind the HTTP server. |
| `http.timeout` | String | `30s` | HTTP request timeout. | | `http.timeout` | String | `30s` | HTTP request timeout. |
@@ -24,11 +14,6 @@
| `grpc` | -- | -- | The gRPC server options. | | `grpc` | -- | -- | The gRPC server options. |
| `grpc.addr` | String | `127.0.0.1:4001` | The address to bind the gRPC server. | | `grpc.addr` | String | `127.0.0.1:4001` | The address to bind the gRPC server. |
| `grpc.runtime_size` | Integer | `8` | The number of server worker threads. | | `grpc.runtime_size` | Integer | `8` | The number of server worker threads. |
| `grpc.tls` | -- | -- | gRPC server TLS options, see `mysql.tls` section. |
| `grpc.tls.mode` | String | `disable` | TLS mode. |
| `grpc.tls.cert_path` | String | `None` | Certificate file path. |
| `grpc.tls.key_path` | String | `None` | Private key file path. |
| `grpc.tls.watch` | Bool | `false` | Watch for Certificate and key file change and auto reload.<br/>For now, gRPC tls config does not support auto reload. |
| `mysql` | -- | -- | MySQL server options. | | `mysql` | -- | -- | MySQL server options. |
| `mysql.enable` | Bool | `true` | Whether to enable. | | `mysql.enable` | Bool | `true` | Whether to enable. |
| `mysql.addr` | String | `127.0.0.1:4002` | The addr to bind the MySQL server. | | `mysql.addr` | String | `127.0.0.1:4002` | The addr to bind the MySQL server. |
@@ -42,13 +27,15 @@
| `postgres.enable` | Bool | `true` | Whether to enable | | `postgres.enable` | Bool | `true` | Whether to enable |
| `postgres.addr` | String | `127.0.0.1:4003` | The addr to bind the PostgresSQL server. | | `postgres.addr` | String | `127.0.0.1:4003` | The addr to bind the PostgresSQL server. |
| `postgres.runtime_size` | Integer | `2` | The number of server worker threads. | | `postgres.runtime_size` | Integer | `2` | The number of server worker threads. |
| `postgres.tls` | -- | -- | PostgresSQL server TLS options, see `mysql.tls` section. | | `postgres.tls` | -- | -- | PostgresSQL server TLS options, see `mysql_options.tls` section. |
| `postgres.tls.mode` | String | `disable` | TLS mode. | | `postgres.tls.mode` | String | `disable` | TLS mode. |
| `postgres.tls.cert_path` | String | `None` | Certificate file path. | | `postgres.tls.cert_path` | String | `None` | Certificate file path. |
| `postgres.tls.key_path` | String | `None` | Private key file path. | | `postgres.tls.key_path` | String | `None` | Private key file path. |
| `postgres.tls.watch` | Bool | `false` | Watch for Certificate and key file change and auto reload | | `postgres.tls.watch` | Bool | `false` | Watch for Certificate and key file change and auto reload |
| `opentsdb` | -- | -- | OpenTSDB protocol options. | | `opentsdb` | -- | -- | OpenTSDB protocol options. |
| `opentsdb.enable` | Bool | `true` | Whether to enable OpenTSDB put in HTTP API. | | `opentsdb.enable` | Bool | `true` | Whether to enable |
| `opentsdb.addr` | String | `127.0.0.1:4242` | OpenTSDB telnet API server address. |
| `opentsdb.runtime_size` | Integer | `2` | The number of server worker threads. |
| `influxdb` | -- | -- | InfluxDB protocol options. | | `influxdb` | -- | -- | InfluxDB protocol options. |
| `influxdb.enable` | Bool | `true` | Whether to enable InfluxDB protocol in HTTP API. | | `influxdb.enable` | Bool | `true` | Whether to enable InfluxDB protocol in HTTP API. |
| `prom_store` | -- | -- | Prometheus remote storage options | | `prom_store` | -- | -- | Prometheus remote storage options |
@@ -111,10 +98,6 @@
| `region_engine.mito.sst_meta_cache_size` | String | `128MB` | Cache size for SST metadata. Setting it to 0 to disable the cache.<br/>If not set, it's default to 1/32 of OS memory with a max limitation of 128MB. | | `region_engine.mito.sst_meta_cache_size` | String | `128MB` | Cache size for SST metadata. Setting it to 0 to disable the cache.<br/>If not set, it's default to 1/32 of OS memory with a max limitation of 128MB. |
| `region_engine.mito.vector_cache_size` | String | `512MB` | Cache size for vectors and arrow arrays. Setting it to 0 to disable the cache.<br/>If not set, it's default to 1/16 of OS memory with a max limitation of 512MB. | | `region_engine.mito.vector_cache_size` | String | `512MB` | Cache size for vectors and arrow arrays. Setting it to 0 to disable the cache.<br/>If not set, it's default to 1/16 of OS memory with a max limitation of 512MB. |
| `region_engine.mito.page_cache_size` | String | `512MB` | Cache size for pages of SST row groups. Setting it to 0 to disable the cache.<br/>If not set, it's default to 1/16 of OS memory with a max limitation of 512MB. | | `region_engine.mito.page_cache_size` | String | `512MB` | Cache size for pages of SST row groups. Setting it to 0 to disable the cache.<br/>If not set, it's default to 1/16 of OS memory with a max limitation of 512MB. |
| `region_engine.mito.enable_experimental_write_cache` | Bool | `false` | Whether to enable the experimental write cache. |
| `region_engine.mito.experimental_write_cache_path` | String | `""` | File system path for write cache, defaults to `{data_home}/write_cache`. |
| `region_engine.mito.experimental_write_cache_size` | String | `512MB` | Capacity for write cache. |
| `region_engine.mito.experimental_write_cache_ttl` | String | `1h` | TTL for write cache. |
| `region_engine.mito.sst_write_buffer_size` | String | `8MB` | Buffer size for SST writing. | | `region_engine.mito.sst_write_buffer_size` | String | `8MB` | Buffer size for SST writing. |
| `region_engine.mito.scan_parallelism` | Integer | `0` | Parallelism to scan a region (default: 1/4 of cpu cores).<br/>- `0`: using the default value (1/4 of cpu cores).<br/>- `1`: scan in current thread.<br/>- `n`: scan in parallelism n. | | `region_engine.mito.scan_parallelism` | Integer | `0` | Parallelism to scan a region (default: 1/4 of cpu cores).<br/>- `0`: using the default value (1/4 of cpu cores).<br/>- `1`: scan in current thread.<br/>- `n`: scan in parallelism n. |
| `region_engine.mito.parallel_scan_channel_size` | Integer | `32` | Capacity of the channel to send data from parallel scan tasks to the main task. | | `region_engine.mito.parallel_scan_channel_size` | Integer | `32` | Capacity of the channel to send data from parallel scan tasks to the main task. |
@@ -146,11 +129,9 @@
| `export_metrics.remote_write` | -- | -- | -- | | `export_metrics.remote_write` | -- | -- | -- |
| `export_metrics.remote_write.url` | String | `""` | The url the metrics send to. The url example can be: `http://127.0.0.1:4000/v1/prometheus/write?db=information_schema`. | | `export_metrics.remote_write.url` | String | `""` | The url the metrics send to. The url example can be: `http://127.0.0.1:4000/v1/prometheus/write?db=information_schema`. |
| `export_metrics.remote_write.headers` | InlineTable | -- | HTTP headers of Prometheus remote-write carry. | | `export_metrics.remote_write.headers` | InlineTable | -- | HTTP headers of Prometheus remote-write carry. |
| `tracing` | -- | -- | The tracing options. Only effect when compiled with `tokio-console` feature. |
| `tracing.tokio_console_addr` | String | `None` | The tokio console address. |
## Distributed Mode ## Cluster Mode
### Frontend ### Frontend
@@ -158,10 +139,6 @@
| --- | -----| ------- | ----------- | | --- | -----| ------- | ----------- |
| `mode` | String | `standalone` | The running mode of the datanode. It can be `standalone` or `distributed`. | | `mode` | String | `standalone` | The running mode of the datanode. It can be `standalone` or `distributed`. |
| `default_timezone` | String | `None` | The default timezone of the server. | | `default_timezone` | String | `None` | The default timezone of the server. |
| `runtime` | -- | -- | The runtime options. |
| `runtime.read_rt_size` | Integer | `8` | The number of threads to execute the runtime for global read operations. |
| `runtime.write_rt_size` | Integer | `8` | The number of threads to execute the runtime for global write operations. |
| `runtime.bg_rt_size` | Integer | `8` | The number of threads to execute the runtime for global background operations. |
| `heartbeat` | -- | -- | The heartbeat options. | | `heartbeat` | -- | -- | The heartbeat options. |
| `heartbeat.interval` | String | `18s` | Interval for sending heartbeat messages to the metasrv. | | `heartbeat.interval` | String | `18s` | Interval for sending heartbeat messages to the metasrv. |
| `heartbeat.retry_interval` | String | `3s` | Interval for retrying to send heartbeat messages to the metasrv. | | `heartbeat.retry_interval` | String | `3s` | Interval for retrying to send heartbeat messages to the metasrv. |
@@ -172,11 +149,6 @@
| `grpc` | -- | -- | The gRPC server options. | | `grpc` | -- | -- | The gRPC server options. |
| `grpc.addr` | String | `127.0.0.1:4001` | The address to bind the gRPC server. | | `grpc.addr` | String | `127.0.0.1:4001` | The address to bind the gRPC server. |
| `grpc.runtime_size` | Integer | `8` | The number of server worker threads. | | `grpc.runtime_size` | Integer | `8` | The number of server worker threads. |
| `grpc.tls` | -- | -- | gRPC server TLS options, see `mysql.tls` section. |
| `grpc.tls.mode` | String | `disable` | TLS mode. |
| `grpc.tls.cert_path` | String | `None` | Certificate file path. |
| `grpc.tls.key_path` | String | `None` | Private key file path. |
| `grpc.tls.watch` | Bool | `false` | Watch for Certificate and key file change and auto reload.<br/>For now, gRPC tls config does not support auto reload. |
| `mysql` | -- | -- | MySQL server options. | | `mysql` | -- | -- | MySQL server options. |
| `mysql.enable` | Bool | `true` | Whether to enable. | | `mysql.enable` | Bool | `true` | Whether to enable. |
| `mysql.addr` | String | `127.0.0.1:4002` | The addr to bind the MySQL server. | | `mysql.addr` | String | `127.0.0.1:4002` | The addr to bind the MySQL server. |
@@ -190,13 +162,15 @@
| `postgres.enable` | Bool | `true` | Whether to enable | | `postgres.enable` | Bool | `true` | Whether to enable |
| `postgres.addr` | String | `127.0.0.1:4003` | The addr to bind the PostgresSQL server. | | `postgres.addr` | String | `127.0.0.1:4003` | The addr to bind the PostgresSQL server. |
| `postgres.runtime_size` | Integer | `2` | The number of server worker threads. | | `postgres.runtime_size` | Integer | `2` | The number of server worker threads. |
| `postgres.tls` | -- | -- | PostgresSQL server TLS options, see `mysql.tls` section. | | `postgres.tls` | -- | -- | PostgresSQL server TLS options, see `mysql_options.tls` section. |
| `postgres.tls.mode` | String | `disable` | TLS mode. | | `postgres.tls.mode` | String | `disable` | TLS mode. |
| `postgres.tls.cert_path` | String | `None` | Certificate file path. | | `postgres.tls.cert_path` | String | `None` | Certificate file path. |
| `postgres.tls.key_path` | String | `None` | Private key file path. | | `postgres.tls.key_path` | String | `None` | Private key file path. |
| `postgres.tls.watch` | Bool | `false` | Watch for Certificate and key file change and auto reload | | `postgres.tls.watch` | Bool | `false` | Watch for Certificate and key file change and auto reload |
| `opentsdb` | -- | -- | OpenTSDB protocol options. | | `opentsdb` | -- | -- | OpenTSDB protocol options. |
| `opentsdb.enable` | Bool | `true` | Whether to enable OpenTSDB put in HTTP API. | | `opentsdb.enable` | Bool | `true` | Whether to enable |
| `opentsdb.addr` | String | `127.0.0.1:4242` | OpenTSDB telnet API server address. |
| `opentsdb.runtime_size` | Integer | `2` | The number of server worker threads. |
| `influxdb` | -- | -- | InfluxDB protocol options. | | `influxdb` | -- | -- | InfluxDB protocol options. |
| `influxdb.enable` | Bool | `true` | Whether to enable InfluxDB protocol in HTTP API. | | `influxdb.enable` | Bool | `true` | Whether to enable InfluxDB protocol in HTTP API. |
| `prom_store` | -- | -- | Prometheus remote storage options | | `prom_store` | -- | -- | Prometheus remote storage options |
@@ -214,6 +188,7 @@
| `meta_client.metadata_cache_tti` | String | `5m` | -- | | `meta_client.metadata_cache_tti` | String | `5m` | -- |
| `datanode` | -- | -- | Datanode options. | | `datanode` | -- | -- | Datanode options. |
| `datanode.client` | -- | -- | Datanode client options. | | `datanode.client` | -- | -- | Datanode client options. |
| `datanode.client.timeout` | String | `10s` | -- |
| `datanode.client.connect_timeout` | String | `10s` | -- | | `datanode.client.connect_timeout` | String | `10s` | -- |
| `datanode.client.tcp_nodelay` | Bool | `true` | -- | | `datanode.client.tcp_nodelay` | Bool | `true` | -- |
| `logging` | -- | -- | The logging options. | | `logging` | -- | -- | The logging options. |
@@ -232,8 +207,6 @@
| `export_metrics.remote_write` | -- | -- | -- | | `export_metrics.remote_write` | -- | -- | -- |
| `export_metrics.remote_write.url` | String | `""` | The url the metrics send to. The url example can be: `http://127.0.0.1:4000/v1/prometheus/write?db=information_schema`. | | `export_metrics.remote_write.url` | String | `""` | The url the metrics send to. The url example can be: `http://127.0.0.1:4000/v1/prometheus/write?db=information_schema`. |
| `export_metrics.remote_write.headers` | InlineTable | -- | HTTP headers of Prometheus remote-write carry. | | `export_metrics.remote_write.headers` | InlineTable | -- | HTTP headers of Prometheus remote-write carry. |
| `tracing` | -- | -- | The tracing options. Only effect when compiled with `tokio-console` feature. |
| `tracing.tokio_console_addr` | String | `None` | The tokio console address. |
### Metasrv ### Metasrv
@@ -248,10 +221,6 @@
| `use_memory_store` | Bool | `false` | Store data in memory. | | `use_memory_store` | Bool | `false` | Store data in memory. |
| `enable_telemetry` | Bool | `true` | Whether to enable greptimedb telemetry. | | `enable_telemetry` | Bool | `true` | Whether to enable greptimedb telemetry. |
| `store_key_prefix` | String | `""` | If it's not empty, the metasrv will store all data with this key prefix. | | `store_key_prefix` | String | `""` | If it's not empty, the metasrv will store all data with this key prefix. |
| `runtime` | -- | -- | The runtime options. |
| `runtime.read_rt_size` | Integer | `8` | The number of threads to execute the runtime for global read operations. |
| `runtime.write_rt_size` | Integer | `8` | The number of threads to execute the runtime for global write operations. |
| `runtime.bg_rt_size` | Integer | `8` | The number of threads to execute the runtime for global background operations. |
| `procedure` | -- | -- | Procedure storage options. | | `procedure` | -- | -- | Procedure storage options. |
| `procedure.max_retry_times` | Integer | `12` | Procedure max retry time. | | `procedure.max_retry_times` | Integer | `12` | Procedure max retry time. |
| `procedure.retry_delay` | String | `500ms` | Initial retry delay of procedures, increases exponentially | | `procedure.retry_delay` | String | `500ms` | Initial retry delay of procedures, increases exponentially |
@@ -294,8 +263,6 @@
| `export_metrics.remote_write` | -- | -- | -- | | `export_metrics.remote_write` | -- | -- | -- |
| `export_metrics.remote_write.url` | String | `""` | The url the metrics send to. The url example can be: `http://127.0.0.1:4000/v1/prometheus/write?db=information_schema`. | | `export_metrics.remote_write.url` | String | `""` | The url the metrics send to. The url example can be: `http://127.0.0.1:4000/v1/prometheus/write?db=information_schema`. |
| `export_metrics.remote_write.headers` | InlineTable | -- | HTTP headers of Prometheus remote-write carry. | | `export_metrics.remote_write.headers` | InlineTable | -- | HTTP headers of Prometheus remote-write carry. |
| `tracing` | -- | -- | The tracing options. Only effect when compiled with `tokio-console` feature. |
| `tracing.tokio_console_addr` | String | `None` | The tokio console address. |
### Datanode ### Datanode
@@ -306,17 +273,12 @@
| `node_id` | Integer | `None` | The datanode identifier and should be unique in the cluster. | | `node_id` | Integer | `None` | The datanode identifier and should be unique in the cluster. |
| `require_lease_before_startup` | Bool | `false` | Start services after regions have obtained leases.<br/>It will block the datanode start if it can't receive leases in the heartbeat from metasrv. | | `require_lease_before_startup` | Bool | `false` | Start services after regions have obtained leases.<br/>It will block the datanode start if it can't receive leases in the heartbeat from metasrv. |
| `init_regions_in_background` | Bool | `false` | Initialize all regions in the background during the startup.<br/>By default, it provides services after all regions have been initialized. | | `init_regions_in_background` | Bool | `false` | Initialize all regions in the background during the startup.<br/>By default, it provides services after all regions have been initialized. |
| `init_regions_parallelism` | Integer | `16` | Parallelism of initializing regions. |
| `rpc_addr` | String | `127.0.0.1:3001` | The gRPC address of the datanode. | | `rpc_addr` | String | `127.0.0.1:3001` | The gRPC address of the datanode. |
| `rpc_hostname` | String | `None` | The hostname of the datanode. | | `rpc_hostname` | String | `None` | The hostname of the datanode. |
| `rpc_runtime_size` | Integer | `8` | The number of gRPC server worker threads. | | `rpc_runtime_size` | Integer | `8` | The number of gRPC server worker threads. |
| `rpc_max_recv_message_size` | String | `512MB` | The maximum receive message size for gRPC server. | | `rpc_max_recv_message_size` | String | `512MB` | The maximum receive message size for gRPC server. |
| `rpc_max_send_message_size` | String | `512MB` | The maximum send message size for gRPC server. | | `rpc_max_send_message_size` | String | `512MB` | The maximum send message size for gRPC server. |
| `enable_telemetry` | Bool | `true` | Enable telemetry to collect anonymous usage data. | | `enable_telemetry` | Bool | `true` | Enable telemetry to collect anonymous usage data. |
| `runtime` | -- | -- | The runtime options. |
| `runtime.read_rt_size` | Integer | `8` | The number of threads to execute the runtime for global read operations. |
| `runtime.write_rt_size` | Integer | `8` | The number of threads to execute the runtime for global write operations. |
| `runtime.bg_rt_size` | Integer | `8` | The number of threads to execute the runtime for global background operations. |
| `heartbeat` | -- | -- | The heartbeat options. | | `heartbeat` | -- | -- | The heartbeat options. |
| `heartbeat.interval` | String | `3s` | Interval for sending heartbeat messages to the metasrv. | | `heartbeat.interval` | String | `3s` | Interval for sending heartbeat messages to the metasrv. |
| `heartbeat.retry_interval` | String | `3s` | Interval for retrying to send heartbeat messages to the metasrv. | | `heartbeat.retry_interval` | String | `3s` | Interval for retrying to send heartbeat messages to the metasrv. |
@@ -381,10 +343,6 @@
| `region_engine.mito.sst_meta_cache_size` | String | `128MB` | Cache size for SST metadata. Setting it to 0 to disable the cache.<br/>If not set, it's default to 1/32 of OS memory with a max limitation of 128MB. | | `region_engine.mito.sst_meta_cache_size` | String | `128MB` | Cache size for SST metadata. Setting it to 0 to disable the cache.<br/>If not set, it's default to 1/32 of OS memory with a max limitation of 128MB. |
| `region_engine.mito.vector_cache_size` | String | `512MB` | Cache size for vectors and arrow arrays. Setting it to 0 to disable the cache.<br/>If not set, it's default to 1/16 of OS memory with a max limitation of 512MB. | | `region_engine.mito.vector_cache_size` | String | `512MB` | Cache size for vectors and arrow arrays. Setting it to 0 to disable the cache.<br/>If not set, it's default to 1/16 of OS memory with a max limitation of 512MB. |
| `region_engine.mito.page_cache_size` | String | `512MB` | Cache size for pages of SST row groups. Setting it to 0 to disable the cache.<br/>If not set, it's default to 1/16 of OS memory with a max limitation of 512MB. | | `region_engine.mito.page_cache_size` | String | `512MB` | Cache size for pages of SST row groups. Setting it to 0 to disable the cache.<br/>If not set, it's default to 1/16 of OS memory with a max limitation of 512MB. |
| `region_engine.mito.enable_experimental_write_cache` | Bool | `false` | Whether to enable the experimental write cache. |
| `region_engine.mito.experimental_write_cache_path` | String | `""` | File system path for write cache, defaults to `{data_home}/write_cache`. |
| `region_engine.mito.experimental_write_cache_size` | String | `512MB` | Capacity for write cache. |
| `region_engine.mito.experimental_write_cache_ttl` | String | `1h` | TTL for write cache. |
| `region_engine.mito.sst_write_buffer_size` | String | `8MB` | Buffer size for SST writing. | | `region_engine.mito.sst_write_buffer_size` | String | `8MB` | Buffer size for SST writing. |
| `region_engine.mito.scan_parallelism` | Integer | `0` | Parallelism to scan a region (default: 1/4 of cpu cores).<br/>- `0`: using the default value (1/4 of cpu cores).<br/>- `1`: scan in current thread.<br/>- `n`: scan in parallelism n. | | `region_engine.mito.scan_parallelism` | Integer | `0` | Parallelism to scan a region (default: 1/4 of cpu cores).<br/>- `0`: using the default value (1/4 of cpu cores).<br/>- `1`: scan in current thread.<br/>- `n`: scan in parallelism n. |
| `region_engine.mito.parallel_scan_channel_size` | Integer | `32` | Capacity of the channel to send data from parallel scan tasks to the main task. | | `region_engine.mito.parallel_scan_channel_size` | Integer | `32` | Capacity of the channel to send data from parallel scan tasks to the main task. |
@@ -416,5 +374,3 @@
| `export_metrics.remote_write` | -- | -- | -- | | `export_metrics.remote_write` | -- | -- | -- |
| `export_metrics.remote_write.url` | String | `""` | The url the metrics send to. The url example can be: `http://127.0.0.1:4000/v1/prometheus/write?db=information_schema`. | | `export_metrics.remote_write.url` | String | `""` | The url the metrics send to. The url example can be: `http://127.0.0.1:4000/v1/prometheus/write?db=information_schema`. |
| `export_metrics.remote_write.headers` | InlineTable | -- | HTTP headers of Prometheus remote-write carry. | | `export_metrics.remote_write.headers` | InlineTable | -- | HTTP headers of Prometheus remote-write carry. |
| `tracing` | -- | -- | The tracing options. Only effect when compiled with `tokio-console` feature. |
| `tracing.tokio_console_addr` | String | `None` | The tokio console address. |

View File

@@ -13,9 +13,6 @@ require_lease_before_startup = false
## By default, it provides services after all regions have been initialized. ## By default, it provides services after all regions have been initialized.
init_regions_in_background = false init_regions_in_background = false
## Parallelism of initializing regions.
init_regions_parallelism = 16
## The gRPC address of the datanode. ## The gRPC address of the datanode.
rpc_addr = "127.0.0.1:3001" rpc_addr = "127.0.0.1:3001"
@@ -35,15 +32,6 @@ rpc_max_send_message_size = "512MB"
## Enable telemetry to collect anonymous usage data. ## Enable telemetry to collect anonymous usage data.
enable_telemetry = true enable_telemetry = true
## The runtime options.
[runtime]
## The number of threads to execute the runtime for global read operations.
read_rt_size = 8
## The number of threads to execute the runtime for global write operations.
write_rt_size = 8
## The number of threads to execute the runtime for global background operations.
bg_rt_size = 8
## The heartbeat options. ## The heartbeat options.
[heartbeat] [heartbeat]
## Interval for sending heartbeat messages to the metasrv. ## Interval for sending heartbeat messages to the metasrv.
@@ -336,18 +324,6 @@ vector_cache_size = "512MB"
## If not set, it's default to 1/16 of OS memory with a max limitation of 512MB. ## If not set, it's default to 1/16 of OS memory with a max limitation of 512MB.
page_cache_size = "512MB" page_cache_size = "512MB"
## Whether to enable the experimental write cache.
enable_experimental_write_cache = false
## File system path for write cache, defaults to `{data_home}/write_cache`.
experimental_write_cache_path = ""
## Capacity for write cache.
experimental_write_cache_size = "512MB"
## TTL for write cache.
experimental_write_cache_ttl = "1h"
## Buffer size for SST writing. ## Buffer size for SST writing.
sst_write_buffer_size = "8MB" sst_write_buffer_size = "8MB"
@@ -452,9 +428,3 @@ url = ""
## HTTP headers of Prometheus remote-write carry. ## HTTP headers of Prometheus remote-write carry.
headers = { } headers = { }
## The tracing options. Only effect when compiled with `tokio-console` feature.
[tracing]
## The tokio console address.
## +toml2docs:none-default
tokio_console_addr = "127.0.0.1"

View File

@@ -5,15 +5,6 @@ mode = "standalone"
## +toml2docs:none-default ## +toml2docs:none-default
default_timezone = "UTC" default_timezone = "UTC"
## The runtime options.
[runtime]
## The number of threads to execute the runtime for global read operations.
read_rt_size = 8
## The number of threads to execute the runtime for global write operations.
write_rt_size = 8
## The number of threads to execute the runtime for global background operations.
bg_rt_size = 8
## The heartbeat options. ## The heartbeat options.
[heartbeat] [heartbeat]
## Interval for sending heartbeat messages to the metasrv. ## Interval for sending heartbeat messages to the metasrv.
@@ -39,23 +30,6 @@ addr = "127.0.0.1:4001"
## The number of server worker threads. ## The number of server worker threads.
runtime_size = 8 runtime_size = 8
## gRPC server TLS options, see `mysql.tls` section.
[grpc.tls]
## TLS mode.
mode = "disable"
## Certificate file path.
## +toml2docs:none-default
cert_path = ""
## Private key file path.
## +toml2docs:none-default
key_path = ""
## Watch for Certificate and key file change and auto reload.
## For now, gRPC tls config does not support auto reload.
watch = false
## MySQL server options. ## MySQL server options.
[mysql] [mysql]
## Whether to enable. ## Whether to enable.
@@ -96,7 +70,7 @@ addr = "127.0.0.1:4003"
## The number of server worker threads. ## The number of server worker threads.
runtime_size = 2 runtime_size = 2
## PostgresSQL server TLS options, see `mysql.tls` section. ## PostgresSQL server TLS options, see `mysql_options.tls` section.
[postgres.tls] [postgres.tls]
## TLS mode. ## TLS mode.
mode = "disable" mode = "disable"
@@ -114,8 +88,12 @@ watch = false
## OpenTSDB protocol options. ## OpenTSDB protocol options.
[opentsdb] [opentsdb]
## Whether to enable OpenTSDB put in HTTP API. ## Whether to enable
enable = true enable = true
## OpenTSDB telnet API server address.
addr = "127.0.0.1:4242"
## The number of server worker threads.
runtime_size = 2
## InfluxDB protocol options. ## InfluxDB protocol options.
[influxdb] [influxdb]
@@ -162,6 +140,7 @@ metadata_cache_tti = "5m"
[datanode] [datanode]
## Datanode client options. ## Datanode client options.
[datanode.client] [datanode.client]
timeout = "10s"
connect_timeout = "10s" connect_timeout = "10s"
tcp_nodelay = true tcp_nodelay = true
@@ -211,9 +190,3 @@ url = ""
## HTTP headers of Prometheus remote-write carry. ## HTTP headers of Prometheus remote-write carry.
headers = { } headers = { }
## The tracing options. Only effect when compiled with `tokio-console` feature.
[tracing]
## The tokio console address.
## +toml2docs:none-default
tokio_console_addr = "127.0.0.1"

View File

@@ -25,15 +25,6 @@ enable_telemetry = true
## If it's not empty, the metasrv will store all data with this key prefix. ## If it's not empty, the metasrv will store all data with this key prefix.
store_key_prefix = "" store_key_prefix = ""
## The runtime options.
[runtime]
## The number of threads to execute the runtime for global read operations.
read_rt_size = 8
## The number of threads to execute the runtime for global write operations.
write_rt_size = 8
## The number of threads to execute the runtime for global background operations.
bg_rt_size = 8
## Procedure storage options. ## Procedure storage options.
[procedure] [procedure]
@@ -150,9 +141,3 @@ url = ""
## HTTP headers of Prometheus remote-write carry. ## HTTP headers of Prometheus remote-write carry.
headers = { } headers = { }
## The tracing options. Only effect when compiled with `tokio-console` feature.
[tracing]
## The tokio console address.
## +toml2docs:none-default
tokio_console_addr = "127.0.0.1"

View File

@@ -8,15 +8,6 @@ enable_telemetry = true
## +toml2docs:none-default ## +toml2docs:none-default
default_timezone = "UTC" default_timezone = "UTC"
## The runtime options.
[runtime]
## The number of threads to execute the runtime for global read operations.
read_rt_size = 8
## The number of threads to execute the runtime for global write operations.
write_rt_size = 8
## The number of threads to execute the runtime for global background operations.
bg_rt_size = 8
## The HTTP server options. ## The HTTP server options.
[http] [http]
## The address to bind the HTTP server. ## The address to bind the HTTP server.
@@ -34,23 +25,6 @@ addr = "127.0.0.1:4001"
## The number of server worker threads. ## The number of server worker threads.
runtime_size = 8 runtime_size = 8
## gRPC server TLS options, see `mysql.tls` section.
[grpc.tls]
## TLS mode.
mode = "disable"
## Certificate file path.
## +toml2docs:none-default
cert_path = ""
## Private key file path.
## +toml2docs:none-default
key_path = ""
## Watch for Certificate and key file change and auto reload.
## For now, gRPC tls config does not support auto reload.
watch = false
## MySQL server options. ## MySQL server options.
[mysql] [mysql]
## Whether to enable. ## Whether to enable.
@@ -91,7 +65,7 @@ addr = "127.0.0.1:4003"
## The number of server worker threads. ## The number of server worker threads.
runtime_size = 2 runtime_size = 2
## PostgresSQL server TLS options, see `mysql.tls` section. ## PostgresSQL server TLS options, see `mysql_options.tls` section.
[postgres.tls] [postgres.tls]
## TLS mode. ## TLS mode.
mode = "disable" mode = "disable"
@@ -109,8 +83,12 @@ watch = false
## OpenTSDB protocol options. ## OpenTSDB protocol options.
[opentsdb] [opentsdb]
## Whether to enable OpenTSDB put in HTTP API. ## Whether to enable
enable = true enable = true
## OpenTSDB telnet API server address.
addr = "127.0.0.1:4242"
## The number of server worker threads.
runtime_size = 2
## InfluxDB protocol options. ## InfluxDB protocol options.
[influxdb] [influxdb]
@@ -393,18 +371,6 @@ vector_cache_size = "512MB"
## If not set, it's default to 1/16 of OS memory with a max limitation of 512MB. ## If not set, it's default to 1/16 of OS memory with a max limitation of 512MB.
page_cache_size = "512MB" page_cache_size = "512MB"
## Whether to enable the experimental write cache.
enable_experimental_write_cache = false
## File system path for write cache, defaults to `{data_home}/write_cache`.
experimental_write_cache_path = ""
## Capacity for write cache.
experimental_write_cache_size = "512MB"
## TTL for write cache.
experimental_write_cache_ttl = "1h"
## Buffer size for SST writing. ## Buffer size for SST writing.
sst_write_buffer_size = "8MB" sst_write_buffer_size = "8MB"
@@ -509,9 +475,3 @@ url = ""
## HTTP headers of Prometheus remote-write carry. ## HTTP headers of Prometheus remote-write carry.
headers = { } headers = { }
## The tracing options. Only effect when compiled with `tokio-console` feature.
[tracing]
## The tokio console address.
## +toml2docs:none-default
tokio_console_addr = "127.0.0.1"

2
cyborg/.gitignore vendored
View File

@@ -1,2 +0,0 @@
node_modules
.env

View File

@@ -1,79 +0,0 @@
/*
* Copyright 2023 Greptime Team
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
import * as core from '@actions/core'
import {handleError, obtainClient} from "@/common";
import {context} from "@actions/github";
import {PullRequestEvent} from "@octokit/webhooks-types";
import {Options, sync as conventionalCommitsParser} from 'conventional-commits-parser';
import conventionalCommitTypes from 'conventional-commit-types';
import _ from "lodash";
const defaultTypes = Object.keys(conventionalCommitTypes.types)
const breakingChangeLabel = "breaking-change"
// These options are copied from [1].
// [1] https://github.com/conventional-changelog/conventional-changelog/blob/3f60b464/packages/conventional-changelog-conventionalcommits/src/parser.js
export const parserOpts: Options = {
headerPattern: /^(\w*)(?:\((.*)\))?!?: (.*)$/,
breakingHeaderPattern: /^(\w*)(?:\((.*)\))?!: (.*)$/,
headerCorrespondence: [
'type',
'scope',
'subject'
],
noteKeywords: ['BREAKING CHANGE', 'BREAKING-CHANGE'],
revertPattern: /^(?:Revert|revert:)\s"?([\s\S]+?)"?\s*This reverts commit (\w*)\./i,
revertCorrespondence: ['header', 'hash'],
issuePrefixes: ['#']
}
async function main() {
if (!context.payload.pull_request) {
throw new Error(`Only pull request event supported. ${context.eventName} is unsupported.`)
}
const client = obtainClient("GITHUB_TOKEN")
const payload = context.payload as PullRequestEvent
const { owner, repo, number } = {
owner: payload.pull_request.base.user.login,
repo: payload.pull_request.base.repo.name,
number: payload.pull_request.number,
}
const { data: pull_request } = await client.rest.pulls.get({
owner, repo, pull_number: number,
})
const commit = conventionalCommitsParser(pull_request.title, parserOpts)
core.info(`Receive commit: ${JSON.stringify(commit)}`)
if (!commit.type) {
throw Error(`Malformed commit: ${JSON.stringify(commit)}`)
}
if (!defaultTypes.includes(commit.type)) {
throw Error(`Unexpected type ${JSON.stringify(commit.type)} of commit: ${JSON.stringify(commit)}`)
}
const breakingChanges = _.filter(commit.notes, _.matches({ title: 'BREAKING CHANGE'}))
if (breakingChanges.length > 0) {
await client.rest.issues.addLabels({
owner, repo, issue_number: number, labels: [breakingChangeLabel]
})
}
}
main().catch(handleError)

View File

@@ -1,106 +0,0 @@
/*
* Copyright 2023 Greptime Team
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
import * as core from '@actions/core'
import {handleError, obtainClient} from "@/common";
import {context} from "@actions/github";
import {PullRequestEditedEvent, PullRequestEvent, PullRequestOpenedEvent} from "@octokit/webhooks-types";
// @ts-expect-error moduleResolution:nodenext issue 54523
import {RequestError} from "@octokit/request-error";
const needFollowUpDocs = "[x] This PR requires documentation updates."
const labelDocsNotRequired = "docs-not-required"
const labelDocsRequired = "docs-required"
async function main() {
if (!context.payload.pull_request) {
throw new Error(`Only pull request event supported. ${context.eventName} is unsupported.`)
}
const client = obtainClient("GITHUB_TOKEN")
const docsClient = obtainClient("DOCS_REPO_TOKEN")
const payload = context.payload as PullRequestEvent
const { owner, repo, number, actor, title, html_url } = {
owner: payload.pull_request.base.user.login,
repo: payload.pull_request.base.repo.name,
number: payload.pull_request.number,
title: payload.pull_request.title,
html_url: payload.pull_request.html_url,
actor: payload.pull_request.user.login,
}
const followUpDocs = checkPullRequestEvent(payload)
if (followUpDocs) {
core.info("Follow up docs.")
await client.rest.issues.removeLabel({
owner, repo, issue_number: number, name: labelDocsNotRequired,
}).catch((e: RequestError) => {
if (e.status != 404) {
throw e;
}
core.debug(`Label ${labelDocsNotRequired} not exist.`)
})
await client.rest.issues.addLabels({
owner, repo, issue_number: number, labels: [labelDocsRequired],
})
await docsClient.rest.issues.create({
owner: 'GreptimeTeam',
repo: 'docs',
title: `Update docs for ${title}`,
body: `A document change request is generated from ${html_url}`,
assignee: actor,
}).then((res) => {
core.info(`Created issue ${res.data}`)
})
} else {
core.info("No need to follow up docs.")
await client.rest.issues.removeLabel({
owner, repo, issue_number: number, name: labelDocsRequired
}).catch((e: RequestError) => {
if (e.status != 404) {
throw e;
}
core.debug(`Label ${labelDocsRequired} not exist.`)
})
await client.rest.issues.addLabels({
owner, repo, issue_number: number, labels: [labelDocsNotRequired],
})
}
}
function checkPullRequestEvent(payload: PullRequestEvent) {
switch (payload.action) {
case "opened":
return checkPullRequestOpenedEvent(payload as PullRequestOpenedEvent)
case "edited":
return checkPullRequestEditedEvent(payload as PullRequestEditedEvent)
default:
throw new Error(`${payload.action} is unsupported.`)
}
}
function checkPullRequestOpenedEvent(event: PullRequestOpenedEvent): boolean {
// @ts-ignore
return event.pull_request.body?.includes(needFollowUpDocs)
}
function checkPullRequestEditedEvent(event: PullRequestEditedEvent): boolean {
const previous = event.changes.body?.from.includes(needFollowUpDocs)
const current = event.pull_request.body?.includes(needFollowUpDocs)
// from docs-not-need to docs-required
return (!previous) && current
}
main().catch(handleError)

View File

@@ -1,83 +0,0 @@
/*
* Copyright 2023 Greptime Team
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
import * as core from '@actions/core'
import {handleError, obtainClient} from "@/common"
import {context} from "@actions/github"
import _ from "lodash"
async function main() {
const success = process.env["CI_REPORT_STATUS"] === "true"
core.info(`CI_REPORT_STATUS=${process.env["CI_REPORT_STATUS"]}, resolved to ${success}`)
const client = obtainClient("GITHUB_TOKEN")
const title = `Workflow run '${context.workflow}' failed`
const url = `${process.env["GITHUB_SERVER_URL"]}/${process.env["GITHUB_REPOSITORY"]}/actions/runs/${process.env["GITHUB_RUN_ID"]}`
const failure_comment = `@GreptimeTeam/db-approver\nNew failure: ${url} `
const success_comment = `@GreptimeTeam/db-approver\nBack to success: ${url}`
const {owner, repo} = context.repo
const labels = ['O-ci-failure']
const issues = await client.paginate(client.rest.issues.listForRepo, {
owner,
repo,
labels: labels.join(','),
state: "open",
sort: "created",
direction: "desc",
});
const issue = _.find(issues, (i) => i.title === title);
if (issue) { // exist issue
core.info(`Found previous issue ${issue.html_url}`)
if (!success) {
await client.rest.issues.createComment({
owner,
repo,
issue_number: issue.number,
body: failure_comment,
})
} else {
await client.rest.issues.createComment({
owner,
repo,
issue_number: issue.number,
body: success_comment,
})
await client.rest.issues.update({
owner,
repo,
issue_number: issue.number,
state: "closed",
state_reason: "completed",
})
}
core.setOutput("html_url", issue.html_url)
} else if (!success) { // create new issue for failure
const issue = await client.rest.issues.create({
owner,
repo,
title,
labels,
body: failure_comment,
})
core.info(`Created issue ${issue.data.html_url}`)
core.setOutput("html_url", issue.data.html_url)
}
}
main().catch(handleError)

View File

@@ -1,73 +0,0 @@
/*
* Copyright 2023 Greptime Team
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
import * as core from '@actions/core'
import {GitHub} from "@actions/github/lib/utils"
import _ from "lodash";
import dayjs from "dayjs";
import {handleError, obtainClient} from "@/common";
async function main() {
const client = obtainClient("GITHUB_TOKEN")
await unassign(client)
}
async function unassign(client: InstanceType<typeof GitHub>) {
const owner = "GreptimeTeam"
const repo = "greptimedb"
const dt = dayjs().subtract(14, 'days');
core.info(`Open issues updated before ${dt.toISOString()} will be considered stale.`)
const members = await client.paginate(client.rest.repos.listCollaborators, {
owner,
repo,
permission: "push",
per_page: 100
}).then((members) => members.map((member) => member.login))
core.info(`Members (${members.length}): ${members}`)
const issues = await client.paginate(client.rest.issues.listForRepo, {
owner,
repo,
state: "open",
sort: "created",
direction: "asc",
per_page: 100
})
for (const issue of issues) {
let assignees = [];
if (issue.assignee) {
assignees.push(issue.assignee.login)
}
for (const assignee of issue.assignees) {
assignees.push(assignee.login)
}
assignees = _.uniq(assignees)
assignees = _.difference(assignees, members)
if (assignees.length > 0 && dayjs(issue.updated_at).isBefore(dt)) {
core.info(`Assignees ${assignees} of issue ${issue.number} will be unassigned.`)
await client.rest.issues.removeAssignees({
owner,
repo,
issue_number: issue.number,
assignees: assignees,
})
}
}
}
main().catch(handleError)

View File

@@ -1,26 +0,0 @@
{
"name": "cyborg",
"version": "1.0.0",
"description": "Automator for GreptimeDB Repository Management",
"private": true,
"packageManager": "pnpm@8.15.5",
"dependencies": {
"@actions/core": "^1.10.1",
"@actions/github": "^6.0.0",
"@octokit/request-error": "^6.1.1",
"@octokit/webhooks-types": "^7.5.1",
"conventional-commit-types": "^3.0.0",
"conventional-commits-parser": "^5.0.0",
"dayjs": "^1.11.11",
"dotenv": "^16.4.5",
"lodash": "^4.17.21"
},
"devDependencies": {
"@types/conventional-commits-parser": "^5.0.0",
"@types/lodash": "^4.17.0",
"@types/node": "^20.12.7",
"tsconfig-paths": "^4.2.0",
"tsx": "^4.8.2",
"typescript": "^5.4.5"
}
}

612
cyborg/pnpm-lock.yaml generated
View File

@@ -1,612 +0,0 @@
lockfileVersion: '6.0'
settings:
autoInstallPeers: true
excludeLinksFromLockfile: false
dependencies:
'@actions/core':
specifier: ^1.10.1
version: 1.10.1
'@actions/github':
specifier: ^6.0.0
version: 6.0.0
'@octokit/request-error':
specifier: ^6.1.1
version: 6.1.1
'@octokit/webhooks-types':
specifier: ^7.5.1
version: 7.5.1
conventional-commit-types:
specifier: ^3.0.0
version: 3.0.0
conventional-commits-parser:
specifier: ^5.0.0
version: 5.0.0
dayjs:
specifier: ^1.11.11
version: 1.11.11
dotenv:
specifier: ^16.4.5
version: 16.4.5
lodash:
specifier: ^4.17.21
version: 4.17.21
devDependencies:
'@types/conventional-commits-parser':
specifier: ^5.0.0
version: 5.0.0
'@types/lodash':
specifier: ^4.17.0
version: 4.17.0
'@types/node':
specifier: ^20.12.7
version: 20.12.7
tsconfig-paths:
specifier: ^4.2.0
version: 4.2.0
tsx:
specifier: ^4.8.2
version: 4.8.2
typescript:
specifier: ^5.4.5
version: 5.4.5
packages:
/@actions/core@1.10.1:
resolution: {integrity: sha512-3lBR9EDAY+iYIpTnTIXmWcNbX3T2kCkAEQGIQx4NVQ0575nk2k3GRZDTPQG+vVtS2izSLmINlxXf0uLtnrTP+g==}
dependencies:
'@actions/http-client': 2.2.1
uuid: 8.3.2
dev: false
/@actions/github@6.0.0:
resolution: {integrity: sha512-alScpSVnYmjNEXboZjarjukQEzgCRmjMv6Xj47fsdnqGS73bjJNDpiiXmp8jr0UZLdUB6d9jW63IcmddUP+l0g==}
dependencies:
'@actions/http-client': 2.2.1
'@octokit/core': 5.2.0
'@octokit/plugin-paginate-rest': 9.2.1(@octokit/core@5.2.0)
'@octokit/plugin-rest-endpoint-methods': 10.4.1(@octokit/core@5.2.0)
dev: false
/@actions/http-client@2.2.1:
resolution: {integrity: sha512-KhC/cZsq7f8I4LfZSJKgCvEwfkE8o1538VoBeoGzokVLLnbFDEAdFD3UhoMklxo2un9NJVBdANOresx7vTHlHw==}
dependencies:
tunnel: 0.0.6
undici: 5.28.4
dev: false
/@esbuild/aix-ppc64@0.20.2:
resolution: {integrity: sha512-D+EBOJHXdNZcLJRBkhENNG8Wji2kgc9AZ9KiPr1JuZjsNtyHzrsfLRrY0tk2H2aoFu6RANO1y1iPPUCDYWkb5g==}
engines: {node: '>=12'}
cpu: [ppc64]
os: [aix]
requiresBuild: true
dev: true
optional: true
/@esbuild/android-arm64@0.20.2:
resolution: {integrity: sha512-mRzjLacRtl/tWU0SvD8lUEwb61yP9cqQo6noDZP/O8VkwafSYwZ4yWy24kan8jE/IMERpYncRt2dw438LP3Xmg==}
engines: {node: '>=12'}
cpu: [arm64]
os: [android]
requiresBuild: true
dev: true
optional: true
/@esbuild/android-arm@0.20.2:
resolution: {integrity: sha512-t98Ra6pw2VaDhqNWO2Oph2LXbz/EJcnLmKLGBJwEwXX/JAN83Fym1rU8l0JUWK6HkIbWONCSSatf4sf2NBRx/w==}
engines: {node: '>=12'}
cpu: [arm]
os: [android]
requiresBuild: true
dev: true
optional: true
/@esbuild/android-x64@0.20.2:
resolution: {integrity: sha512-btzExgV+/lMGDDa194CcUQm53ncxzeBrWJcncOBxuC6ndBkKxnHdFJn86mCIgTELsooUmwUm9FkhSp5HYu00Rg==}
engines: {node: '>=12'}
cpu: [x64]
os: [android]
requiresBuild: true
dev: true
optional: true
/@esbuild/darwin-arm64@0.20.2:
resolution: {integrity: sha512-4J6IRT+10J3aJH3l1yzEg9y3wkTDgDk7TSDFX+wKFiWjqWp/iCfLIYzGyasx9l0SAFPT1HwSCR+0w/h1ES/MjA==}
engines: {node: '>=12'}
cpu: [arm64]
os: [darwin]
requiresBuild: true
dev: true
optional: true
/@esbuild/darwin-x64@0.20.2:
resolution: {integrity: sha512-tBcXp9KNphnNH0dfhv8KYkZhjc+H3XBkF5DKtswJblV7KlT9EI2+jeA8DgBjp908WEuYll6pF+UStUCfEpdysA==}
engines: {node: '>=12'}
cpu: [x64]
os: [darwin]
requiresBuild: true
dev: true
optional: true
/@esbuild/freebsd-arm64@0.20.2:
resolution: {integrity: sha512-d3qI41G4SuLiCGCFGUrKsSeTXyWG6yem1KcGZVS+3FYlYhtNoNgYrWcvkOoaqMhwXSMrZRl69ArHsGJ9mYdbbw==}
engines: {node: '>=12'}
cpu: [arm64]
os: [freebsd]
requiresBuild: true
dev: true
optional: true
/@esbuild/freebsd-x64@0.20.2:
resolution: {integrity: sha512-d+DipyvHRuqEeM5zDivKV1KuXn9WeRX6vqSqIDgwIfPQtwMP4jaDsQsDncjTDDsExT4lR/91OLjRo8bmC1e+Cw==}
engines: {node: '>=12'}
cpu: [x64]
os: [freebsd]
requiresBuild: true
dev: true
optional: true
/@esbuild/linux-arm64@0.20.2:
resolution: {integrity: sha512-9pb6rBjGvTFNira2FLIWqDk/uaf42sSyLE8j1rnUpuzsODBq7FvpwHYZxQ/It/8b+QOS1RYfqgGFNLRI+qlq2A==}
engines: {node: '>=12'}
cpu: [arm64]
os: [linux]
requiresBuild: true
dev: true
optional: true
/@esbuild/linux-arm@0.20.2:
resolution: {integrity: sha512-VhLPeR8HTMPccbuWWcEUD1Az68TqaTYyj6nfE4QByZIQEQVWBB8vup8PpR7y1QHL3CpcF6xd5WVBU/+SBEvGTg==}
engines: {node: '>=12'}
cpu: [arm]
os: [linux]
requiresBuild: true
dev: true
optional: true
/@esbuild/linux-ia32@0.20.2:
resolution: {integrity: sha512-o10utieEkNPFDZFQm9CoP7Tvb33UutoJqg3qKf1PWVeeJhJw0Q347PxMvBgVVFgouYLGIhFYG0UGdBumROyiig==}
engines: {node: '>=12'}
cpu: [ia32]
os: [linux]
requiresBuild: true
dev: true
optional: true
/@esbuild/linux-loong64@0.20.2:
resolution: {integrity: sha512-PR7sp6R/UC4CFVomVINKJ80pMFlfDfMQMYynX7t1tNTeivQ6XdX5r2XovMmha/VjR1YN/HgHWsVcTRIMkymrgQ==}
engines: {node: '>=12'}
cpu: [loong64]
os: [linux]
requiresBuild: true
dev: true
optional: true
/@esbuild/linux-mips64el@0.20.2:
resolution: {integrity: sha512-4BlTqeutE/KnOiTG5Y6Sb/Hw6hsBOZapOVF6njAESHInhlQAghVVZL1ZpIctBOoTFbQyGW+LsVYZ8lSSB3wkjA==}
engines: {node: '>=12'}
cpu: [mips64el]
os: [linux]
requiresBuild: true
dev: true
optional: true
/@esbuild/linux-ppc64@0.20.2:
resolution: {integrity: sha512-rD3KsaDprDcfajSKdn25ooz5J5/fWBylaaXkuotBDGnMnDP1Uv5DLAN/45qfnf3JDYyJv/ytGHQaziHUdyzaAg==}
engines: {node: '>=12'}
cpu: [ppc64]
os: [linux]
requiresBuild: true
dev: true
optional: true
/@esbuild/linux-riscv64@0.20.2:
resolution: {integrity: sha512-snwmBKacKmwTMmhLlz/3aH1Q9T8v45bKYGE3j26TsaOVtjIag4wLfWSiZykXzXuE1kbCE+zJRmwp+ZbIHinnVg==}
engines: {node: '>=12'}
cpu: [riscv64]
os: [linux]
requiresBuild: true
dev: true
optional: true
/@esbuild/linux-s390x@0.20.2:
resolution: {integrity: sha512-wcWISOobRWNm3cezm5HOZcYz1sKoHLd8VL1dl309DiixxVFoFe/o8HnwuIwn6sXre88Nwj+VwZUvJf4AFxkyrQ==}
engines: {node: '>=12'}
cpu: [s390x]
os: [linux]
requiresBuild: true
dev: true
optional: true
/@esbuild/linux-x64@0.20.2:
resolution: {integrity: sha512-1MdwI6OOTsfQfek8sLwgyjOXAu+wKhLEoaOLTjbijk6E2WONYpH9ZU2mNtR+lZ2B4uwr+usqGuVfFT9tMtGvGw==}
engines: {node: '>=12'}
cpu: [x64]
os: [linux]
requiresBuild: true
dev: true
optional: true
/@esbuild/netbsd-x64@0.20.2:
resolution: {integrity: sha512-K8/DhBxcVQkzYc43yJXDSyjlFeHQJBiowJ0uVL6Tor3jGQfSGHNNJcWxNbOI8v5k82prYqzPuwkzHt3J1T1iZQ==}
engines: {node: '>=12'}
cpu: [x64]
os: [netbsd]
requiresBuild: true
dev: true
optional: true
/@esbuild/openbsd-x64@0.20.2:
resolution: {integrity: sha512-eMpKlV0SThJmmJgiVyN9jTPJ2VBPquf6Kt/nAoo6DgHAoN57K15ZghiHaMvqjCye/uU4X5u3YSMgVBI1h3vKrQ==}
engines: {node: '>=12'}
cpu: [x64]
os: [openbsd]
requiresBuild: true
dev: true
optional: true
/@esbuild/sunos-x64@0.20.2:
resolution: {integrity: sha512-2UyFtRC6cXLyejf/YEld4Hajo7UHILetzE1vsRcGL3earZEW77JxrFjH4Ez2qaTiEfMgAXxfAZCm1fvM/G/o8w==}
engines: {node: '>=12'}
cpu: [x64]
os: [sunos]
requiresBuild: true
dev: true
optional: true
/@esbuild/win32-arm64@0.20.2:
resolution: {integrity: sha512-GRibxoawM9ZCnDxnP3usoUDO9vUkpAxIIZ6GQI+IlVmr5kP3zUq+l17xELTHMWTWzjxa2guPNyrpq1GWmPvcGQ==}
engines: {node: '>=12'}
cpu: [arm64]
os: [win32]
requiresBuild: true
dev: true
optional: true
/@esbuild/win32-ia32@0.20.2:
resolution: {integrity: sha512-HfLOfn9YWmkSKRQqovpnITazdtquEW8/SoHW7pWpuEeguaZI4QnCRW6b+oZTztdBnZOS2hqJ6im/D5cPzBTTlQ==}
engines: {node: '>=12'}
cpu: [ia32]
os: [win32]
requiresBuild: true
dev: true
optional: true
/@esbuild/win32-x64@0.20.2:
resolution: {integrity: sha512-N49X4lJX27+l9jbLKSqZ6bKNjzQvHaT8IIFUy+YIqmXQdjYCToGWwOItDrfby14c78aDd5NHQl29xingXfCdLQ==}
engines: {node: '>=12'}
cpu: [x64]
os: [win32]
requiresBuild: true
dev: true
optional: true
/@fastify/busboy@2.1.1:
resolution: {integrity: sha512-vBZP4NlzfOlerQTnba4aqZoMhE/a9HY7HRqoOPaETQcSQuWEIyZMHGfVu6w9wGtGK5fED5qRs2DteVCjOH60sA==}
engines: {node: '>=14'}
dev: false
/@octokit/auth-token@4.0.0:
resolution: {integrity: sha512-tY/msAuJo6ARbK6SPIxZrPBms3xPbfwBrulZe0Wtr/DIY9lje2HeV1uoebShn6mx7SjCHif6EjMvoREj+gZ+SA==}
engines: {node: '>= 18'}
dev: false
/@octokit/core@5.2.0:
resolution: {integrity: sha512-1LFfa/qnMQvEOAdzlQymH0ulepxbxnCYAKJZfMci/5XJyIHWgEYnDmgnKakbTh7CH2tFQ5O60oYDvns4i9RAIg==}
engines: {node: '>= 18'}
dependencies:
'@octokit/auth-token': 4.0.0
'@octokit/graphql': 7.1.0
'@octokit/request': 8.4.0
'@octokit/request-error': 5.1.0
'@octokit/types': 13.5.0
before-after-hook: 2.2.3
universal-user-agent: 6.0.1
dev: false
/@octokit/endpoint@9.0.5:
resolution: {integrity: sha512-ekqR4/+PCLkEBF6qgj8WqJfvDq65RH85OAgrtnVp1mSxaXF03u2xW/hUdweGS5654IlC0wkNYC18Z50tSYTAFw==}
engines: {node: '>= 18'}
dependencies:
'@octokit/types': 13.5.0
universal-user-agent: 6.0.1
dev: false
/@octokit/graphql@7.1.0:
resolution: {integrity: sha512-r+oZUH7aMFui1ypZnAvZmn0KSqAUgE1/tUXIWaqUCa1758ts/Jio84GZuzsvUkme98kv0WFY8//n0J1Z+vsIsQ==}
engines: {node: '>= 18'}
dependencies:
'@octokit/request': 8.4.0
'@octokit/types': 13.5.0
universal-user-agent: 6.0.1
dev: false
/@octokit/openapi-types@20.0.0:
resolution: {integrity: sha512-EtqRBEjp1dL/15V7WiX5LJMIxxkdiGJnabzYx5Apx4FkQIFgAfKumXeYAqqJCj1s+BMX4cPFIFC4OLCR6stlnA==}
dev: false
/@octokit/openapi-types@22.2.0:
resolution: {integrity: sha512-QBhVjcUa9W7Wwhm6DBFu6ZZ+1/t/oYxqc2tp81Pi41YNuJinbFRx8B133qVOrAaBbF7D/m0Et6f9/pZt9Rc+tg==}
dev: false
/@octokit/plugin-paginate-rest@9.2.1(@octokit/core@5.2.0):
resolution: {integrity: sha512-wfGhE/TAkXZRLjksFXuDZdmGnJQHvtU/joFQdweXUgzo1XwvBCD4o4+75NtFfjfLK5IwLf9vHTfSiU3sLRYpRw==}
engines: {node: '>= 18'}
peerDependencies:
'@octokit/core': '5'
dependencies:
'@octokit/core': 5.2.0
'@octokit/types': 12.6.0
dev: false
/@octokit/plugin-rest-endpoint-methods@10.4.1(@octokit/core@5.2.0):
resolution: {integrity: sha512-xV1b+ceKV9KytQe3zCVqjg+8GTGfDYwaT1ATU5isiUyVtlVAO3HNdzpS4sr4GBx4hxQ46s7ITtZrAsxG22+rVg==}
engines: {node: '>= 18'}
peerDependencies:
'@octokit/core': '5'
dependencies:
'@octokit/core': 5.2.0
'@octokit/types': 12.6.0
dev: false
/@octokit/request-error@5.1.0:
resolution: {integrity: sha512-GETXfE05J0+7H2STzekpKObFe765O5dlAKUTLNGeH+x47z7JjXHfsHKo5z21D/o/IOZTUEI6nyWyR+bZVP/n5Q==}
engines: {node: '>= 18'}
dependencies:
'@octokit/types': 13.5.0
deprecation: 2.3.1
once: 1.4.0
dev: false
/@octokit/request-error@6.1.1:
resolution: {integrity: sha512-1mw1gqT3fR/WFvnoVpY/zUM2o/XkMs/2AszUUG9I69xn0JFLv6PGkPhNk5lbfvROs79wiS0bqiJNxfCZcRJJdg==}
engines: {node: '>= 18'}
dependencies:
'@octokit/types': 13.5.0
dev: false
/@octokit/request@8.4.0:
resolution: {integrity: sha512-9Bb014e+m2TgBeEJGEbdplMVWwPmL1FPtggHQRkV+WVsMggPtEkLKPlcVYm/o8xKLkpJ7B+6N8WfQMtDLX2Dpw==}
engines: {node: '>= 18'}
dependencies:
'@octokit/endpoint': 9.0.5
'@octokit/request-error': 5.1.0
'@octokit/types': 13.5.0
universal-user-agent: 6.0.1
dev: false
/@octokit/types@12.6.0:
resolution: {integrity: sha512-1rhSOfRa6H9w4YwK0yrf5faDaDTb+yLyBUKOCV4xtCDB5VmIPqd/v9yr9o6SAzOAlRxMiRiCic6JVM1/kunVkw==}
dependencies:
'@octokit/openapi-types': 20.0.0
dev: false
/@octokit/types@13.5.0:
resolution: {integrity: sha512-HdqWTf5Z3qwDVlzCrP8UJquMwunpDiMPt5er+QjGzL4hqr/vBVY/MauQgS1xWxCDT1oMx1EULyqxncdCY/NVSQ==}
dependencies:
'@octokit/openapi-types': 22.2.0
dev: false
/@octokit/webhooks-types@7.5.1:
resolution: {integrity: sha512-1dozxWEP8lKGbtEu7HkRbK1F/nIPuJXNfT0gd96y6d3LcHZTtRtlf8xz3nicSJfesADxJyDh+mWBOsdLkqgzYw==}
dev: false
/@types/conventional-commits-parser@5.0.0:
resolution: {integrity: sha512-loB369iXNmAZglwWATL+WRe+CRMmmBPtpolYzIebFaX4YA3x+BEfLqhUAV9WanycKI3TG1IMr5bMJDajDKLlUQ==}
dependencies:
'@types/node': 20.12.7
dev: true
/@types/lodash@4.17.0:
resolution: {integrity: sha512-t7dhREVv6dbNj0q17X12j7yDG4bD/DHYX7o5/DbDxobP0HnGPgpRz2Ej77aL7TZT3DSw13fqUTj8J4mMnqa7WA==}
dev: true
/@types/node@20.12.7:
resolution: {integrity: sha512-wq0cICSkRLVaf3UGLMGItu/PtdY7oaXaI/RVU+xliKVOtRna3PRY57ZDfztpDL0n11vfymMUnXv8QwYCO7L1wg==}
dependencies:
undici-types: 5.26.5
dev: true
/JSONStream@1.3.5:
resolution: {integrity: sha512-E+iruNOY8VV9s4JEbe1aNEm6MiszPRr/UfcHMz0TQh1BXSxHK+ASV1R6W4HpjBhSeS+54PIsAMCBmwD06LLsqQ==}
hasBin: true
dependencies:
jsonparse: 1.3.1
through: 2.3.8
dev: false
/before-after-hook@2.2.3:
resolution: {integrity: sha512-NzUnlZexiaH/46WDhANlyR2bXRopNg4F/zuSA3OpZnllCUgRaOF2znDioDWrmbNVsuZk6l9pMquQB38cfBZwkQ==}
dev: false
/conventional-commit-types@3.0.0:
resolution: {integrity: sha512-SmmCYnOniSsAa9GqWOeLqc179lfr5TRu5b4QFDkbsrJ5TZjPJx85wtOr3zn+1dbeNiXDKGPbZ72IKbPhLXh/Lg==}
dev: false
/conventional-commits-parser@5.0.0:
resolution: {integrity: sha512-ZPMl0ZJbw74iS9LuX9YIAiW8pfM5p3yh2o/NbXHbkFuZzY5jvdi5jFycEOkmBW5H5I7nA+D6f3UcsCLP2vvSEA==}
engines: {node: '>=16'}
hasBin: true
dependencies:
JSONStream: 1.3.5
is-text-path: 2.0.0
meow: 12.1.1
split2: 4.2.0
dev: false
/dayjs@1.11.11:
resolution: {integrity: sha512-okzr3f11N6WuqYtZSvm+F776mB41wRZMhKP+hc34YdW+KmtYYK9iqvHSwo2k9FEH3fhGXvOPV6yz2IcSrfRUDg==}
dev: false
/deprecation@2.3.1:
resolution: {integrity: sha512-xmHIy4F3scKVwMsQ4WnVaS8bHOx0DmVwRywosKhaILI0ywMDWPtBSku2HNxRvF7jtwDRsoEwYQSfbxj8b7RlJQ==}
dev: false
/dotenv@16.4.5:
resolution: {integrity: sha512-ZmdL2rui+eB2YwhsWzjInR8LldtZHGDoQ1ugH85ppHKwpUHL7j7rN0Ti9NCnGiQbhaZ11FpR+7ao1dNsmduNUg==}
engines: {node: '>=12'}
dev: false
/esbuild@0.20.2:
resolution: {integrity: sha512-WdOOppmUNU+IbZ0PaDiTst80zjnrOkyJNHoKupIcVyU8Lvla3Ugx94VzkQ32Ijqd7UhHJy75gNWDMUekcrSJ6g==}
engines: {node: '>=12'}
hasBin: true
requiresBuild: true
optionalDependencies:
'@esbuild/aix-ppc64': 0.20.2
'@esbuild/android-arm': 0.20.2
'@esbuild/android-arm64': 0.20.2
'@esbuild/android-x64': 0.20.2
'@esbuild/darwin-arm64': 0.20.2
'@esbuild/darwin-x64': 0.20.2
'@esbuild/freebsd-arm64': 0.20.2
'@esbuild/freebsd-x64': 0.20.2
'@esbuild/linux-arm': 0.20.2
'@esbuild/linux-arm64': 0.20.2
'@esbuild/linux-ia32': 0.20.2
'@esbuild/linux-loong64': 0.20.2
'@esbuild/linux-mips64el': 0.20.2
'@esbuild/linux-ppc64': 0.20.2
'@esbuild/linux-riscv64': 0.20.2
'@esbuild/linux-s390x': 0.20.2
'@esbuild/linux-x64': 0.20.2
'@esbuild/netbsd-x64': 0.20.2
'@esbuild/openbsd-x64': 0.20.2
'@esbuild/sunos-x64': 0.20.2
'@esbuild/win32-arm64': 0.20.2
'@esbuild/win32-ia32': 0.20.2
'@esbuild/win32-x64': 0.20.2
dev: true
/fsevents@2.3.3:
resolution: {integrity: sha512-5xoDfX+fL7faATnagmWPpbFtwh/R77WmMMqqHGS65C3vvB0YHrgF+B1YmZ3441tMj5n63k0212XNoJwzlhffQw==}
engines: {node: ^8.16.0 || ^10.6.0 || >=11.0.0}
os: [darwin]
requiresBuild: true
dev: true
optional: true
/get-tsconfig@4.7.3:
resolution: {integrity: sha512-ZvkrzoUA0PQZM6fy6+/Hce561s+faD1rsNwhnO5FelNjyy7EMGJ3Rz1AQ8GYDWjhRs/7dBLOEJvhK8MiEJOAFg==}
dependencies:
resolve-pkg-maps: 1.0.0
dev: true
/is-text-path@2.0.0:
resolution: {integrity: sha512-+oDTluR6WEjdXEJMnC2z6A4FRwFoYuvShVVEGsS7ewc0UTi2QtAKMDJuL4BDEVt+5T7MjFo12RP8ghOM75oKJw==}
engines: {node: '>=8'}
dependencies:
text-extensions: 2.4.0
dev: false
/json5@2.2.3:
resolution: {integrity: sha512-XmOWe7eyHYH14cLdVPoyg+GOH3rYX++KpzrylJwSW98t3Nk+U8XOl8FWKOgwtzdb8lXGf6zYwDUzeHMWfxasyg==}
engines: {node: '>=6'}
hasBin: true
dev: true
/jsonparse@1.3.1:
resolution: {integrity: sha512-POQXvpdL69+CluYsillJ7SUhKvytYjW9vG/GKpnf+xP8UWgYEM/RaMzHHofbALDiKbbP1W8UEYmgGl39WkPZsg==}
engines: {'0': node >= 0.2.0}
dev: false
/lodash@4.17.21:
resolution: {integrity: sha512-v2kDEe57lecTulaDIuNTPy3Ry4gLGJ6Z1O3vE1krgXZNrsQ+LFTGHVxVjcXPs17LhbZVGedAJv8XZ1tvj5FvSg==}
dev: false
/meow@12.1.1:
resolution: {integrity: sha512-BhXM0Au22RwUneMPwSCnyhTOizdWoIEPU9sp0Aqa1PnDMR5Wv2FGXYDjuzJEIX+Eo2Rb8xuYe5jrnm5QowQFkw==}
engines: {node: '>=16.10'}
dev: false
/minimist@1.2.8:
resolution: {integrity: sha512-2yyAR8qBkN3YuheJanUpWC5U3bb5osDywNB8RzDVlDwDHbocAJveqqj1u8+SVD7jkWT4yvsHCpWqqWqAxb0zCA==}
dev: true
/once@1.4.0:
resolution: {integrity: sha512-lNaJgI+2Q5URQBkccEKHTQOPaXdUxnZZElQTZY0MFUAuaEqe1E+Nyvgdz/aIyNi6Z9MzO5dv1H8n58/GELp3+w==}
dependencies:
wrappy: 1.0.2
dev: false
/resolve-pkg-maps@1.0.0:
resolution: {integrity: sha512-seS2Tj26TBVOC2NIc2rOe2y2ZO7efxITtLZcGSOnHHNOQ7CkiUBfw0Iw2ck6xkIhPwLhKNLS8BO+hEpngQlqzw==}
dev: true
/split2@4.2.0:
resolution: {integrity: sha512-UcjcJOWknrNkF6PLX83qcHM6KHgVKNkV62Y8a5uYDVv9ydGQVwAHMKqHdJje1VTWpljG0WYpCDhrCdAOYH4TWg==}
engines: {node: '>= 10.x'}
dev: false
/strip-bom@3.0.0:
resolution: {integrity: sha512-vavAMRXOgBVNF6nyEEmL3DBK19iRpDcoIwW+swQ+CbGiu7lju6t+JklA1MHweoWtadgt4ISVUsXLyDq34ddcwA==}
engines: {node: '>=4'}
dev: true
/text-extensions@2.4.0:
resolution: {integrity: sha512-te/NtwBwfiNRLf9Ijqx3T0nlqZiQ2XrrtBvu+cLL8ZRrGkO0NHTug8MYFKyoSrv/sHTaSKfilUkizV6XhxMJ3g==}
engines: {node: '>=8'}
dev: false
/through@2.3.8:
resolution: {integrity: sha512-w89qg7PI8wAdvX60bMDP+bFoD5Dvhm9oLheFp5O4a2QF0cSBGsBX4qZmadPMvVqlLJBBci+WqGGOAPvcDeNSVg==}
dev: false
/tsconfig-paths@4.2.0:
resolution: {integrity: sha512-NoZ4roiN7LnbKn9QqE1amc9DJfzvZXxF4xDavcOWt1BPkdx+m+0gJuPM+S0vCe7zTJMYUP0R8pO2XMr+Y8oLIg==}
engines: {node: '>=6'}
dependencies:
json5: 2.2.3
minimist: 1.2.8
strip-bom: 3.0.0
dev: true
/tsx@4.8.2:
resolution: {integrity: sha512-hmmzS4U4mdy1Cnzpl/NQiPUC2k34EcNSTZYVJThYKhdqTwuBeF+4cG9KUK/PFQ7KHaAaYwqlb7QfmsE2nuj+WA==}
engines: {node: '>=18.0.0'}
hasBin: true
dependencies:
esbuild: 0.20.2
get-tsconfig: 4.7.3
optionalDependencies:
fsevents: 2.3.3
dev: true
/tunnel@0.0.6:
resolution: {integrity: sha512-1h/Lnq9yajKY2PEbBadPXj3VxsDDu844OnaAo52UVmIzIvwwtBPIuNvkjuzBlTWpfJyUbG3ez0KSBibQkj4ojg==}
engines: {node: '>=0.6.11 <=0.7.0 || >=0.7.3'}
dev: false
/typescript@5.4.5:
resolution: {integrity: sha512-vcI4UpRgg81oIRUFwR0WSIHKt11nJ7SAVlYNIu+QpqeyXP+gpQJy/Z4+F0aGxSE4MqwjyXvW/TzgkLAx2AGHwQ==}
engines: {node: '>=14.17'}
hasBin: true
dev: true
/undici-types@5.26.5:
resolution: {integrity: sha512-JlCMO+ehdEIKqlFxk6IfVoAUVmgz7cU7zD/h9XZ0qzeosSHmUJVOzSQvvYSYWXkFXC+IfLKSIffhv0sVZup6pA==}
dev: true
/undici@5.28.4:
resolution: {integrity: sha512-72RFADWFqKmUb2hmmvNODKL3p9hcB6Gt2DOQMis1SEBaV6a4MH8soBvzg+95CYhCKPFedut2JY9bMfrDl9D23g==}
engines: {node: '>=14.0'}
dependencies:
'@fastify/busboy': 2.1.1
dev: false
/universal-user-agent@6.0.1:
resolution: {integrity: sha512-yCzhz6FN2wU1NiiQRogkTQszlQSlpWaw8SvVegAc+bDxbzHgh1vX8uIe8OYyMH6DwH+sdTJsgMl36+mSMdRJIQ==}
dev: false
/uuid@8.3.2:
resolution: {integrity: sha512-+NYs2QeMWy+GWFOEm9xnn6HCDp0l7QBD7ml8zLUmJ+93Q5NF0NocErnwkTkXVFNiX3/fpC6afS8Dhb/gz7R7eg==}
hasBin: true
dev: false
/wrappy@1.0.2:
resolution: {integrity: sha512-l4Sp/DRseor9wL6EvV2+TuQn63dMkPjZ/sp9XkghTEbV9KlPS1xUsZ3u7/IQO4wxtcFB4bgpQPRcR3QCvezPcQ==}
dev: false

View File

@@ -1,30 +0,0 @@
/*
* Copyright 2023 Greptime Team
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
import * as core from "@actions/core";
import {config} from "dotenv";
import {getOctokit} from "@actions/github";
import {GitHub} from "@actions/github/lib/utils";
export function handleError(err: any): void {
console.error(err)
core.setFailed(`Unhandled error: ${err}`)
}
export function obtainClient(token: string): InstanceType<typeof GitHub> {
config()
return getOctokit(process.env[token])
}

View File

@@ -1,14 +0,0 @@
{
"ts-node": {
"require": ["tsconfig-paths/register"]
},
"compilerOptions": {
"module": "NodeNext",
"moduleResolution": "NodeNext",
"target": "ES6",
"paths": {
"@/*": ["./src/*"]
},
"resolveJsonModule": true,
}
}

View File

@@ -1,16 +0,0 @@
FROM ubuntu:22.04
# The binary name of GreptimeDB executable.
# Defaults to "greptime", but sometimes in other projects it might be different.
ARG TARGET_BIN=greptime
RUN apt-get update && DEBIAN_FRONTEND=noninteractive apt-get install -y \
ca-certificates \
curl
ARG BINARY_PATH
ADD $BINARY_PATH/$TARGET_BIN /greptime/bin/
ENV PATH /greptime/bin/:$PATH
ENTRYPOINT ["greptime"]

View File

@@ -34,7 +34,7 @@ RUN rustup toolchain install ${RUST_TOOLCHAIN}
RUN rustup target add aarch64-linux-android RUN rustup target add aarch64-linux-android
# Install cargo-ndk # Install cargo-ndk
RUN cargo install cargo-ndk@3.5.4 RUN cargo install cargo-ndk
ENV ANDROID_NDK_HOME $NDK_ROOT ENV ANDROID_NDK_HOME $NDK_ROOT
# Builder entrypoint. # Builder entrypoint.

View File

@@ -1,102 +0,0 @@
x-custom:
initial_cluster_token: &initial_cluster_token "--initial-cluster-token=etcd-cluster"
common_settings: &common_settings
image: quay.io/coreos/etcd:v3.5.10
entrypoint: /usr/local/bin/etcd
services:
etcd0:
<<: *common_settings
container_name: etcd0
ports:
- 2379:2379
- 2380:2380
command:
- --name=etcd0
- --data-dir=/var/lib/etcd
- --initial-advertise-peer-urls=http://etcd0:2380
- --listen-peer-urls=http://0.0.0.0:2380
- --listen-client-urls=http://0.0.0.0:2379
- --advertise-client-urls=http://etcd0:2379
- --heartbeat-interval=250
- --election-timeout=1250
- --initial-cluster=etcd0=http://etcd0:2380
- --initial-cluster-state=new
- *initial_cluster_token
volumes:
- /tmp/greptimedb-cluster-docker-compose/etcd0:/var/lib/etcd
healthcheck:
test: [ "CMD", "etcdctl", "--endpoints=http://etcd0:2379", "endpoint", "health" ]
interval: 5s
timeout: 3s
retries: 5
networks:
- greptimedb
metasrv:
image: docker.io/greptime/greptimedb:latest
container_name: metasrv
ports:
- 3002:3002
command:
- metasrv
- start
- --bind-addr=0.0.0.0:3002
- --server-addr=metasrv:3002
- --store-addrs=etcd0:2379
healthcheck:
test: [ "CMD", "curl", "-f", "http://metasrv:3002/health" ]
interval: 5s
timeout: 3s
retries: 5
depends_on:
etcd0:
condition: service_healthy
networks:
- greptimedb
datanode0:
image: docker.io/greptime/greptimedb:latest
container_name: datanode0
ports:
- 3001:3001
command:
- datanode
- start
- --node-id=0
- --rpc-addr=0.0.0.0:3001
- --rpc-hostname=datanode0:3001
- --metasrv-addr=metasrv:3002
volumes:
- /tmp/greptimedb-cluster-docker-compose/datanode0:/tmp/greptimedb
depends_on:
metasrv:
condition: service_healthy
networks:
- greptimedb
frontend0:
image: docker.io/greptime/greptimedb:latest
container_name: frontend0
ports:
- 4000:4000
- 4001:4001
- 4002:4002
- 4003:4003
command:
- frontend
- start
- --metasrv-addrs=metasrv:3002
- --http-addr=0.0.0.0:4000
- --rpc-addr=0.0.0.0:4001
- --mysql-addr=0.0.0.0:4002
- --postgres-addr=0.0.0.0:4003
depends_on:
metasrv:
condition: service_healthy
networks:
- greptimedb
networks:
greptimedb:
name: greptimedb

View File

@@ -23,28 +23,28 @@
## Write performance ## Write performance
| Environment | Ingest rate (rows/s) | | Environment | Ingest rate (rows/s) |
| --------------- | -------------------- | | ------------------ | --------------------- |
| Local | 369581.464 | | Local | 3695814.64 |
| EC2 c5d.2xlarge | 298716.664 | | EC2 c5d.2xlarge | 2987166.64 |
## Query performance ## Query performance
| Query type | Local (ms) | EC2 c5d.2xlarge (ms) | | Query type | Local (ms) | EC2 c5d.2xlarge (ms) |
| --------------------- | ---------- | -------------------- | | --------------------- | ---------- | ---------------------- |
| cpu-max-all-1 | 30.56 | 54.74 | | cpu-max-all-1 | 30.56 | 54.74 |
| cpu-max-all-8 | 52.69 | 70.50 | | cpu-max-all-8 | 52.69 | 70.50 |
| double-groupby-1 | 664.30 | 1366.63 | | double-groupby-1 | 664.30 | 1366.63 |
| double-groupby-5 | 1391.26 | 2141.71 | | double-groupby-5 | 1391.26 | 2141.71 |
| double-groupby-all | 2828.94 | 3389.59 | | double-groupby-all | 2828.94 | 3389.59 |
| groupby-orderby-limit | 718.92 | 1213.90 | | groupby-orderby-limit | 718.92 | 1213.90 |
| high-cpu-1 | 29.21 | 52.98 | | high-cpu-1 | 29.21 | 52.98 |
| high-cpu-all | 5514.12 | 7194.91 | | high-cpu-all | 5514.12 | 7194.91 |
| lastpoint | 7571.40 | 9423.41 | | lastpoint | 7571.40 | 9423.41 |
| single-groupby-1-1-1 | 19.09 | 7.77 | | single-groupby-1-1-1 | 19.09 | 7.77 |
| single-groupby-1-1-12 | 27.28 | 51.64 | | single-groupby-1-1-12 | 27.28 | 51.64 |
| single-groupby-1-8-1 | 31.85 | 11.64 | | single-groupby-1-8-1 | 31.85 | 11.64 |
| single-groupby-5-1-1 | 16.14 | 9.67 | | single-groupby-5-1-1 | 16.14 | 9.67 |
| single-groupby-5-1-12 | 27.21 | 53.62 | | single-groupby-5-1-12 | 27.21 | 53.62 |
| single-groupby-5-8-1 | 39.62 | 14.96 | | single-groupby-5-8-1 | 39.62 | 14.96 |

View File

@@ -1,58 +0,0 @@
# TSBS benchmark - v0.8.0
## Environment
### Local
| | |
| ------ | ---------------------------------- |
| CPU | AMD Ryzen 7 7735HS (8 core 3.2GHz) |
| Memory | 32GB |
| Disk | SOLIDIGM SSDPFKNU010TZ |
| OS | Ubuntu 22.04.2 LTS |
### Amazon EC2
| | |
| ------- | -------------- |
| Machine | c5d.2xlarge |
| CPU | 8 core |
| Memory | 16GB |
| Disk | 50GB (GP3) |
| OS | Ubuntu 22.04.1 |
## Write performance
| Environment | Ingest rate (rows/s) |
| --------------- | -------------------- |
| Local | 315369.66 |
| EC2 c5d.2xlarge | 222148.56 |
## Query performance
| Query type | Local (ms) | EC2 c5d.2xlarge (ms) |
| --------------------- | ---------- | -------------------- |
| cpu-max-all-1 | 24.63 | 15.29 |
| cpu-max-all-8 | 51.69 | 33.53 |
| double-groupby-1 | 673.51 | 1295.38 |
| double-groupby-5 | 1244.93 | 1993.91 |
| double-groupby-all | 2215.44 | 3056.77 |
| groupby-orderby-limit | 754.50 | 1546.49 |
| high-cpu-1 | 19.62 | 11.58 |
| high-cpu-all | 5402.31 | 8011.43 |
| lastpoint | 6756.12 | 9312.67 |
| single-groupby-1-1-1 | 15.70 | 7.67 |
| single-groupby-1-1-12 | 16.72 | 9.29 |
| single-groupby-1-8-1 | 26.72 | 17.97 |
| single-groupby-5-1-1 | 18.17 | 10.09 |
| single-groupby-5-1-12 | 20.04 | 12.37 |
| single-groupby-5-8-1 | 35.63 | 23.13 |
`single-groupby-1-1-1` query throughput
| Environment | Client concurrency | mean time (ms) | qps (queries/sec) |
| --------------- | ------------------ | -------------- | ----------------- |
| Local | 50 | 42.87 | 1165.73 |
| Local | 100 | 89.29 | 1119.38 |
| EC2 c5d.2xlarge | 50 | 69.25 | 721.73 |
| EC2 c5d.2xlarge | 100 | 140.93 | 709.35 |

View File

@@ -1,136 +0,0 @@
# How to write fuzz tests
This document introduces how to write fuzz tests in GreptimeDB.
## What is a fuzz test
A fuzz test is a test that leverages deterministic random generation to assist in finding bugs. The goal of fuzz tests is to identify inputs generated by the fuzzer that cause system panics, crashes, or unexpected behaviors. We use [cargo-fuzz](https://github.com/rust-fuzz/cargo-fuzz) to run our fuzz test targets.
## Why we need them
- Find bugs by leveraging random generation
- Integrate with other tests (e.g., e2e)
## Resources
All fuzz test-related resources are located in the `/tests-fuzz` directory.
There are two types of resources: (1) fundamental components and (2) test targets.
### Fundamental components
They are located in the `/tests-fuzz/src` directory. The fundamental components define how to generate SQLs (including dialects for different protocols) and validate execution results (e.g., column attribute validation), etc.
### Test targets
They are located in the `/tests-fuzz/targets` directory, with each file representing an independent fuzz test case. A target uses the fundamental components to generate SQLs, sends the generated SQLs via the specified protocol, and validates the results of the SQL execution.
Figure 1 illustrates how the fundamental components of the fuzz tests provide the ability to generate random SQLs. They use a Random Number Generator (Rng) to generate the Intermediate Representation (IR), then employ a DialectTranslator to produce the dialect for each target protocol. Finally, the fuzz tests send the generated SQL via the specified protocol and verify that the execution results meet expectations.
```
Rng
|
|
v
ExprGenerator
|
|
v
Intermediate representation (IR)
|
|
+----------------------+----------------------+
| | |
v v v
MySQLTranslator PostgreSQLTranslator OtherDialectTranslator
| | |
| | |
v v v
SQL(MySQL Dialect) ..... .....
|
|
v
Fuzz Test
```
(Figure 1: Overview of fuzz tests)
For more details about fuzz targets and fundamental components, please refer to this [tracking issue](https://github.com/GreptimeTeam/greptimedb/issues/3174).
## How to add a fuzz test target
1. Create an empty Rust source file at `/tests-fuzz/targets/<fuzz-target>.rs`.
2. Register the fuzz test target in the `/tests-fuzz/Cargo.toml` file.
```toml
[[bin]]
name = "<fuzz-target>"
path = "targets/<fuzz-target>.rs"
test = false
bench = false
doc = false
```
3. Define the `FuzzInput` in the `/tests-fuzz/targets/<fuzz-target>.rs`.
```rust
#![no_main]
use libfuzzer_sys::arbitrary::{Arbitrary, Unstructured};
#[derive(Clone, Debug)]
struct FuzzInput {
seed: u64,
}
impl Arbitrary<'_> for FuzzInput {
fn arbitrary(u: &mut Unstructured<'_>) -> arbitrary::Result<Self> {
let seed = u.int_in_range(u64::MIN..=u64::MAX)?;
Ok(FuzzInput { seed })
}
}
```
4. Write your first fuzz test target in the `/tests-fuzz/targets/<fuzz-target>.rs`.
```rust
use libfuzzer_sys::fuzz_target;
use rand::{Rng, SeedableRng};
use rand_chacha::ChaChaRng;
use snafu::ResultExt;
use sqlx::{MySql, Pool};
use tests_fuzz::fake::{
merge_two_word_map_fn, random_capitalize_map, uppercase_and_keyword_backtick_map,
MappedGenerator, WordGenerator,
};
use tests_fuzz::generator::create_expr::CreateTableExprGeneratorBuilder;
use tests_fuzz::generator::Generator;
use tests_fuzz::ir::CreateTableExpr;
use tests_fuzz::translator::mysql::create_expr::CreateTableExprTranslator;
use tests_fuzz::translator::DslTranslator;
use tests_fuzz::utils::{init_greptime_connections, Connections};
fuzz_target!(|input: FuzzInput| {
common_telemetry::init_default_ut_logging();
common_runtime::block_on_write(async {
let Connections { mysql } = init_greptime_connections().await;
let mut rng = ChaChaRng::seed_from_u64(input.seed);
let columns = rng.gen_range(2..30);
let if_not_exists = rng.gen_bool(0.5);
let create_table_generator = CreateTableExprGeneratorBuilder::default()
.name_generator(Box::new(MappedGenerator::new(
WordGenerator,
merge_two_word_map_fn(random_capitalize_map, uppercase_and_keyword_backtick_map),
)))
.columns(columns)
.engine("mito")
.if_not_exists(if_not_exists)
.build()
.unwrap();
let expr = create_table_generator.generate(&mut rng).unwrap();
let translator = CreateTableExprTranslator;
let sql = translator.translate(&expr).unwrap();
mysql.execute(&sql).await
})
});
```
5. Run your fuzz test target
```bash
cargo fuzz run <fuzz-target> --fuzz-dir tests-fuzz
```
For more details, please refer to this [document](/tests-fuzz/README.md).
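When a run finds a crashing input, cargo-fuzz saves it under the fuzz directory's `artifacts/` folder; a sketch of replaying one such input to reproduce the failure (the artifact path is illustrative):
```bash
# Re-run the target against a single saved crash artifact instead of generating new inputs.
cargo fuzz run <fuzz-target> --fuzz-dir tests-fuzz \
  tests-fuzz/artifacts/<fuzz-target>/crash-<hash>
```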

View File

@@ -73,7 +73,7 @@ CREATE TABLE cpu (
usage_system DOUBLE, usage_system DOUBLE,
datacenter STRING, datacenter STRING,
TIME INDEX (ts), TIME INDEX (ts),
PRIMARY KEY(datacenter, host)) ENGINE=mito; PRIMARY KEY(datacenter, host)) ENGINE=mito WITH(regions=1);
``` ```
Then the table's `TableMeta` may look like this: Then the table's `TableMeta` may look like this:
@@ -249,7 +249,7 @@ CREATE TABLE cpu (
usage_system DOUBLE, usage_system DOUBLE,
datacenter STRING, datacenter STRING,
TIME INDEX (ts), TIME INDEX (ts),
PRIMARY KEY(datacenter, host)) ENGINE=mito; PRIMARY KEY(datacenter, host)) ENGINE=mito WITH(regions=1);
select ts, usage_system from cpu; select ts, usage_system from cpu;
``` ```

View File

@@ -7,60 +7,4 @@ Status notify: we are still working on this config. It's expected to change freq
# How to use # How to use
## `greptimedb.json`
Open Grafana Dashboard page, choose `New` -> `Import`. And upload `greptimedb.json` file. Open Grafana Dashboard page, choose `New` -> `Import`. And upload `greptimedb.json` file.
## `greptimedb-cluster.json`
This cluster dashboard provides a comprehensive view of incoming requests, response statuses, and internal activities such as flush and compaction, with a layered structure from frontend to datanode. Designed with a focus on alert functionality, its primary aim is to highlight any anomalies in metrics, allowing users to quickly pinpoint the cause of errors.
We use Prometheus to scrape metrics from the nodes in a GreptimeDB cluster and Grafana to visualize them. Any compatible stack should work too.
__Note__: This dashboard is still in an early stage of development. Any issue or advice on improvement is welcomed.
### Configuration
Please ensure the following configuration before importing the dashboard into Grafana.
__1. Prometheus scrape config__
Assign a `greptime_pod` label to each host target. We use this label to identify each node instance.
```yml
# example config
# only to indicate how to assign labels to each target
# modify yours accordingly
scrape_configs:
- job_name: metasrv
static_configs:
- targets: ['<ip>:<port>']
labels:
greptime_pod: metasrv
- job_name: datanode
static_configs:
- targets: ['<ip>:<port>']
labels:
greptime_pod: datanode1
- targets: ['<ip>:<port>']
labels:
greptime_pod: datanode2
- targets: ['<ip>:<port>']
labels:
greptime_pod: datanode3
- job_name: frontend
static_configs:
- targets: ['<ip>:<port>']
labels:
greptime_pod: frontend
```
__2. Grafana config__
Create a Prometheus data source in Grafana before using this dashboard. We use `datasource` as a variable in Grafana dashboard so that multiple environments are supported.
### Usage
Use the `datasource` or `greptime_pod` selector in the upper-left corner to filter data from a certain node.
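As a quick sanity check that the `greptime_pod` label is actually attached to the scraped series, you can query Prometheus directly; the address and label value below are illustrative:
```bash
# The built-in `up` metric should carry the greptime_pod label assigned in scrape_configs.
curl -s 'http://localhost:9090/api/v1/query' \
  --data-urlencode 'query=up{greptime_pod="datanode1"}'
```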

File diff suppressed because it is too large Load Diff

View File

@@ -17,15 +17,12 @@ headerPath = "Apache-2.0.txt"
includes = [ includes = [
"*.rs", "*.rs",
"*.py", "*.py",
"*.ts",
] ]
excludes = [ excludes = [
# copied sources # copied sources
"src/common/base/src/readable_size.rs", "src/common/base/src/readable_size.rs",
"src/common/base/src/secrets.rs",
"src/servers/src/repeated_field.rs", "src/servers/src/repeated_field.rs",
"src/servers/src/http/test_helpers.rs",
] ]
[properties] [properties]

View File

@@ -1,2 +1,2 @@
[toolchain] [toolchain]
channel = "nightly-2024-04-20" channel = "nightly-2024-04-18"

View File

@@ -30,7 +30,6 @@ pub enum Error {
#[snafu(display("Unknown proto column datatype: {}", datatype))] #[snafu(display("Unknown proto column datatype: {}", datatype))]
UnknownColumnDataType { UnknownColumnDataType {
datatype: i32, datatype: i32,
#[snafu(implicit)]
location: Location, location: Location,
#[snafu(source)] #[snafu(source)]
error: prost::DecodeError, error: prost::DecodeError,
@@ -39,14 +38,12 @@ pub enum Error {
#[snafu(display("Failed to create column datatype from {:?}", from))] #[snafu(display("Failed to create column datatype from {:?}", from))]
IntoColumnDataType { IntoColumnDataType {
from: ConcreteDataType, from: ConcreteDataType,
#[snafu(implicit)]
location: Location, location: Location,
}, },
#[snafu(display("Failed to convert column default constraint, column: {}", column))] #[snafu(display("Failed to convert column default constraint, column: {}", column))]
ConvertColumnDefaultConstraint { ConvertColumnDefaultConstraint {
column: String, column: String,
#[snafu(implicit)]
location: Location, location: Location,
source: datatypes::error::Error, source: datatypes::error::Error,
}, },
@@ -54,7 +51,6 @@ pub enum Error {
#[snafu(display("Invalid column default constraint, column: {}", column))] #[snafu(display("Invalid column default constraint, column: {}", column))]
InvalidColumnDefaultConstraint { InvalidColumnDefaultConstraint {
column: String, column: String,
#[snafu(implicit)]
location: Location, location: Location,
source: datatypes::error::Error, source: datatypes::error::Error,
}, },
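The only change running through these error enums is the `#[snafu(implicit)]` attribute on each `location` field. As a rough, self-contained sketch of what that attribute does (assuming snafu 0.8, where `Location` implements `GenerateImplicitData` and is filled in by the context selector rather than by the caller):

```rust
use snafu::{Location, ResultExt, Snafu};

#[derive(Debug, Snafu)]
enum Error {
    #[snafu(display("Failed to read config from {}", path))]
    ReadConfig {
        path: String,
        #[snafu(source)]
        error: std::io::Error,
        // Captured automatically when the error is built; callers never pass it.
        #[snafu(implicit)]
        location: Location,
    },
}

fn load(path: &str) -> Result<String, Error> {
    // Only `path` is supplied here; `error` comes from the `?` conversion and
    // `location` is generated implicitly.
    std::fs::read_to_string(path).context(ReadConfigSnafu { path })
}
```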


@@ -20,20 +20,21 @@ use common_decimal::Decimal128;
use common_time::interval::IntervalUnit; use common_time::interval::IntervalUnit;
use common_time::time::Time; use common_time::time::Time;
use common_time::timestamp::TimeUnit; use common_time::timestamp::TimeUnit;
use common_time::{Date, DateTime, Interval, Timestamp}; use common_time::{Date, DateTime, Duration, Interval, Timestamp};
use datatypes::prelude::{ConcreteDataType, ValueRef}; use datatypes::prelude::{ConcreteDataType, ValueRef};
use datatypes::scalars::ScalarVector; use datatypes::scalars::ScalarVector;
use datatypes::types::{ use datatypes::types::{
Int16Type, Int8Type, IntervalType, TimeType, TimestampType, UInt16Type, UInt8Type, DurationType, Int16Type, Int8Type, IntervalType, TimeType, TimestampType, UInt16Type, UInt8Type,
}; };
use datatypes::value::{OrderedF32, OrderedF64, Value}; use datatypes::value::{OrderedF32, OrderedF64, Value};
use datatypes::vectors::{ use datatypes::vectors::{
BinaryVector, BooleanVector, DateTimeVector, DateVector, Decimal128Vector, Float32Vector, BinaryVector, BooleanVector, DateTimeVector, DateVector, Decimal128Vector,
Float64Vector, Int32Vector, Int64Vector, IntervalDayTimeVector, IntervalMonthDayNanoVector, DurationMicrosecondVector, DurationMillisecondVector, DurationNanosecondVector,
IntervalYearMonthVector, PrimitiveVector, StringVector, TimeMicrosecondVector, DurationSecondVector, Float32Vector, Float64Vector, Int32Vector, Int64Vector,
TimeMillisecondVector, TimeNanosecondVector, TimeSecondVector, TimestampMicrosecondVector, IntervalDayTimeVector, IntervalMonthDayNanoVector, IntervalYearMonthVector, PrimitiveVector,
TimestampMillisecondVector, TimestampNanosecondVector, TimestampSecondVector, UInt32Vector, StringVector, TimeMicrosecondVector, TimeMillisecondVector, TimeNanosecondVector,
UInt64Vector, VectorRef, TimeSecondVector, TimestampMicrosecondVector, TimestampMillisecondVector,
TimestampNanosecondVector, TimestampSecondVector, UInt32Vector, UInt64Vector, VectorRef,
}; };
use greptime_proto::v1; use greptime_proto::v1;
use greptime_proto::v1::column_data_type_extension::TypeExt; use greptime_proto::v1::column_data_type_extension::TypeExt;
@@ -126,6 +127,14 @@ impl From<ColumnDataTypeWrapper> for ConcreteDataType {
ColumnDataType::IntervalMonthDayNano => { ColumnDataType::IntervalMonthDayNano => {
ConcreteDataType::interval_month_day_nano_datatype() ConcreteDataType::interval_month_day_nano_datatype()
} }
ColumnDataType::DurationSecond => ConcreteDataType::duration_second_datatype(),
ColumnDataType::DurationMillisecond => {
ConcreteDataType::duration_millisecond_datatype()
}
ColumnDataType::DurationMicrosecond => {
ConcreteDataType::duration_microsecond_datatype()
}
ColumnDataType::DurationNanosecond => ConcreteDataType::duration_nanosecond_datatype(),
ColumnDataType::Decimal128 => { ColumnDataType::Decimal128 => {
if let Some(TypeExt::DecimalType(d)) = datatype_wrapper if let Some(TypeExt::DecimalType(d)) = datatype_wrapper
.datatype_ext .datatype_ext
@@ -203,7 +212,11 @@ impl_column_type_functions_with_snake!(
TimeNanosecond, TimeNanosecond,
IntervalYearMonth, IntervalYearMonth,
IntervalDayTime, IntervalDayTime,
IntervalMonthDayNano IntervalMonthDayNano,
DurationSecond,
DurationMillisecond,
DurationMicrosecond,
DurationNanosecond
); );
impl ColumnDataTypeWrapper { impl ColumnDataTypeWrapper {
@@ -257,11 +270,16 @@ impl TryFrom<ConcreteDataType> for ColumnDataTypeWrapper {
IntervalType::DayTime(_) => ColumnDataType::IntervalDayTime, IntervalType::DayTime(_) => ColumnDataType::IntervalDayTime,
IntervalType::MonthDayNano(_) => ColumnDataType::IntervalMonthDayNano, IntervalType::MonthDayNano(_) => ColumnDataType::IntervalMonthDayNano,
}, },
ConcreteDataType::Duration(d) => match d {
DurationType::Second(_) => ColumnDataType::DurationSecond,
DurationType::Millisecond(_) => ColumnDataType::DurationMillisecond,
DurationType::Microsecond(_) => ColumnDataType::DurationMicrosecond,
DurationType::Nanosecond(_) => ColumnDataType::DurationNanosecond,
},
ConcreteDataType::Decimal128(_) => ColumnDataType::Decimal128, ConcreteDataType::Decimal128(_) => ColumnDataType::Decimal128,
ConcreteDataType::Null(_) ConcreteDataType::Null(_)
| ConcreteDataType::List(_) | ConcreteDataType::List(_)
| ConcreteDataType::Dictionary(_) | ConcreteDataType::Dictionary(_) => {
| ConcreteDataType::Duration(_) => {
return error::IntoColumnDataTypeSnafu { from: datatype }.fail() return error::IntoColumnDataTypeSnafu { from: datatype }.fail()
} }
}; };
@@ -391,6 +409,22 @@ pub fn values_with_capacity(datatype: ColumnDataType, capacity: usize) -> Values
interval_month_day_nano_values: Vec::with_capacity(capacity), interval_month_day_nano_values: Vec::with_capacity(capacity),
..Default::default() ..Default::default()
}, },
ColumnDataType::DurationSecond => Values {
duration_second_values: Vec::with_capacity(capacity),
..Default::default()
},
ColumnDataType::DurationMillisecond => Values {
duration_millisecond_values: Vec::with_capacity(capacity),
..Default::default()
},
ColumnDataType::DurationMicrosecond => Values {
duration_microsecond_values: Vec::with_capacity(capacity),
..Default::default()
},
ColumnDataType::DurationNanosecond => Values {
duration_nanosecond_values: Vec::with_capacity(capacity),
..Default::default()
},
ColumnDataType::Decimal128 => Values { ColumnDataType::Decimal128 => Values {
decimal128_values: Vec::with_capacity(capacity), decimal128_values: Vec::with_capacity(capacity),
..Default::default() ..Default::default()
@@ -442,8 +476,14 @@ pub fn push_vals(column: &mut Column, origin_count: usize, vector: VectorRef) {
.interval_month_day_nano_values .interval_month_day_nano_values
.push(convert_i128_to_interval(val.to_i128())), .push(convert_i128_to_interval(val.to_i128())),
}, },
Value::Duration(val) => match val.unit() {
TimeUnit::Second => values.duration_second_values.push(val.value()),
TimeUnit::Millisecond => values.duration_millisecond_values.push(val.value()),
TimeUnit::Microsecond => values.duration_microsecond_values.push(val.value()),
TimeUnit::Nanosecond => values.duration_nanosecond_values.push(val.value()),
},
Value::Decimal128(val) => values.decimal128_values.push(convert_to_pb_decimal128(val)), Value::Decimal128(val) => values.decimal128_values.push(convert_to_pb_decimal128(val)),
Value::List(_) | Value::Duration(_) => unreachable!(), Value::List(_) => unreachable!(),
}); });
column.null_mask = null_mask.into_vec(); column.null_mask = null_mask.into_vec();
} }
@@ -478,10 +518,6 @@ fn ddl_request_type(request: &DdlRequest) -> &'static str {
Some(Expr::Alter(_)) => "ddl.alter", Some(Expr::Alter(_)) => "ddl.alter",
Some(Expr::DropTable(_)) => "ddl.drop_table", Some(Expr::DropTable(_)) => "ddl.drop_table",
Some(Expr::TruncateTable(_)) => "ddl.truncate_table", Some(Expr::TruncateTable(_)) => "ddl.truncate_table",
Some(Expr::CreateFlow(_)) => "ddl.create_flow",
Some(Expr::DropFlow(_)) => "ddl.drop_flow",
Some(Expr::CreateView(_)) => "ddl.create_view",
Some(Expr::DropView(_)) => "ddl.drop_view",
None => "ddl.empty", None => "ddl.empty",
} }
} }
@@ -547,6 +583,10 @@ pub fn pb_value_to_value_ref<'a>(
let interval = Interval::from_month_day_nano(v.months, v.days, v.nanoseconds); let interval = Interval::from_month_day_nano(v.months, v.days, v.nanoseconds);
ValueRef::Interval(interval) ValueRef::Interval(interval)
} }
ValueData::DurationSecondValue(v) => ValueRef::Duration(Duration::new_second(*v)),
ValueData::DurationMillisecondValue(v) => ValueRef::Duration(Duration::new_millisecond(*v)),
ValueData::DurationMicrosecondValue(v) => ValueRef::Duration(Duration::new_microsecond(*v)),
ValueData::DurationNanosecondValue(v) => ValueRef::Duration(Duration::new_nanosecond(*v)),
ValueData::Decimal128Value(v) => { ValueData::Decimal128Value(v) => {
// get precision and scale from datatype_extension // get precision and scale from datatype_extension
if let Some(TypeExt::DecimalType(d)) = datatype_ext if let Some(TypeExt::DecimalType(d)) = datatype_ext
@@ -641,15 +681,26 @@ pub fn pb_values_to_vector_ref(data_type: &ConcreteDataType, values: Values) ->
)) ))
} }
}, },
ConcreteDataType::Duration(unit) => match unit {
DurationType::Second(_) => Arc::new(DurationSecondVector::from_vec(
values.duration_second_values,
)),
DurationType::Millisecond(_) => Arc::new(DurationMillisecondVector::from_vec(
values.duration_millisecond_values,
)),
DurationType::Microsecond(_) => Arc::new(DurationMicrosecondVector::from_vec(
values.duration_microsecond_values,
)),
DurationType::Nanosecond(_) => Arc::new(DurationNanosecondVector::from_vec(
values.duration_nanosecond_values,
)),
},
ConcreteDataType::Decimal128(d) => Arc::new(Decimal128Vector::from_values( ConcreteDataType::Decimal128(d) => Arc::new(Decimal128Vector::from_values(
values.decimal128_values.iter().map(|x| { values.decimal128_values.iter().map(|x| {
Decimal128::from_value_precision_scale(x.hi, x.lo, d.precision(), d.scale()).into() Decimal128::from_value_precision_scale(x.hi, x.lo, d.precision(), d.scale()).into()
}), }),
)), )),
ConcreteDataType::Null(_) ConcreteDataType::Null(_) | ConcreteDataType::List(_) | ConcreteDataType::Dictionary(_) => {
| ConcreteDataType::List(_)
| ConcreteDataType::Dictionary(_)
| ConcreteDataType::Duration(_) => {
unreachable!() unreachable!()
} }
} }
@@ -798,6 +849,26 @@ pub fn pb_values_to_values(data_type: &ConcreteDataType, values: Values) -> Vec<
)) ))
}) })
.collect(), .collect(),
ConcreteDataType::Duration(DurationType::Second(_)) => values
.duration_second_values
.into_iter()
.map(|v| Value::Duration(Duration::new_second(v)))
.collect(),
ConcreteDataType::Duration(DurationType::Millisecond(_)) => values
.duration_millisecond_values
.into_iter()
.map(|v| Value::Duration(Duration::new_millisecond(v)))
.collect(),
ConcreteDataType::Duration(DurationType::Microsecond(_)) => values
.duration_microsecond_values
.into_iter()
.map(|v| Value::Duration(Duration::new_microsecond(v)))
.collect(),
ConcreteDataType::Duration(DurationType::Nanosecond(_)) => values
.duration_nanosecond_values
.into_iter()
.map(|v| Value::Duration(Duration::new_nanosecond(v)))
.collect(),
ConcreteDataType::Decimal128(d) => values ConcreteDataType::Decimal128(d) => values
.decimal128_values .decimal128_values
.into_iter() .into_iter()
@@ -810,10 +881,7 @@ pub fn pb_values_to_values(data_type: &ConcreteDataType, values: Values) -> Vec<
)) ))
}) })
.collect(), .collect(),
ConcreteDataType::Null(_) ConcreteDataType::Null(_) | ConcreteDataType::List(_) | ConcreteDataType::Dictionary(_) => {
| ConcreteDataType::List(_)
| ConcreteDataType::Dictionary(_)
| ConcreteDataType::Duration(_) => {
unreachable!() unreachable!()
} }
} }
@@ -925,10 +993,24 @@ pub fn to_proto_value(value: Value) -> Option<v1::Value> {
)), )),
}, },
}, },
Value::Duration(v) => match v.unit() {
TimeUnit::Second => v1::Value {
value_data: Some(ValueData::DurationSecondValue(v.value())),
},
TimeUnit::Millisecond => v1::Value {
value_data: Some(ValueData::DurationMillisecondValue(v.value())),
},
TimeUnit::Microsecond => v1::Value {
value_data: Some(ValueData::DurationMicrosecondValue(v.value())),
},
TimeUnit::Nanosecond => v1::Value {
value_data: Some(ValueData::DurationNanosecondValue(v.value())),
},
},
Value::Decimal128(v) => v1::Value { Value::Decimal128(v) => v1::Value {
value_data: Some(ValueData::Decimal128Value(convert_to_pb_decimal128(v))), value_data: Some(ValueData::Decimal128Value(convert_to_pb_decimal128(v))),
}, },
Value::List(_) | Value::Duration(_) => return None, Value::List(_) => return None,
}; };
Some(proto_value) Some(proto_value)
@@ -965,6 +1047,10 @@ pub fn proto_value_type(value: &v1::Value) -> Option<ColumnDataType> {
ValueData::IntervalYearMonthValue(_) => ColumnDataType::IntervalYearMonth, ValueData::IntervalYearMonthValue(_) => ColumnDataType::IntervalYearMonth,
ValueData::IntervalDayTimeValue(_) => ColumnDataType::IntervalDayTime, ValueData::IntervalDayTimeValue(_) => ColumnDataType::IntervalDayTime,
ValueData::IntervalMonthDayNanoValue(_) => ColumnDataType::IntervalMonthDayNano, ValueData::IntervalMonthDayNanoValue(_) => ColumnDataType::IntervalMonthDayNano,
ValueData::DurationSecondValue(_) => ColumnDataType::DurationSecond,
ValueData::DurationMillisecondValue(_) => ColumnDataType::DurationMillisecond,
ValueData::DurationMicrosecondValue(_) => ColumnDataType::DurationMicrosecond,
ValueData::DurationNanosecondValue(_) => ColumnDataType::DurationNanosecond,
ValueData::Decimal128Value(_) => ColumnDataType::Decimal128, ValueData::Decimal128Value(_) => ColumnDataType::Decimal128,
}; };
Some(value_type) Some(value_type)
@@ -1022,8 +1108,14 @@ pub fn value_to_grpc_value(value: Value) -> GrpcValue {
ValueData::IntervalMonthDayNanoValue(convert_i128_to_interval(v.to_i128())) ValueData::IntervalMonthDayNanoValue(convert_i128_to_interval(v.to_i128()))
} }
}), }),
Value::Duration(v) => Some(match v.unit() {
TimeUnit::Second => ValueData::DurationSecondValue(v.value()),
TimeUnit::Millisecond => ValueData::DurationMillisecondValue(v.value()),
TimeUnit::Microsecond => ValueData::DurationMicrosecondValue(v.value()),
TimeUnit::Nanosecond => ValueData::DurationNanosecondValue(v.value()),
}),
Value::Decimal128(v) => Some(ValueData::Decimal128Value(convert_to_pb_decimal128(v))), Value::Decimal128(v) => Some(ValueData::Decimal128Value(convert_to_pb_decimal128(v))),
Value::List(_) | Value::Duration(_) => unreachable!(), Value::List(_) => unreachable!(),
}, },
} }
} }
@@ -1033,15 +1125,16 @@ mod tests {
use std::sync::Arc; use std::sync::Arc;
use datatypes::types::{ use datatypes::types::{
Int32Type, IntervalDayTimeType, IntervalMonthDayNanoType, IntervalYearMonthType, DurationMillisecondType, DurationSecondType, Int32Type, IntervalDayTimeType,
TimeMillisecondType, TimeSecondType, TimestampMillisecondType, TimestampSecondType, IntervalMonthDayNanoType, IntervalYearMonthType, TimeMillisecondType, TimeSecondType,
UInt32Type, TimestampMillisecondType, TimestampSecondType, UInt32Type,
}; };
use datatypes::vectors::{ use datatypes::vectors::{
BooleanVector, IntervalDayTimeVector, IntervalMonthDayNanoVector, IntervalYearMonthVector, BooleanVector, DurationMicrosecondVector, DurationMillisecondVector,
TimeMicrosecondVector, TimeMillisecondVector, TimeNanosecondVector, TimeSecondVector, DurationNanosecondVector, DurationSecondVector, IntervalDayTimeVector,
TimestampMicrosecondVector, TimestampMillisecondVector, TimestampNanosecondVector, IntervalMonthDayNanoVector, IntervalYearMonthVector, TimeMicrosecondVector,
TimestampSecondVector, Vector, TimeMillisecondVector, TimeNanosecondVector, TimeSecondVector, TimestampMicrosecondVector,
TimestampMillisecondVector, TimestampNanosecondVector, TimestampSecondVector, Vector,
}; };
use paste::paste; use paste::paste;
@@ -1117,6 +1210,10 @@ mod tests {
let values = values.interval_month_day_nano_values; let values = values.interval_month_day_nano_values;
assert_eq!(2, values.capacity()); assert_eq!(2, values.capacity());
let values = values_with_capacity(ColumnDataType::DurationMillisecond, 2);
let values = values.duration_millisecond_values;
assert_eq!(2, values.capacity());
let values = values_with_capacity(ColumnDataType::Decimal128, 2); let values = values_with_capacity(ColumnDataType::Decimal128, 2);
let values = values.decimal128_values; let values = values.decimal128_values;
assert_eq!(2, values.capacity()); assert_eq!(2, values.capacity());
@@ -1204,6 +1301,10 @@ mod tests {
ConcreteDataType::interval_datatype(IntervalUnit::MonthDayNano), ConcreteDataType::interval_datatype(IntervalUnit::MonthDayNano),
ColumnDataTypeWrapper::interval_month_day_nano_datatype().into() ColumnDataTypeWrapper::interval_month_day_nano_datatype().into()
); );
assert_eq!(
ConcreteDataType::duration_millisecond_datatype(),
ColumnDataTypeWrapper::duration_millisecond_datatype().into()
);
assert_eq!( assert_eq!(
ConcreteDataType::decimal128_datatype(10, 2), ConcreteDataType::decimal128_datatype(10, 2),
ColumnDataTypeWrapper::decimal128_datatype(10, 2).into() ColumnDataTypeWrapper::decimal128_datatype(10, 2).into()
@@ -1296,6 +1397,12 @@ mod tests {
.try_into() .try_into()
.unwrap() .unwrap()
); );
assert_eq!(
ColumnDataTypeWrapper::duration_millisecond_datatype(),
ConcreteDataType::duration_millisecond_datatype()
.try_into()
.unwrap()
);
assert_eq!( assert_eq!(
ColumnDataTypeWrapper::decimal128_datatype(10, 2), ColumnDataTypeWrapper::decimal128_datatype(10, 2),
@@ -1449,6 +1556,48 @@ mod tests {
}); });
} }
#[test]
fn test_column_put_duration_values() {
let mut column = Column {
column_name: "test".to_string(),
semantic_type: 0,
values: Some(Values {
..Default::default()
}),
null_mask: vec![],
datatype: 0,
..Default::default()
};
let vector = Arc::new(DurationNanosecondVector::from_vec(vec![1, 2, 3]));
push_vals(&mut column, 3, vector);
assert_eq!(
vec![1, 2, 3],
column.values.as_ref().unwrap().duration_nanosecond_values
);
let vector = Arc::new(DurationMicrosecondVector::from_vec(vec![7, 8, 9]));
push_vals(&mut column, 3, vector);
assert_eq!(
vec![7, 8, 9],
column.values.as_ref().unwrap().duration_microsecond_values
);
let vector = Arc::new(DurationMillisecondVector::from_vec(vec![4, 5, 6]));
push_vals(&mut column, 3, vector);
assert_eq!(
vec![4, 5, 6],
column.values.as_ref().unwrap().duration_millisecond_values
);
let vector = Arc::new(DurationSecondVector::from_vec(vec![10, 11, 12]));
push_vals(&mut column, 3, vector);
assert_eq!(
vec![10, 11, 12],
column.values.as_ref().unwrap().duration_second_values
);
}
#[test] #[test]
fn test_column_put_vector() { fn test_column_put_vector() {
use crate::v1::SemanticType; use crate::v1::SemanticType;
@@ -1550,6 +1699,39 @@ mod tests {
assert_eq!(expect, actual); assert_eq!(expect, actual);
} }
#[test]
fn test_convert_duration_values() {
// second
let actual = pb_values_to_values(
&ConcreteDataType::Duration(DurationType::Second(DurationSecondType)),
Values {
duration_second_values: vec![1_i64, 2_i64, 3_i64],
..Default::default()
},
);
let expect = vec![
Value::Duration(Duration::new_second(1_i64)),
Value::Duration(Duration::new_second(2_i64)),
Value::Duration(Duration::new_second(3_i64)),
];
assert_eq!(expect, actual);
// millisecond
let actual = pb_values_to_values(
&ConcreteDataType::Duration(DurationType::Millisecond(DurationMillisecondType)),
Values {
duration_millisecond_values: vec![1_i64, 2_i64, 3_i64],
..Default::default()
},
);
let expect = vec![
Value::Duration(Duration::new_millisecond(1_i64)),
Value::Duration(Duration::new_millisecond(2_i64)),
Value::Duration(Duration::new_millisecond(3_i64)),
];
assert_eq!(expect, actual);
}
#[test] #[test]
fn test_convert_interval_values() { fn test_convert_interval_values() {
// year_month // year_month
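For readers unfamiliar with these helpers, here is a small sketch of the round trip that the side carrying the explicit `Duration` match arms allows; the module paths are assumptions based on the imports shown above, not verified against the crate layout:

```rust
use api::helper::to_proto_value;
use api::v1::value::ValueData;
use common_time::Duration;
use datatypes::value::Value;

fn main() {
    // A 1.5 s duration expressed in milliseconds.
    let value = Value::Duration(Duration::new_millisecond(1_500));

    // With the Duration arms in place this maps to the matching proto variant;
    // on the other side of the diff the same call returns None.
    let proto = to_proto_value(value).expect("duration values are convertible here");
    assert!(matches!(
        proto.value_data,
        Some(ValueData::DurationMillisecondValue(1_500))
    ));
}
```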


@@ -14,12 +14,12 @@ workspace = true
[dependencies] [dependencies]
api.workspace = true api.workspace = true
async-trait.workspace = true async-trait.workspace = true
common-base.workspace = true
common-error.workspace = true common-error.workspace = true
common-macro.workspace = true common-macro.workspace = true
common-telemetry.workspace = true common-telemetry.workspace = true
digest = "0.10" digest = "0.10"
notify.workspace = true notify.workspace = true
secrecy = { version = "0.8", features = ["serde", "alloc"] }
sha1 = "0.10" sha1 = "0.10"
snafu.workspace = true snafu.workspace = true
sql.workspace = true sql.workspace = true


@@ -14,8 +14,8 @@
use std::sync::Arc; use std::sync::Arc;
use common_base::secrets::SecretString;
use digest::Digest; use digest::Digest;
use secrecy::SecretString;
use sha1::Sha1; use sha1::Sha1;
use snafu::{ensure, OptionExt}; use snafu::{ensure, OptionExt};


@@ -34,13 +34,11 @@ pub enum Error {
Io { Io {
#[snafu(source)] #[snafu(source)]
error: std::io::Error, error: std::io::Error,
#[snafu(implicit)]
location: Location, location: Location,
}, },
#[snafu(display("Auth failed"))] #[snafu(display("Auth failed"))]
AuthBackend { AuthBackend {
#[snafu(implicit)]
location: Location, location: Location,
source: BoxedError, source: BoxedError,
}, },
@@ -74,10 +72,7 @@ pub enum Error {
}, },
#[snafu(display("User is not authorized to perform this action"))] #[snafu(display("User is not authorized to perform this action"))]
PermissionDenied { PermissionDenied { location: Location },
#[snafu(implicit)]
location: Location,
},
} }
impl ErrorExt for Error { impl ErrorExt for Error {


@@ -12,7 +12,7 @@
// See the License for the specific language governing permissions and // See the License for the specific language governing permissions and
// limitations under the License. // limitations under the License.
use common_base::secrets::ExposeSecret; use secrecy::ExposeSecret;
use crate::error::{ use crate::error::{
AccessDeniedSnafu, Result, UnsupportedPasswordTypeSnafu, UserNotFoundSnafu, AccessDeniedSnafu, Result, UnsupportedPasswordTypeSnafu, UserNotFoundSnafu,


@@ -21,7 +21,7 @@ use std::io;
use std::io::BufRead; use std::io::BufRead;
use std::path::Path; use std::path::Path;
use common_base::secrets::ExposeSecret; use secrecy::ExposeSecret;
use snafu::{ensure, OptionExt, ResultExt}; use snafu::{ensure, OptionExt, ResultExt};
use crate::common::{Identity, Password}; use crate::common::{Identity, Password};
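These hunks only move `SecretString`/`ExposeSecret` between `secrecy` and the re-export in `common_base::secrets`; the usage pattern stays the same. A minimal sketch of that pattern, written here against the `secrecy` crate purely for illustration:

```rust
use secrecy::{ExposeSecret, SecretString};

fn check_password(stored: &SecretString, supplied: &str) -> bool {
    // The wrapped value can only be read through an explicit expose call, so it
    // cannot leak through Debug, Display, or logs by accident.
    stored.expose_secret() == supplied
}

fn main() {
    let stored = SecretString::new("s3cr3t".to_string());
    assert!(check_password(&stored, "s3cr3t"));
    assert!(!check_password(&stored, "wrong"));
}
```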

src/cache/Cargo.toml vendored

@@ -1,14 +0,0 @@
[package]
name = "cache"
version.workspace = true
edition.workspace = true
license.workspace = true
[dependencies]
catalog.workspace = true
common-error.workspace = true
common-macro.workspace = true
common-meta.workspace = true
moka.workspace = true
snafu.workspace = true
substrait.workspace = true


@@ -1,44 +0,0 @@
// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use common_error::ext::ErrorExt;
use common_error::status_code::StatusCode;
use common_macro::stack_trace_debug;
use snafu::{Location, Snafu};
#[derive(Snafu)]
#[snafu(visibility(pub))]
#[stack_trace_debug]
pub enum Error {
#[snafu(display("Failed to get cache from cache registry: {}", name))]
CacheRequired {
#[snafu(implicit)]
location: Location,
name: String,
},
}
pub type Result<T> = std::result::Result<T, Error>;
impl ErrorExt for Error {
fn status_code(&self) -> StatusCode {
match self {
Error::CacheRequired { .. } => StatusCode::Internal,
}
}
fn as_any(&self) -> &dyn std::any::Any {
self
}
}

src/cache/src/lib.rs vendored

@@ -1,135 +0,0 @@
// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
pub mod error;
use std::sync::Arc;
use std::time::Duration;
use catalog::kvbackend::new_table_cache;
use common_meta::cache::{
new_table_flownode_set_cache, new_table_info_cache, new_table_name_cache,
new_table_route_cache, new_view_info_cache, CacheRegistry, CacheRegistryBuilder,
LayeredCacheRegistryBuilder,
};
use common_meta::kv_backend::KvBackendRef;
use moka::future::CacheBuilder;
use snafu::OptionExt;
use crate::error::Result;
const DEFAULT_CACHE_MAX_CAPACITY: u64 = 65536;
const DEFAULT_CACHE_TTL: Duration = Duration::from_secs(10 * 60);
const DEFAULT_CACHE_TTI: Duration = Duration::from_secs(5 * 60);
pub const TABLE_INFO_CACHE_NAME: &str = "table_info_cache";
pub const VIEW_INFO_CACHE_NAME: &str = "view_info_cache";
pub const TABLE_NAME_CACHE_NAME: &str = "table_name_cache";
pub const TABLE_CACHE_NAME: &str = "table_cache";
pub const TABLE_FLOWNODE_SET_CACHE_NAME: &str = "table_flownode_set_cache";
pub const TABLE_ROUTE_CACHE_NAME: &str = "table_route_cache";
pub fn build_fundamental_cache_registry(kv_backend: KvBackendRef) -> CacheRegistry {
// Builds table info cache
let cache = CacheBuilder::new(DEFAULT_CACHE_MAX_CAPACITY)
.time_to_live(DEFAULT_CACHE_TTL)
.time_to_idle(DEFAULT_CACHE_TTI)
.build();
let table_info_cache = Arc::new(new_table_info_cache(
TABLE_INFO_CACHE_NAME.to_string(),
cache,
kv_backend.clone(),
));
// Builds table name cache
let cache = CacheBuilder::new(DEFAULT_CACHE_MAX_CAPACITY)
.time_to_live(DEFAULT_CACHE_TTL)
.time_to_idle(DEFAULT_CACHE_TTI)
.build();
let table_name_cache = Arc::new(new_table_name_cache(
TABLE_NAME_CACHE_NAME.to_string(),
cache,
kv_backend.clone(),
));
// Builds table route cache
let cache = CacheBuilder::new(DEFAULT_CACHE_MAX_CAPACITY)
.time_to_live(DEFAULT_CACHE_TTL)
.time_to_idle(DEFAULT_CACHE_TTI)
.build();
let table_route_cache = Arc::new(new_table_route_cache(
TABLE_ROUTE_CACHE_NAME.to_string(),
cache,
kv_backend.clone(),
));
// Builds table flownode set cache
let cache = CacheBuilder::new(DEFAULT_CACHE_MAX_CAPACITY)
.time_to_live(DEFAULT_CACHE_TTL)
.time_to_idle(DEFAULT_CACHE_TTI)
.build();
let table_flownode_set_cache = Arc::new(new_table_flownode_set_cache(
TABLE_FLOWNODE_SET_CACHE_NAME.to_string(),
cache,
kv_backend.clone(),
));
// Builds the view info cache
let cache = CacheBuilder::new(DEFAULT_CACHE_MAX_CAPACITY)
.time_to_live(DEFAULT_CACHE_TTL)
.time_to_idle(DEFAULT_CACHE_TTI)
.build();
let view_info_cache = Arc::new(new_view_info_cache(
VIEW_INFO_CACHE_NAME.to_string(),
cache,
kv_backend.clone(),
));
CacheRegistryBuilder::default()
.add_cache(table_info_cache)
.add_cache(table_name_cache)
.add_cache(table_route_cache)
.add_cache(view_info_cache)
.add_cache(table_flownode_set_cache)
.build()
}
// TODO(weny): Make the cache configurable.
pub fn with_default_composite_cache_registry(
builder: LayeredCacheRegistryBuilder,
) -> Result<LayeredCacheRegistryBuilder> {
let table_info_cache = builder.get().context(error::CacheRequiredSnafu {
name: TABLE_INFO_CACHE_NAME,
})?;
let table_name_cache = builder.get().context(error::CacheRequiredSnafu {
name: TABLE_NAME_CACHE_NAME,
})?;
// Builds table cache
let cache = CacheBuilder::new(DEFAULT_CACHE_MAX_CAPACITY)
.time_to_live(DEFAULT_CACHE_TTL)
.time_to_idle(DEFAULT_CACHE_TTI)
.build();
let table_cache = Arc::new(new_table_cache(
TABLE_CACHE_NAME.to_string(),
cache,
table_info_cache,
table_name_cache,
));
let registry = CacheRegistryBuilder::default()
.add_cache(table_cache)
.build();
Ok(builder.add_cache_registry(registry))
}


@@ -16,9 +16,7 @@ arrow.workspace = true
arrow-schema.workspace = true arrow-schema.workspace = true
async-stream.workspace = true async-stream.workspace = true
async-trait = "0.1" async-trait = "0.1"
bytes.workspace = true
common-catalog.workspace = true common-catalog.workspace = true
common-config.workspace = true
common-error.workspace = true common-error.workspace = true
common-macro.workspace = true common-macro.workspace = true
common-meta.workspace = true common-meta.workspace = true
@@ -32,7 +30,6 @@ datafusion.workspace = true
datatypes.workspace = true datatypes.workspace = true
futures = "0.3" futures = "0.3"
futures-util.workspace = true futures-util.workspace = true
humantime.workspace = true
itertools.workspace = true itertools.workspace = true
lazy_static.workspace = true lazy_static.workspace = true
meta-client.workspace = true meta-client.workspace = true
@@ -49,11 +46,8 @@ table.workspace = true
tokio.workspace = true tokio.workspace = true
[dev-dependencies] [dev-dependencies]
cache.workspace = true
catalog = { workspace = true, features = ["testing"] } catalog = { workspace = true, features = ["testing"] }
chrono.workspace = true chrono.workspace = true
common-meta = { workspace = true, features = ["testing"] }
common-query = { workspace = true, features = ["testing"] }
common-test-util.workspace = true common-test-util.workspace = true
log-store.workspace = true log-store.workspace = true
object-store.workspace = true object-store.workspace = true


@@ -19,7 +19,10 @@ use common_error::ext::{BoxedError, ErrorExt};
use common_error::status_code::StatusCode; use common_error::status_code::StatusCode;
use common_macro::stack_trace_debug; use common_macro::stack_trace_debug;
use datafusion::error::DataFusionError; use datafusion::error::DataFusionError;
use datatypes::prelude::ConcreteDataType;
use snafu::{Location, Snafu}; use snafu::{Location, Snafu};
use table::metadata::TableId;
use tokio::task::JoinError;
#[derive(Snafu)] #[derive(Snafu)]
#[snafu(visibility(pub))] #[snafu(visibility(pub))]
@@ -27,14 +30,12 @@ use snafu::{Location, Snafu};
pub enum Error { pub enum Error {
#[snafu(display("Failed to list catalogs"))] #[snafu(display("Failed to list catalogs"))]
ListCatalogs { ListCatalogs {
#[snafu(implicit)]
location: Location, location: Location,
source: BoxedError, source: BoxedError,
}, },
#[snafu(display("Failed to list {}'s schemas", catalog))] #[snafu(display("Failed to list {}'s schemas", catalog))]
ListSchemas { ListSchemas {
#[snafu(implicit)]
location: Location, location: Location,
catalog: String, catalog: String,
source: BoxedError, source: BoxedError,
@@ -42,46 +43,80 @@ pub enum Error {
#[snafu(display("Failed to list {}.{}'s tables", catalog, schema))] #[snafu(display("Failed to list {}.{}'s tables", catalog, schema))]
ListTables { ListTables {
#[snafu(implicit)]
location: Location, location: Location,
catalog: String, catalog: String,
schema: String, schema: String,
source: BoxedError, source: BoxedError,
}, },
#[snafu(display("Failed to list nodes in cluster: {source}"))] #[snafu(display("Failed to re-compile script due to internal error"))]
ListNodes { CompileScriptInternal {
#[snafu(implicit)]
location: Location, location: Location,
source: BoxedError, source: BoxedError,
}, },
#[snafu(display("Failed to open system catalog table"))]
#[snafu(display("Failed to re-compile script due to internal error"))] OpenSystemCatalog {
CompileScriptInternal {
#[snafu(implicit)]
location: Location, location: Location,
source: BoxedError, source: table::error::Error,
},
#[snafu(display("Failed to create system catalog table"))]
CreateSystemCatalog {
location: Location,
source: table::error::Error,
}, },
#[snafu(display("Failed to create table, table info: {}", table_info))] #[snafu(display("Failed to create table, table info: {}", table_info))]
CreateTable { CreateTable {
table_info: String, table_info: String,
#[snafu(implicit)]
location: Location, location: Location,
source: table::error::Error, source: table::error::Error,
}, },
#[snafu(display("System catalog is not valid: {}", msg))] #[snafu(display("System catalog is not valid: {}", msg))]
SystemCatalog { SystemCatalog { msg: String, location: Location },
msg: String,
#[snafu(implicit)] #[snafu(display(
"System catalog table type mismatch, expected: binary, found: {:?}",
data_type,
))]
SystemCatalogTypeMismatch {
data_type: ConcreteDataType,
location: Location, location: Location,
}, },
#[snafu(display("Invalid system catalog entry type: {:?}", entry_type))]
InvalidEntryType {
entry_type: Option<u8>,
location: Location,
},
#[snafu(display("Invalid system catalog key: {:?}", key))]
InvalidKey {
key: Option<String>,
location: Location,
},
#[snafu(display("Catalog value is not present"))]
EmptyValue { location: Location },
#[snafu(display("Failed to deserialize value"))]
ValueDeserialize {
#[snafu(source)]
error: serde_json::error::Error,
location: Location,
},
#[snafu(display("Table engine not found: {}", engine_name))]
TableEngineNotFound {
engine_name: String,
location: Location,
source: table::error::Error,
},
#[snafu(display("Cannot find catalog by name: {}", catalog_name))] #[snafu(display("Cannot find catalog by name: {}", catalog_name))]
CatalogNotFound { CatalogNotFound {
catalog_name: String, catalog_name: String,
#[snafu(implicit)]
location: Location, location: Location,
}, },
@@ -89,28 +124,43 @@ pub enum Error {
SchemaNotFound { SchemaNotFound {
catalog: String, catalog: String,
schema: String, schema: String,
#[snafu(implicit)]
location: Location, location: Location,
}, },
#[snafu(display("Table `{}` already exists", table))] #[snafu(display("Table `{}` already exists", table))]
TableExists { TableExists { table: String, location: Location },
table: String,
#[snafu(implicit)]
location: Location,
},
#[snafu(display("Table not found: {}", table))] #[snafu(display("Table not found: {}", table))]
TableNotExist { TableNotExist { table: String, location: Location },
table: String,
#[snafu(implicit)] #[snafu(display("Schema {} already exists", schema))]
SchemaExists { schema: String, location: Location },
#[snafu(display("Operation {} not implemented yet", operation))]
Unimplemented {
operation: String,
location: Location, location: Location,
}, },
#[snafu(display("View info not found: {}", name))] #[snafu(display("Operation {} not supported", op))]
ViewInfoNotFound { NotSupported { op: String, location: Location },
name: String,
#[snafu(implicit)] #[snafu(display("Failed to open table {table_id}"))]
OpenTable {
table_id: TableId,
location: Location,
source: table::error::Error,
},
#[snafu(display("Failed to open table in parallel"))]
ParallelOpenTable {
#[snafu(source)]
error: JoinError,
},
#[snafu(display("Table not found while opening table, table info: {}", table_info))]
TableNotFound {
table_info: String,
location: Location, location: Location,
}, },
@@ -120,44 +170,59 @@ pub enum Error {
#[snafu(display("Failed to find region routes"))] #[snafu(display("Failed to find region routes"))]
FindRegionRoutes { source: partition::error::Error }, FindRegionRoutes { source: partition::error::Error },
#[snafu(display("Failed to create recordbatch"))] #[snafu(display("Failed to read system catalog table records"))]
CreateRecordBatch { ReadSystemCatalog {
#[snafu(implicit)]
location: Location, location: Location,
source: common_recordbatch::error::Error, source: common_recordbatch::error::Error,
}, },
#[snafu(display("Failed to create recordbatch"))]
CreateRecordBatch {
location: Location,
source: common_recordbatch::error::Error,
},
#[snafu(display("Failed to insert table creation record to system catalog"))]
InsertCatalogRecord {
location: Location,
source: table::error::Error,
},
#[snafu(display("Failed to scan system catalog table"))]
SystemCatalogTableScan {
location: Location,
source: table::error::Error,
},
#[snafu(display("Internal error"))] #[snafu(display("Internal error"))]
Internal { Internal {
#[snafu(implicit)]
location: Location, location: Location,
source: BoxedError, source: BoxedError,
}, },
#[snafu(display("Failed to upgrade weak catalog manager reference"))] #[snafu(display("Failed to upgrade weak catalog manager reference"))]
UpgradeWeakCatalogManagerRef { UpgradeWeakCatalogManagerRef { location: Location },
#[snafu(implicit)]
location: Location,
},
#[snafu(display("Failed to decode logical plan for view: {}", name))] #[snafu(display("Failed to execute system catalog table scan"))]
DecodePlan { SystemCatalogTableScanExec {
name: String,
#[snafu(implicit)]
location: Location, location: Location,
source: common_query::error::Error, source: common_query::error::Error,
}, },
#[snafu(display("Cannot parse catalog value"))]
InvalidCatalogValue {
location: Location,
source: common_catalog::error::Error,
},
#[snafu(display("Failed to perform metasrv operation"))] #[snafu(display("Failed to perform metasrv operation"))]
Metasrv { Metasrv {
#[snafu(implicit)]
location: Location, location: Location,
source: meta_client::error::Error, source: meta_client::error::Error,
}, },
#[snafu(display("Invalid table info in catalog"))] #[snafu(display("Invalid table info in catalog"))]
InvalidTableInfoInCatalog { InvalidTableInfoInCatalog {
#[snafu(implicit)]
location: Location, location: Location,
source: datatypes::error::Error, source: datatypes::error::Error,
}, },
@@ -169,43 +234,29 @@ pub enum Error {
Datafusion { Datafusion {
#[snafu(source)] #[snafu(source)]
error: DataFusionError, error: DataFusionError,
#[snafu(implicit)]
location: Location, location: Location,
}, },
#[snafu(display("Table schema mismatch"))]
TableSchemaMismatch {
location: Location,
source: table::error::Error,
},
#[snafu(display("A generic error has occurred, msg: {}", msg))]
Generic { msg: String, location: Location },
#[snafu(display("Table metadata manager error"))] #[snafu(display("Table metadata manager error"))]
TableMetadataManager { TableMetadataManager {
source: common_meta::error::Error, source: common_meta::error::Error,
#[snafu(implicit)]
location: Location, location: Location,
}, },
#[snafu(display("Failed to get table cache"))] #[snafu(display("Get null from table cache, key: {}", key))]
GetTableCache { TableCacheNotGet { key: String, location: Location },
source: common_meta::error::Error,
#[snafu(implicit)]
location: Location,
},
#[snafu(display("Failed to get view info from cache"))] #[snafu(display("Failed to get table cache, err: {}", err_msg))]
GetViewCache { GetTableCache { err_msg: String },
source: common_meta::error::Error,
#[snafu(implicit)]
location: Location,
},
#[snafu(display("Cache not found: {name}"))]
CacheNotFound {
name: String,
#[snafu(implicit)]
location: Location,
},
#[snafu(display("Failed to cast the catalog manager"))]
CastManager {
#[snafu(implicit)]
location: Location,
},
} }
pub type Result<T> = std::result::Result<T, Error>; pub type Result<T> = std::result::Result<T, Error>;
@@ -213,43 +264,60 @@ pub type Result<T> = std::result::Result<T, Error>;
impl ErrorExt for Error { impl ErrorExt for Error {
fn status_code(&self) -> StatusCode { fn status_code(&self) -> StatusCode {
match self { match self {
Error::SchemaNotFound { .. } Error::InvalidKey { .. }
| Error::SchemaNotFound { .. }
| Error::CatalogNotFound { .. } | Error::CatalogNotFound { .. }
| Error::FindPartitions { .. } | Error::FindPartitions { .. }
| Error::FindRegionRoutes { .. } | Error::FindRegionRoutes { .. }
| Error::CacheNotFound { .. } | Error::InvalidEntryType { .. }
| Error::CastManager { .. } => StatusCode::Unexpected, | Error::ParallelOpenTable { .. } => StatusCode::Unexpected,
Error::ViewInfoNotFound { .. } => StatusCode::TableNotFound, Error::TableNotFound { .. } => StatusCode::TableNotFound,
Error::SystemCatalog { .. } => StatusCode::StorageUnavailable, Error::SystemCatalog { .. }
| Error::EmptyValue { .. }
| Error::ValueDeserialize { .. } => StatusCode::StorageUnavailable,
Error::UpgradeWeakCatalogManagerRef { .. } => StatusCode::Internal, Error::Generic { .. }
| Error::SystemCatalogTypeMismatch { .. }
| Error::UpgradeWeakCatalogManagerRef { .. } => StatusCode::Internal,
Error::ReadSystemCatalog { source, .. } | Error::CreateRecordBatch { source, .. } => {
source.status_code()
}
Error::InvalidCatalogValue { source, .. } => source.status_code(),
Error::CreateRecordBatch { source, .. } => source.status_code(),
Error::TableExists { .. } => StatusCode::TableAlreadyExists, Error::TableExists { .. } => StatusCode::TableAlreadyExists,
Error::TableNotExist { .. } => StatusCode::TableNotFound, Error::TableNotExist { .. } => StatusCode::TableNotFound,
Error::SchemaExists { .. } | Error::TableEngineNotFound { .. } => {
StatusCode::InvalidArguments
}
Error::ListCatalogs { source, .. } Error::ListCatalogs { source, .. }
| Error::ListNodes { source, .. }
| Error::ListSchemas { source, .. } | Error::ListSchemas { source, .. }
| Error::ListTables { source, .. } => source.status_code(), | Error::ListTables { source, .. } => source.status_code(),
Error::CreateTable { source, .. } => source.status_code(), Error::OpenSystemCatalog { source, .. }
| Error::CreateSystemCatalog { source, .. }
| Error::InsertCatalogRecord { source, .. }
| Error::OpenTable { source, .. }
| Error::CreateTable { source, .. }
| Error::TableSchemaMismatch { source, .. } => source.status_code(),
Error::Metasrv { source, .. } => source.status_code(), Error::Metasrv { source, .. } => source.status_code(),
Error::DecodePlan { source, .. } => source.status_code(), Error::SystemCatalogTableScan { source, .. } => source.status_code(),
Error::SystemCatalogTableScanExec { source, .. } => source.status_code(),
Error::InvalidTableInfoInCatalog { source, .. } => source.status_code(), Error::InvalidTableInfoInCatalog { source, .. } => source.status_code(),
Error::CompileScriptInternal { source, .. } | Error::Internal { source, .. } => { Error::CompileScriptInternal { source, .. } | Error::Internal { source, .. } => {
source.status_code() source.status_code()
} }
Error::Unimplemented { .. } | Error::NotSupported { .. } => StatusCode::Unsupported,
Error::QueryAccessDenied { .. } => StatusCode::AccessDenied, Error::QueryAccessDenied { .. } => StatusCode::AccessDenied,
Error::Datafusion { .. } => StatusCode::EngineExecuteQuery, Error::Datafusion { .. } => StatusCode::EngineExecuteQuery,
Error::TableMetadataManager { source, .. } => source.status_code(), Error::TableMetadataManager { source, .. } => source.status_code(),
Error::GetViewCache { source, .. } | Error::GetTableCache { source, .. } => { Error::TableCacheNotGet { .. } | Error::GetTableCache { .. } => StatusCode::Internal,
source.status_code()
}
} }
} }
@@ -281,6 +349,11 @@ mod tests {
.status_code() .status_code()
); );
assert_eq!(
StatusCode::Unexpected,
InvalidKeySnafu { key: None }.build().status_code()
);
assert_eq!( assert_eq!(
StatusCode::StorageUnavailable, StatusCode::StorageUnavailable,
Error::SystemCatalog { Error::SystemCatalog {
@@ -289,6 +362,19 @@ mod tests {
} }
.status_code() .status_code()
); );
assert_eq!(
StatusCode::Internal,
Error::SystemCatalogTypeMismatch {
data_type: ConcreteDataType::binary_datatype(),
location: Location::generate(),
}
.status_code()
);
assert_eq!(
StatusCode::StorageUnavailable,
EmptyValueSnafu {}.build().status_code()
);
} }
#[test] #[test]


@@ -12,7 +12,6 @@
// See the License for the specific language governing permissions and // See the License for the specific language governing permissions and
// limitations under the License. // limitations under the License.
mod cluster_info;
pub mod columns; pub mod columns;
pub mod key_column_usage; pub mod key_column_usage;
mod memory_table; mod memory_table;
@@ -24,7 +23,6 @@ pub mod schemata;
mod table_constraints; mod table_constraints;
mod table_names; mod table_names;
pub mod tables; pub mod tables;
pub(crate) mod utils;
use std::collections::HashMap; use std::collections::HashMap;
use std::sync::{Arc, Weak}; use std::sync::{Arc, Weak};
@@ -49,7 +47,6 @@ pub use table_names::*;
use self::columns::InformationSchemaColumns; use self::columns::InformationSchemaColumns;
use crate::error::Result; use crate::error::Result;
use crate::information_schema::cluster_info::InformationSchemaClusterInfo;
use crate::information_schema::key_column_usage::InformationSchemaKeyColumnUsage; use crate::information_schema::key_column_usage::InformationSchemaKeyColumnUsage;
use crate::information_schema::memory_table::{get_schema_columns, MemoryTable}; use crate::information_schema::memory_table::{get_schema_columns, MemoryTable};
use crate::information_schema::partitions::InformationSchemaPartitions; use crate::information_schema::partitions::InformationSchemaPartitions;
@@ -153,7 +150,6 @@ impl InformationSchemaProvider {
fn build_tables(&mut self) { fn build_tables(&mut self) {
let mut tables = HashMap::new(); let mut tables = HashMap::new();
// SECURITY NOTE:
// Carefully consider the tables that may expose sensitive cluster configurations, // Carefully consider the tables that may expose sensitive cluster configurations,
// authentication details, and other critical information. // authentication details, and other critical information.
// Only put these tables under `greptime` catalog to prevent info leak. // Only put these tables under `greptime` catalog to prevent info leak.
@@ -170,10 +166,6 @@ impl InformationSchemaProvider {
REGION_PEERS.to_string(), REGION_PEERS.to_string(),
self.build_table(REGION_PEERS).unwrap(), self.build_table(REGION_PEERS).unwrap(),
); );
tables.insert(
CLUSTER_INFO.to_string(),
self.build_table(CLUSTER_INFO).unwrap(),
);
} }
tables.insert(TABLES.to_string(), self.build_table(TABLES).unwrap()); tables.insert(TABLES.to_string(), self.build_table(TABLES).unwrap());
@@ -259,9 +251,6 @@ impl InformationSchemaProvider {
self.catalog_name.clone(), self.catalog_name.clone(),
self.catalog_manager.clone(), self.catalog_manager.clone(),
)) as _), )) as _),
CLUSTER_INFO => Some(Arc::new(InformationSchemaClusterInfo::new(
self.catalog_manager.clone(),
)) as _),
_ => None, _ => None,
} }
} }


@@ -1,317 +0,0 @@
// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use std::sync::{Arc, Weak};
use std::time::Duration;
use arrow_schema::SchemaRef as ArrowSchemaRef;
use common_catalog::consts::INFORMATION_SCHEMA_CLUSTER_INFO_TABLE_ID;
use common_config::Mode;
use common_error::ext::BoxedError;
use common_meta::cluster::{ClusterInfo, NodeInfo, NodeStatus};
use common_meta::peer::Peer;
use common_recordbatch::adapter::RecordBatchStreamAdapter;
use common_recordbatch::{RecordBatch, SendableRecordBatchStream};
use common_telemetry::warn;
use common_time::timestamp::Timestamp;
use datafusion::execution::TaskContext;
use datafusion::physical_plan::stream::RecordBatchStreamAdapter as DfRecordBatchStreamAdapter;
use datafusion::physical_plan::streaming::PartitionStream as DfPartitionStream;
use datafusion::physical_plan::SendableRecordBatchStream as DfSendableRecordBatchStream;
use datatypes::prelude::{ConcreteDataType, ScalarVectorBuilder, VectorRef};
use datatypes::schema::{ColumnSchema, Schema, SchemaRef};
use datatypes::timestamp::TimestampMillisecond;
use datatypes::value::Value;
use datatypes::vectors::{
Int64VectorBuilder, StringVectorBuilder, TimestampMillisecondVectorBuilder,
};
use snafu::ResultExt;
use store_api::storage::{ScanRequest, TableId};
use super::CLUSTER_INFO;
use crate::error::{CreateRecordBatchSnafu, InternalSnafu, ListNodesSnafu, Result};
use crate::information_schema::{utils, InformationTable, Predicates};
use crate::CatalogManager;
const PEER_ID: &str = "peer_id";
const PEER_TYPE: &str = "peer_type";
const PEER_ADDR: &str = "peer_addr";
const VERSION: &str = "version";
const GIT_COMMIT: &str = "git_commit";
const START_TIME: &str = "start_time";
const UPTIME: &str = "uptime";
const ACTIVE_TIME: &str = "active_time";
const INIT_CAPACITY: usize = 42;
/// The `CLUSTER_INFO` table provides information about the current topology information of the cluster.
///
/// - `peer_id`: the peer server id.
/// - `peer_type`: the peer type, such as `datanode`, `frontend`, `metasrv` etc.
/// - `peer_addr`: the peer gRPC address.
/// - `version`: the build package version of the peer.
/// - `git_commit`: the build git commit hash of the peer.
/// - `start_time`: the starting time of the peer.
/// - `uptime`: the uptime of the peer.
/// - `active_time`: the time since the last activity of the peer.
///
pub(super) struct InformationSchemaClusterInfo {
schema: SchemaRef,
catalog_manager: Weak<dyn CatalogManager>,
start_time_ms: u64,
}
impl InformationSchemaClusterInfo {
pub(super) fn new(catalog_manager: Weak<dyn CatalogManager>) -> Self {
Self {
schema: Self::schema(),
catalog_manager,
start_time_ms: common_time::util::current_time_millis() as u64,
}
}
pub(crate) fn schema() -> SchemaRef {
Arc::new(Schema::new(vec![
ColumnSchema::new(PEER_ID, ConcreteDataType::int64_datatype(), false),
ColumnSchema::new(PEER_TYPE, ConcreteDataType::string_datatype(), false),
ColumnSchema::new(PEER_ADDR, ConcreteDataType::string_datatype(), true),
ColumnSchema::new(VERSION, ConcreteDataType::string_datatype(), false),
ColumnSchema::new(GIT_COMMIT, ConcreteDataType::string_datatype(), false),
ColumnSchema::new(
START_TIME,
ConcreteDataType::timestamp_millisecond_datatype(),
true,
),
ColumnSchema::new(UPTIME, ConcreteDataType::string_datatype(), true),
ColumnSchema::new(ACTIVE_TIME, ConcreteDataType::string_datatype(), true),
]))
}
fn builder(&self) -> InformationSchemaClusterInfoBuilder {
InformationSchemaClusterInfoBuilder::new(
self.schema.clone(),
self.catalog_manager.clone(),
self.start_time_ms,
)
}
}
impl InformationTable for InformationSchemaClusterInfo {
fn table_id(&self) -> TableId {
INFORMATION_SCHEMA_CLUSTER_INFO_TABLE_ID
}
fn table_name(&self) -> &'static str {
CLUSTER_INFO
}
fn schema(&self) -> SchemaRef {
self.schema.clone()
}
fn to_stream(&self, request: ScanRequest) -> Result<SendableRecordBatchStream> {
let schema = self.schema.arrow_schema().clone();
let mut builder = self.builder();
let stream = Box::pin(DfRecordBatchStreamAdapter::new(
schema,
futures::stream::once(async move {
builder
.make_cluster_info(Some(request))
.await
.map(|x| x.into_df_record_batch())
.map_err(Into::into)
}),
));
Ok(Box::pin(
RecordBatchStreamAdapter::try_new(stream)
.map_err(BoxedError::new)
.context(InternalSnafu)?,
))
}
}
struct InformationSchemaClusterInfoBuilder {
schema: SchemaRef,
start_time_ms: u64,
catalog_manager: Weak<dyn CatalogManager>,
peer_ids: Int64VectorBuilder,
peer_types: StringVectorBuilder,
peer_addrs: StringVectorBuilder,
versions: StringVectorBuilder,
git_commits: StringVectorBuilder,
start_times: TimestampMillisecondVectorBuilder,
uptimes: StringVectorBuilder,
active_times: StringVectorBuilder,
}
impl InformationSchemaClusterInfoBuilder {
fn new(
schema: SchemaRef,
catalog_manager: Weak<dyn CatalogManager>,
start_time_ms: u64,
) -> Self {
Self {
schema,
catalog_manager,
peer_ids: Int64VectorBuilder::with_capacity(INIT_CAPACITY),
peer_types: StringVectorBuilder::with_capacity(INIT_CAPACITY),
peer_addrs: StringVectorBuilder::with_capacity(INIT_CAPACITY),
versions: StringVectorBuilder::with_capacity(INIT_CAPACITY),
git_commits: StringVectorBuilder::with_capacity(INIT_CAPACITY),
start_times: TimestampMillisecondVectorBuilder::with_capacity(INIT_CAPACITY),
uptimes: StringVectorBuilder::with_capacity(INIT_CAPACITY),
active_times: StringVectorBuilder::with_capacity(INIT_CAPACITY),
start_time_ms,
}
}
/// Construct the `information_schema.cluster_info` virtual table
async fn make_cluster_info(&mut self, request: Option<ScanRequest>) -> Result<RecordBatch> {
let predicates = Predicates::from_scan_request(&request);
let mode = utils::running_mode(&self.catalog_manager)?.unwrap_or(Mode::Standalone);
match mode {
Mode::Standalone => {
let build_info = common_version::build_info();
self.add_node_info(
&predicates,
NodeInfo {
// For the standalone:
// - id always 0
// - empty string for peer_addr
peer: Peer {
id: 0,
addr: "".to_string(),
},
last_activity_ts: -1,
status: NodeStatus::Standalone,
version: build_info.version.to_string(),
git_commit: build_info.commit_short.to_string(),
// Use `self.start_time_ms` instead.
// It's not precise but enough.
start_time_ms: self.start_time_ms,
},
);
}
Mode::Distributed => {
if let Some(meta_client) = utils::meta_client(&self.catalog_manager)? {
let node_infos = meta_client
.list_nodes(None)
.await
.map_err(BoxedError::new)
.context(ListNodesSnafu)?;
for node_info in node_infos {
self.add_node_info(&predicates, node_info);
}
} else {
warn!("Could not find meta client in distributed mode.");
}
}
}
self.finish()
}
fn add_node_info(&mut self, predicates: &Predicates, node_info: NodeInfo) {
let peer_type = node_info.status.role_name();
let row = [
(PEER_ID, &Value::from(node_info.peer.id)),
(PEER_TYPE, &Value::from(peer_type)),
(PEER_ADDR, &Value::from(node_info.peer.addr.as_str())),
(VERSION, &Value::from(node_info.version.as_str())),
(GIT_COMMIT, &Value::from(node_info.git_commit.as_str())),
];
if !predicates.eval(&row) {
return;
}
if peer_type == "FRONTEND" || peer_type == "METASRV" {
// Always set peer_id to be -1 for frontends and metasrvs
self.peer_ids.push(Some(-1));
} else {
self.peer_ids.push(Some(node_info.peer.id as i64));
}
self.peer_types.push(Some(peer_type));
self.peer_addrs.push(Some(&node_info.peer.addr));
self.versions.push(Some(&node_info.version));
self.git_commits.push(Some(&node_info.git_commit));
if node_info.start_time_ms > 0 {
self.start_times
.push(Some(TimestampMillisecond(Timestamp::new_millisecond(
node_info.start_time_ms as i64,
))));
self.uptimes.push(Some(
Self::format_duration_since(node_info.start_time_ms).as_str(),
));
} else {
self.start_times.push(None);
self.uptimes.push(None);
}
if node_info.last_activity_ts > 0 {
self.active_times.push(Some(
Self::format_duration_since(node_info.last_activity_ts as u64).as_str(),
));
} else {
self.active_times.push(None);
}
}
fn format_duration_since(ts: u64) -> String {
let now = common_time::util::current_time_millis() as u64;
let duration_since = now - ts;
humantime::format_duration(Duration::from_millis(duration_since)).to_string()
}
fn finish(&mut self) -> Result<RecordBatch> {
let columns: Vec<VectorRef> = vec![
Arc::new(self.peer_ids.finish()),
Arc::new(self.peer_types.finish()),
Arc::new(self.peer_addrs.finish()),
Arc::new(self.versions.finish()),
Arc::new(self.git_commits.finish()),
Arc::new(self.start_times.finish()),
Arc::new(self.uptimes.finish()),
Arc::new(self.active_times.finish()),
];
RecordBatch::new(self.schema.clone(), columns).context(CreateRecordBatchSnafu)
}
}
impl DfPartitionStream for InformationSchemaClusterInfo {
fn schema(&self) -> &ArrowSchemaRef {
self.schema.arrow_schema()
}
fn execute(&self, _: Arc<TaskContext>) -> DfSendableRecordBatchStream {
let schema = self.schema.arrow_schema().clone();
let mut builder = self.builder();
Box::pin(DfRecordBatchStreamAdapter::new(
schema,
futures::stream::once(async move {
builder
.make_cluster_info(None)
.await
.map(|x| x.into_df_record_batch())
.map_err(Into::into)
}),
))
}
}
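`format_duration_since` above relies on `humantime` for the human-readable `uptime` and `active_time` columns. A tiny standalone sketch of that call:

```rust
use std::time::Duration;

fn main() {
    // 1 day, 1 hour, 1 minute and 1 second, expressed in milliseconds.
    let uptime = Duration::from_millis(90_061_000);
    // Prints something like "1day 1h 1m 1s".
    println!("{}", humantime::format_duration(uptime));
}
```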


@@ -20,9 +20,9 @@ use common_catalog::consts::{
     SEMANTIC_TYPE_TIME_INDEX,
 };
 use common_error::ext::BoxedError;
-use common_query::physical_plan::TaskContext;
 use common_recordbatch::adapter::RecordBatchStreamAdapter;
 use common_recordbatch::{RecordBatch, SendableRecordBatchStream};
+use datafusion::execution::TaskContext;
 use datafusion::physical_plan::stream::RecordBatchStreamAdapter as DfRecordBatchStreamAdapter;
 use datafusion::physical_plan::streaming::PartitionStream as DfPartitionStream;
 use datafusion::physical_plan::SendableRecordBatchStream as DfSendableRecordBatchStream;
@@ -258,7 +258,7 @@ impl InformationSchemaColumnsBuilder {
         let predicates = Predicates::from_scan_request(&request);
 
         for schema_name in catalog_manager.schema_names(&catalog_name).await? {
-            let mut stream = catalog_manager.tables(&catalog_name, &schema_name);
+            let mut stream = catalog_manager.tables(&catalog_name, &schema_name).await;
 
             while let Some(table) = stream.try_next().await? {
                 let keys = &table.table_info().meta.primary_key_indices;

View File

@@ -17,9 +17,9 @@ use std::sync::{Arc, Weak};
 use arrow_schema::SchemaRef as ArrowSchemaRef;
 use common_catalog::consts::INFORMATION_SCHEMA_KEY_COLUMN_USAGE_TABLE_ID;
 use common_error::ext::BoxedError;
-use common_query::physical_plan::TaskContext;
 use common_recordbatch::adapter::RecordBatchStreamAdapter;
 use common_recordbatch::{RecordBatch, SendableRecordBatchStream};
+use datafusion::execution::TaskContext;
 use datafusion::physical_plan::stream::RecordBatchStreamAdapter as DfRecordBatchStreamAdapter;
 use datafusion::physical_plan::streaming::PartitionStream as DfPartitionStream;
 use datafusion::physical_plan::SendableRecordBatchStream as DfSendableRecordBatchStream;

View File

@@ -17,9 +17,9 @@ use std::sync::Arc;
 use arrow_schema::SchemaRef as ArrowSchemaRef;
 use common_error::ext::BoxedError;
-use common_query::physical_plan::TaskContext;
 use common_recordbatch::adapter::RecordBatchStreamAdapter;
 use common_recordbatch::{RecordBatch, SendableRecordBatchStream};
+use datafusion::execution::TaskContext;
 use datafusion::physical_plan::stream::RecordBatchStreamAdapter as DfRecordBatchStreamAdapter;
 use datafusion::physical_plan::streaming::PartitionStream as DfPartitionStream;
 use datafusion::physical_plan::SendableRecordBatchStream as DfSendableRecordBatchStream;

View File

@@ -18,10 +18,10 @@ use std::sync::{Arc, Weak};
 use arrow_schema::SchemaRef as ArrowSchemaRef;
 use common_catalog::consts::INFORMATION_SCHEMA_PARTITIONS_TABLE_ID;
 use common_error::ext::BoxedError;
-use common_query::physical_plan::TaskContext;
 use common_recordbatch::adapter::RecordBatchStreamAdapter;
 use common_recordbatch::{RecordBatch, SendableRecordBatchStream};
 use common_time::datetime::DateTime;
+use datafusion::execution::TaskContext;
 use datafusion::physical_plan::stream::RecordBatchStreamAdapter as DfRecordBatchStreamAdapter;
 use datafusion::physical_plan::streaming::PartitionStream as DfPartitionStream;
 use datafusion::physical_plan::SendableRecordBatchStream as DfSendableRecordBatchStream;
@@ -243,6 +243,7 @@ impl InformationSchemaPartitionsBuilder {
         for schema_name in catalog_manager.schema_names(&catalog_name).await? {
             let table_info_stream = catalog_manager
                 .tables(&catalog_name, &schema_name)
+                .await
                 .try_filter_map(|t| async move {
                     let table_info = t.table_info();
                     if table_info.table_type == TableType::Temporary {
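
The `.await` added above (and in the region_peers hunk further down) reflects `tables(..)` becoming an async call: the table stream is now produced asynchronously and then filtered with `TryStreamExt::try_filter_map`. A self-contained sketch of that pattern built on plain `futures` types; the `fetch_tables` helper and its sample data are hypothetical stand-ins for the catalog manager:

use futures::stream::{self, Stream, TryStreamExt};

struct TableInfo {
    name: String,
    temporary: bool,
}

// Stand-in for an async `tables(..)` call that yields a fallible stream of table infos.
async fn fetch_tables() -> impl Stream<Item = Result<TableInfo, String>> {
    stream::iter(vec![
        Ok(TableInfo { name: "metrics".to_string(), temporary: false }),
        Ok(TableInfo { name: "scratch".to_string(), temporary: true }),
    ])
}

fn main() -> Result<(), String> {
    futures::executor::block_on(async {
        let names: Vec<String> = fetch_tables()
            .await // the stream itself is now obtained asynchronously
            .try_filter_map(|t| async move {
                // keep non-temporary tables, drop the rest, propagate errors as-is
                Ok::<_, String>((!t.temporary).then_some(t.name))
            })
            .try_collect()
            .await?;
        println!("{names:?}");
        Ok(())
    })
}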

View File

@@ -14,9 +14,10 @@
 use arrow::array::StringArray;
 use arrow::compute::kernels::comparison;
+use common_query::logical_plan::DfExpr;
 use datafusion::common::ScalarValue;
 use datafusion::logical_expr::expr::Like;
-use datafusion::logical_expr::{Expr, Operator};
+use datafusion::logical_expr::Operator;
 use datatypes::value::Value;
 use store_api::storage::ScanRequest;
@@ -117,12 +118,12 @@ impl Predicate {
     }
 
     /// Try to create a predicate from datafusion [`Expr`], return None if fails.
-    fn from_expr(expr: Expr) -> Option<Predicate> {
+    fn from_expr(expr: DfExpr) -> Option<Predicate> {
         match expr {
             // NOT expr
-            Expr::Not(expr) => Some(Predicate::Not(Box::new(Self::from_expr(*expr)?))),
+            DfExpr::Not(expr) => Some(Predicate::Not(Box::new(Self::from_expr(*expr)?))),
             // expr LIKE pattern
-            Expr::Like(Like {
+            DfExpr::Like(Like {
                 negated,
                 expr,
                 pattern,
@@ -130,10 +131,10 @@
                 ..
             }) if is_column(&expr) && is_string_literal(&pattern) => {
                 // Safety: ensured by guard
-                let Expr::Column(c) = *expr else {
+                let DfExpr::Column(c) = *expr else {
                     unreachable!();
                 };
-                let Expr::Literal(ScalarValue::Utf8(Some(pattern))) = *pattern else {
+                let DfExpr::Literal(ScalarValue::Utf8(Some(pattern))) = *pattern else {
                     unreachable!();
                 };
@@ -146,10 +147,10 @@
                 }
             }
             // left OP right
-            Expr::BinaryExpr(bin) => match (*bin.left, bin.op, *bin.right) {
+            DfExpr::BinaryExpr(bin) => match (*bin.left, bin.op, *bin.right) {
                 // left == right
-                (Expr::Literal(scalar), Operator::Eq, Expr::Column(c))
-                | (Expr::Column(c), Operator::Eq, Expr::Literal(scalar)) => {
+                (DfExpr::Literal(scalar), Operator::Eq, DfExpr::Column(c))
+                | (DfExpr::Column(c), Operator::Eq, DfExpr::Literal(scalar)) => {
                     let Ok(v) = Value::try_from(scalar) else {
                         return None;
                     };
@@ -157,8 +158,8 @@
                     Some(Predicate::Eq(c.name, v))
                 }
                 // left != right
-                (Expr::Literal(scalar), Operator::NotEq, Expr::Column(c))
-                | (Expr::Column(c), Operator::NotEq, Expr::Literal(scalar)) => {
+                (DfExpr::Literal(scalar), Operator::NotEq, DfExpr::Column(c))
+                | (DfExpr::Column(c), Operator::NotEq, DfExpr::Literal(scalar)) => {
                     let Ok(v) = Value::try_from(scalar) else {
                         return None;
                     };
@@ -182,14 +183,14 @@
                 _ => None,
             },
             // [NOT] IN (LIST)
-            Expr::InList(list) => {
+            DfExpr::InList(list) => {
                 match (*list.expr, list.list, list.negated) {
                     // column [NOT] IN (v1, v2, v3, ...)
-                    (Expr::Column(c), list, negated) if is_all_scalars(&list) => {
+                    (DfExpr::Column(c), list, negated) if is_all_scalars(&list) => {
                         let mut values = Vec::with_capacity(list.len());
                         for scalar in list {
                             // Safety: checked by `is_all_scalars`
-                            let Expr::Literal(scalar) = scalar else {
+                            let DfExpr::Literal(scalar) = scalar else {
                                 unreachable!();
                             };
@@ -236,12 +237,12 @@ fn like_utf8(s: &str, pattern: &str, case_insensitive: &bool) -> Option<bool> {
     Some(booleans.value(0))
 }
 
-fn is_string_literal(expr: &Expr) -> bool {
-    matches!(expr, Expr::Literal(ScalarValue::Utf8(Some(_))))
+fn is_string_literal(expr: &DfExpr) -> bool {
+    matches!(expr, DfExpr::Literal(ScalarValue::Utf8(Some(_))))
 }
 
-fn is_column(expr: &Expr) -> bool {
-    matches!(expr, Expr::Column(_))
+fn is_column(expr: &DfExpr) -> bool {
+    matches!(expr, DfExpr::Column(_))
 }
 
 /// A list of predicate
@@ -256,7 +257,7 @@ impl Predicates {
         let mut predicates = Vec::with_capacity(request.filters.len());
 
         for filter in &request.filters {
-            if let Some(predicate) = Predicate::from_expr(filter.clone()) {
+            if let Some(predicate) = Predicate::from_expr(filter.df_expr().clone()) {
                 predicates.push(predicate);
             }
         }
@@ -285,8 +286,8 @@
 }
 
 /// Returns true when the values are all [`DfExpr::Literal`].
-fn is_all_scalars(list: &[Expr]) -> bool {
-    list.iter().all(|v| matches!(v, Expr::Literal(_)))
+fn is_all_scalars(list: &[DfExpr]) -> bool {
+    list.iter().all(|v| matches!(v, DfExpr::Literal(_)))
 }
 
 #[cfg(test)]
@@ -375,7 +376,7 @@ mod tests {
     #[test]
     fn test_predicate_like() {
         // case insensitive
-        let expr = Expr::Like(Like {
+        let expr = DfExpr::Like(Like {
             negated: false,
             expr: Box::new(column("a")),
             pattern: Box::new(string_literal("%abc")),
@@ -402,7 +403,7 @@
         assert!(p.eval(&[]).is_none());
 
         // case sensitive
-        let expr = Expr::Like(Like {
+        let expr = DfExpr::Like(Like {
             negated: false,
             expr: Box::new(column("a")),
             pattern: Box::new(string_literal("%abc")),
@@ -422,7 +423,7 @@
         assert!(p.eval(&[]).is_none());
 
         // not like
-        let expr = Expr::Like(Like {
+        let expr = DfExpr::Like(Like {
             negated: true,
             expr: Box::new(column("a")),
             pattern: Box::new(string_literal("%abc")),
@@ -436,15 +437,15 @@
         assert!(p.eval(&[]).is_none());
     }
 
-    fn column(name: &str) -> Expr {
-        Expr::Column(Column {
+    fn column(name: &str) -> DfExpr {
+        DfExpr::Column(Column {
             relation: None,
             name: name.to_string(),
         })
     }
 
-    fn string_literal(v: &str) -> Expr {
-        Expr::Literal(ScalarValue::Utf8(Some(v.to_string())))
+    fn string_literal(v: &str) -> DfExpr {
+        DfExpr::Literal(ScalarValue::Utf8(Some(v.to_string())))
     }
 
     fn match_string_value(v: &Value, expected: &str) -> bool {
@@ -462,14 +463,14 @@
         result
     }
 
-    fn mock_exprs() -> (Expr, Expr) {
-        let expr1 = Expr::BinaryExpr(BinaryExpr {
+    fn mock_exprs() -> (DfExpr, DfExpr) {
+        let expr1 = DfExpr::BinaryExpr(BinaryExpr {
             left: Box::new(column("a")),
             op: Operator::Eq,
             right: Box::new(string_literal("a_value")),
         });
 
-        let expr2 = Expr::BinaryExpr(BinaryExpr {
+        let expr2 = DfExpr::BinaryExpr(BinaryExpr {
             left: Box::new(column("b")),
             op: Operator::NotEq,
             right: Box::new(string_literal("b_value")),
@@ -490,17 +491,17 @@
         assert!(matches!(&p2, Predicate::NotEq(column, v) if column == "b"
             && match_string_value(v, "b_value")));
 
-        let and_expr = Expr::BinaryExpr(BinaryExpr {
+        let and_expr = DfExpr::BinaryExpr(BinaryExpr {
             left: Box::new(expr1.clone()),
             op: Operator::And,
             right: Box::new(expr2.clone()),
         });
-        let or_expr = Expr::BinaryExpr(BinaryExpr {
+        let or_expr = DfExpr::BinaryExpr(BinaryExpr {
             left: Box::new(expr1.clone()),
             op: Operator::Or,
             right: Box::new(expr2.clone()),
         });
-        let not_expr = Expr::Not(Box::new(expr1.clone()));
+        let not_expr = DfExpr::Not(Box::new(expr1.clone()));
 
         let and_p = Predicate::from_expr(and_expr).unwrap();
         assert!(matches!(and_p, Predicate::And(left, right) if *left == p1 && *right == p2));
@@ -509,7 +510,7 @@ mod tests {
         let not_p = Predicate::from_expr(not_expr).unwrap();
         assert!(matches!(not_p, Predicate::Not(p) if *p == p1));
 
-        let inlist_expr = Expr::InList(InList {
+        let inlist_expr = DfExpr::InList(InList {
             expr: Box::new(column("a")),
             list: vec![string_literal("a1"), string_literal("a2")],
             negated: false,
@@ -519,7 +520,7 @@
         assert!(matches!(&inlist_p, Predicate::InList(c, values) if c == "a"
             && match_string_values(values, &["a1", "a2"])));
 
-        let inlist_expr = Expr::InList(InList {
+        let inlist_expr = DfExpr::InList(InList {
             expr: Box::new(column("a")),
             list: vec![string_literal("a1"), string_literal("a2")],
             negated: true,
@@ -539,7 +540,7 @@
         let (expr1, expr2) = mock_exprs();
 
         let request = ScanRequest {
-            filters: vec![expr1, expr2],
+            filters: vec![expr1.into(), expr2.into()],
             ..Default::default()
         };
         let predicates = Predicates::from_scan_request(&Some(request));
@@ -577,7 +578,7 @@
         let (expr1, expr2) = mock_exprs();
 
         let request = ScanRequest {
-            filters: vec![expr1, expr2],
+            filters: vec![expr1.into(), expr2.into()],
             ..Default::default()
         };
         let predicates = Predicates::from_scan_request(&Some(request));
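
The predicate changes above are mostly a type rename: the bare DataFusion `Expr` becomes the `DfExpr` import from `common_query::logical_plan` (apparently an alias for the same DataFusion expression type, given how mechanical the substitution is), and `ScanRequest::filters` now stores a wrapper whose inner expression is reached through `.df_expr()`. The conversion logic itself is untouched. A small sketch in the style of the tests above, assuming it lives in the same test module so `Predicate`, `DfExpr`, `Column`, `ScalarValue`, `BinaryExpr` and `Operator` are in scope exactly as shown; the column name and value are invented:

    #[test]
    fn eq_predicate_sketch() {
        // host = 'h1'
        let expr = DfExpr::BinaryExpr(BinaryExpr {
            left: Box::new(DfExpr::Column(Column {
                relation: None,
                name: "host".to_string(),
            })),
            op: Operator::Eq,
            right: Box::new(DfExpr::Literal(ScalarValue::Utf8(Some("h1".to_string())))),
        });

        // `from_expr` recognises the column-equals-literal shape and keeps it;
        // unsupported shapes simply return None.
        let predicate = Predicate::from_expr(expr).unwrap();
        assert!(matches!(&predicate, Predicate::Eq(column, _) if column == "host"));
    }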

View File

@@ -19,9 +19,9 @@ use arrow_schema::SchemaRef as ArrowSchemaRef;
 use common_catalog::consts::INFORMATION_SCHEMA_REGION_PEERS_TABLE_ID;
 use common_error::ext::BoxedError;
 use common_meta::rpc::router::RegionRoute;
-use common_query::physical_plan::TaskContext;
 use common_recordbatch::adapter::RecordBatchStreamAdapter;
 use common_recordbatch::{RecordBatch, SendableRecordBatchStream};
+use datafusion::execution::TaskContext;
 use datafusion::physical_plan::stream::RecordBatchStreamAdapter as DfRecordBatchStreamAdapter;
 use datafusion::physical_plan::streaming::PartitionStream as DfPartitionStream;
 use datafusion::physical_plan::SendableRecordBatchStream as DfSendableRecordBatchStream;
@@ -55,7 +55,7 @@ const INIT_CAPACITY: usize = 42;
 ///
 /// - `region_id`: the region id
 /// - `peer_id`: the region storage datanode peer id
-/// - `peer_addr`: the region storage datanode gRPC peer address
+/// - `peer_addr`: the region storage datanode peer address
 /// - `is_leader`: whether the peer is the leader
 /// - `status`: the region status, `ALIVE` or `DOWNGRADED`.
 /// - `down_seconds`: the duration of being offline, in seconds.
@@ -179,6 +179,7 @@ impl InformationSchemaRegionPeersBuilder {
         for schema_name in catalog_manager.schema_names(&catalog_name).await? {
             let table_id_stream = catalog_manager
                 .tables(&catalog_name, &schema_name)
+                .await
                 .try_filter_map(|t| async move {
                     let table_info = t.table_info();
                     if table_info.table_type == TableType::Temporary {

View File

@@ -17,10 +17,10 @@ use std::sync::Arc;
 use arrow_schema::SchemaRef as ArrowSchemaRef;
 use common_catalog::consts::INFORMATION_SCHEMA_RUNTIME_METRICS_TABLE_ID;
 use common_error::ext::BoxedError;
-use common_query::physical_plan::TaskContext;
 use common_recordbatch::adapter::RecordBatchStreamAdapter;
 use common_recordbatch::{RecordBatch, SendableRecordBatchStream};
 use common_time::util::current_time_millis;
+use datafusion::execution::TaskContext;
 use datafusion::physical_plan::stream::RecordBatchStreamAdapter as DfRecordBatchStreamAdapter;
 use datafusion::physical_plan::streaming::PartitionStream as DfPartitionStream;
 use datafusion::physical_plan::SendableRecordBatchStream as DfSendableRecordBatchStream;
@@ -28,8 +28,8 @@ use datatypes::prelude::{ConcreteDataType, MutableVector};
 use datatypes::scalars::ScalarVectorBuilder;
 use datatypes::schema::{ColumnSchema, Schema, SchemaRef};
 use datatypes::vectors::{
-    ConstantVector, Float64VectorBuilder, StringVectorBuilder, TimestampMillisecondVector,
-    VectorRef,
+    ConstantVector, Float64VectorBuilder, StringVector, StringVectorBuilder,
+    TimestampMillisecondVector, VectorRef,
 };
 use itertools::Itertools;
 use snafu::ResultExt;
@@ -45,8 +45,8 @@ pub(super) struct InformationSchemaMetrics {
 const METRIC_NAME: &str = "metric_name";
 const METRIC_VALUE: &str = "value";
 const METRIC_LABELS: &str = "labels";
-const PEER_ADDR: &str = "peer_addr";
-const PEER_TYPE: &str = "peer_type";
+const NODE: &str = "node";
+const NODE_TYPE: &str = "node_type";
 const TIMESTAMP: &str = "timestamp";
 
 /// The `information_schema.runtime_metrics` virtual table.
@@ -63,8 +63,8 @@ impl InformationSchemaMetrics {
             ColumnSchema::new(METRIC_NAME, ConcreteDataType::string_datatype(), false),
             ColumnSchema::new(METRIC_VALUE, ConcreteDataType::float64_datatype(), false),
             ColumnSchema::new(METRIC_LABELS, ConcreteDataType::string_datatype(), true),
-            ColumnSchema::new(PEER_ADDR, ConcreteDataType::string_datatype(), true),
-            ColumnSchema::new(PEER_TYPE, ConcreteDataType::string_datatype(), false),
+            ColumnSchema::new(NODE, ConcreteDataType::string_datatype(), false),
+            ColumnSchema::new(NODE_TYPE, ConcreteDataType::string_datatype(), false),
             ColumnSchema::new(
                 TIMESTAMP,
                 ConcreteDataType::timestamp_millisecond_datatype(),
@@ -104,7 +104,6 @@ impl InformationTable for InformationSchemaMetrics {
                     .map_err(Into::into)
             }),
        ));
-
        Ok(Box::pin(
            RecordBatchStreamAdapter::try_new(stream)
                .map_err(BoxedError::new)
@@ -119,8 +118,6 @@ struct InformationSchemaMetricsBuilder {
     metric_names: StringVectorBuilder,
     metric_values: Float64VectorBuilder,
     metric_labels: StringVectorBuilder,
-    peer_addrs: StringVectorBuilder,
-    peer_types: StringVectorBuilder,
 }
 
 impl InformationSchemaMetricsBuilder {
@@ -130,24 +127,13 @@ impl InformationSchemaMetricsBuilder {
             metric_names: StringVectorBuilder::with_capacity(42),
             metric_values: Float64VectorBuilder::with_capacity(42),
             metric_labels: StringVectorBuilder::with_capacity(42),
-            peer_addrs: StringVectorBuilder::with_capacity(42),
-            peer_types: StringVectorBuilder::with_capacity(42),
         }
     }
 
-    fn add_metric(
-        &mut self,
-        metric_name: &str,
-        labels: String,
-        metric_value: f64,
-        peer: Option<&str>,
-        peer_type: &str,
-    ) {
+    fn add_metric(&mut self, metric_name: &str, labels: String, metric_value: f64) {
         self.metric_names.push(Some(metric_name));
         self.metric_values.push(Some(metric_value));
         self.metric_labels.push(Some(&labels));
-        self.peer_addrs.push(peer);
-        self.peer_types.push(Some(peer_type));
     }
 
     async fn make_metrics(&mut self, _request: Option<ScanRequest>) -> Result<RecordBatch> {
@@ -184,19 +170,18 @@
                     .join(", "),
                 // Safety: always has a sample
                 ts.samples[0].value,
-                // The peer column is always `None` for standalone
-                None,
-                "STANDALONE",
             );
         }
 
-        // FIXME(dennis): fetching other peers metrics
         self.finish()
     }
 
     fn finish(&mut self) -> Result<RecordBatch> {
         let rows_num = self.metric_names.len();
-
+        let unknowns = Arc::new(ConstantVector::new(
+            Arc::new(StringVector::from(vec!["unknown"])),
+            rows_num,
+        ));
         let timestamps = Arc::new(ConstantVector::new(
             Arc::new(TimestampMillisecondVector::from_slice([
                 current_time_millis(),
@@ -208,8 +193,9 @@
             Arc::new(self.metric_names.finish()),
             Arc::new(self.metric_values.finish()),
             Arc::new(self.metric_labels.finish()),
-            Arc::new(self.peer_addrs.finish()),
-            Arc::new(self.peer_types.finish()),
+            // TODO(dennis): supports node and node_type for cluster
+            unknowns.clone(),
+            unknowns,
             timestamps,
         ];
@@ -257,8 +243,8 @@
         assert!(result_literal.contains(METRIC_NAME));
         assert!(result_literal.contains(METRIC_VALUE));
         assert!(result_literal.contains(METRIC_LABELS));
-        assert!(result_literal.contains(PEER_ADDR));
-        assert!(result_literal.contains(PEER_TYPE));
+        assert!(result_literal.contains(NODE));
+        assert!(result_literal.contains(NODE_TYPE));
         assert!(result_literal.contains(TIMESTAMP));
     }
 }
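
In the added lines of `finish()` above, the `node` and `node_type` columns are filled with a placeholder string by wrapping a one-element `StringVector` in a `ConstantVector` sized to the row count, so a single stored value backs every row. A small sketch of that trick, assuming this repository's `datatypes` crate (the `Vector` trait path providing `len()` is an assumption), with a made-up row count:

use std::sync::Arc;

use datatypes::prelude::Vector; // trait that provides len(); exact path assumed
use datatypes::vectors::{ConstantVector, StringVector};

fn constant_column(rows_num: usize) -> Arc<ConstantVector> {
    // One stored value, logically repeated `rows_num` times.
    Arc::new(ConstantVector::new(
        Arc::new(StringVector::from(vec!["unknown"])),
        rows_num,
    ))
}

fn main() {
    let col = constant_column(3);
    // The column reports the requested length even though it stores a single value.
    assert_eq!(col.len(), 3);
}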

View File

@@ -17,9 +17,9 @@ use std::sync::{Arc, Weak};
 use arrow_schema::SchemaRef as ArrowSchemaRef;
 use common_catalog::consts::INFORMATION_SCHEMA_SCHEMATA_TABLE_ID;
 use common_error::ext::BoxedError;
-use common_query::physical_plan::TaskContext;
 use common_recordbatch::adapter::RecordBatchStreamAdapter;
 use common_recordbatch::{RecordBatch, SendableRecordBatchStream};
+use datafusion::execution::TaskContext;
 use datafusion::physical_plan::stream::RecordBatchStreamAdapter as DfRecordBatchStreamAdapter;
 use datafusion::physical_plan::streaming::PartitionStream as DfPartitionStream;
 use datafusion::physical_plan::SendableRecordBatchStream as DfSendableRecordBatchStream;

View File

@@ -17,9 +17,9 @@ use std::sync::{Arc, Weak};
 use arrow_schema::SchemaRef as ArrowSchemaRef;
 use common_catalog::consts::INFORMATION_SCHEMA_TABLE_CONSTRAINTS_TABLE_ID;
 use common_error::ext::BoxedError;
-use common_query::physical_plan::TaskContext;
 use common_recordbatch::adapter::RecordBatchStreamAdapter;
 use common_recordbatch::{RecordBatch, SendableRecordBatchStream};
+use datafusion::execution::TaskContext;
 use datafusion::physical_plan::stream::RecordBatchStreamAdapter as DfRecordBatchStreamAdapter;
 use datafusion::physical_plan::streaming::PartitionStream as DfPartitionStream;
 use datafusion::physical_plan::SendableRecordBatchStream as DfSendableRecordBatchStream;
@@ -177,7 +177,7 @@ impl InformationSchemaTableConstraintsBuilder {
         let predicates = Predicates::from_scan_request(&request);
 
         for schema_name in catalog_manager.schema_names(&catalog_name).await? {
-            let mut stream = catalog_manager.tables(&catalog_name, &schema_name);
+            let mut stream = catalog_manager.tables(&catalog_name, &schema_name).await;
 
             while let Some(table) = stream.try_next().await? {
                 let keys = &table.table_info().meta.primary_key_indices;

Some files were not shown because too many files have changed in this diff.