Mirror of https://github.com/GreptimeTeam/greptimedb.git (synced 2026-01-10 15:22:56 +00:00)
Compare commits: poc_datafl ... v0.9.0-nig (234 Commits)
| Author | SHA1 | Date |
|---|---|---|
|  | ddc7a80f56 |  |
|  | a7aa556763 |  |
|  | ef935a1de6 |  |
|  | 352cc9ddde |  |
|  | b6585e3581 |  |
|  | 10b7a3d24d |  |
|  | 8702066967 |  |
|  | df0fff2f2c |  |
|  | a779cb36ec |  |
|  | 948c8695d0 |  |
|  | 4d4a6cd265 |  |
|  | 5dde148b3d |  |
|  | 8cbe7166b0 |  |
|  | f5ac158605 |  |
|  | 120447779c |  |
|  | 82f6373574 |  |
|  | 1e815dddf1 |  |
|  | b2f61aa1cf |  |
|  | a1e2612bbf |  |
|  | 9aaf7d79bf |  |
|  | 4a4237115a |  |
|  | 840f81e0fd |  |
|  | cdd4baf183 |  |
|  | 4b42c7b840 |  |
|  | a44fe627ce |  |
|  | 77904adaaf |  |
|  | 07cbabab7b |  |
|  | ea7c17089f |  |
|  | 517917453d |  |
|  | 0139a70549 |  |
|  | 5566dd72f2 |  |
|  | dea33a7aaf |  |
|  | 15ad9f2f6f |  |
|  | fce65c97e3 |  |
|  | ac574b66ab |  |
|  | 1e52ba325f |  |
|  | b739c9fd10 |  |
|  | 21c89f3247 |  |
|  | 5bcd7a14bb |  |
|  | 4306cba866 |  |
|  | 4c3d4af127 |  |
|  | 48a0f39b19 |  |
|  | 8abebad458 |  |
|  | cc2f7efb98 |  |
|  | 22d12683b4 |  |
|  | fe74efdafe |  |
|  | cd9705ccd7 |  |
|  | ea2d067cf1 |  |
|  | 70d113a355 |  |
|  | cb657ae51e |  |
|  | 141d017576 |  |
|  | 0fc18b6865 |  |
|  | 0aceebf0a3 |  |
|  | 558272de61 |  |
|  | f4a5a44549 |  |
|  | 5390603855 |  |
|  | a2e3532a57 |  |
|  | 2faa6d6c97 |  |
|  | d6392acd65 |  |
|  | 01e3a24cf7 |  |
|  | bf3ad44584 |  |
|  | 11a903f193 |  |
|  | acdfaabfa5 |  |
|  | 54ca06ba08 |  |
|  | 1f315e300f |  |
|  | 573e25a40f |  |
|  | f8ec46493f |  |
|  | 14a2d83594 |  |
|  | 65f8b72d34 |  |
|  | 9473daab8b |  |
|  | 5a6021e34f |  |
|  | 1b00526de5 |  |
|  | 5533bd9293 |  |
|  | 587e99d806 |  |
|  | 9cae15bd1b |  |
|  | d8b51cfaba |  |
|  | e142ca40d7 |  |
|  | e982d2e55c |  |
|  | 09e0e1b246 |  |
|  | 9c42825f5d |  |
|  | 4719569e4f |  |
|  | b03cb3860e |  |
|  | 2ade511f26 |  |
|  | 16b85b06b6 |  |
|  | 03cacf9948 |  |
|  | c23f8ad113 |  |
|  | e0a2c5a581 |  |
|  | 417ab3b779 |  |
|  | 1850fe2956 |  |
|  | dd06e107f9 |  |
|  | 98c19ed0fa |  |
|  | c0aed1d267 |  |
|  | 0a07130931 |  |
|  | a6269397c8 |  |
|  | a80059b47f |  |
|  | b3a4362626 |  |
|  | 51e2b6e728 |  |
|  | d1838fb28d |  |
|  | cd97a39904 |  |
|  | 4e5dd1ebb0 |  |
|  | 88cdefa41e |  |
|  | c2218f8be8 |  |
|  | 45fee948e9 |  |
|  | ea49f8a5c4 |  |
|  | 43afea1a9d |  |
|  | fcfcf86385 |  |
|  | 26b112ab57 |  |
|  | 24612f62dd |  |
|  | 85a231850d |  |
|  | f024054ed3 |  |
|  | 05751084e7 |  |
|  | 8b6596faa0 |  |
|  | eab309ff7e |  |
|  | 7de336f087 |  |
|  | 6e9a9dc333 |  |
|  | 848bd7e553 |  |
|  | f0effd2680 |  |
|  | aafb468547 |  |
|  | 4aa756c896 |  |
|  | d3860671a8 |  |
|  | 9dd6e033a7 |  |
|  | 097f62f459 |  |
|  | 048368fd87 |  |
|  | f9db5ff0d6 |  |
|  | 20ce7d428d |  |
|  | 75bddc0bf5 |  |
|  | c78043d526 |  |
|  | 297105266b |  |
|  | 1de17aec74 |  |
|  | 389ded93d1 |  |
|  | af486ec0d0 |  |
|  | 25d64255a3 |  |
|  | 3790020d78 |  |
|  | 5df3d4e5da |  |
|  | af670df515 |  |
|  | a58256d4d3 |  |
|  | 466f7c6448 |  |
|  | 0101657649 |  |
|  | a3a2c8d063 |  |
|  | dfc1acbb2a |  |
|  | 0d055b6ee6 |  |
|  | 614643ef7b |  |
|  | b90b7adf6f |  |
|  | 418090b464 |  |
|  | 090b59e8d6 |  |
|  | 9e1af79637 |  |
|  | 9800807fe5 |  |
|  | b86d79b906 |  |
|  | e070ba3c32 |  |
|  | 43bf7bffd0 |  |
|  | 56aed6e6ff |  |
|  | 47785756e5 |  |
|  | 0aa523cd8c |  |
|  | 7a8222dd97 |  |
|  | 40c585890a |  |
|  | da925e956e |  |
|  | d7ade3c854 |  |
|  | 179c8c716c |  |
|  | 19543f9819 |  |
|  | 533ada70ca |  |
|  | c50ff23194 |  |
|  | d7f1150098 |  |
|  | 82c3eca25e |  |
|  | df13832a59 |  |
|  | 7da92eb9eb |  |
|  | c71298d3d5 |  |
|  | de594833ac |  |
|  | 6a9a92931d |  |
|  | 11ad5b3ed1 |  |
|  | b8354bbb55 |  |
|  | 258675b75e |  |
|  | 11a08cb272 |  |
|  | e9b178b8b9 |  |
|  | 3477fde0e5 |  |
|  | 9baa431656 |  |
|  | e2a1cb5840 |  |
|  | f696f41a02 |  |
|  | 0168d43d60 |  |
|  | e372e25e30 |  |
|  | ca409a732f |  |
|  | 5c0a530ad1 |  |
|  | 4b030456f6 |  |
|  | f93b5b19f0 |  |
|  | 669a6d84e9 |  |
|  | a45017ad71 |  |
|  | 0d9e71b653 |  |
|  | 93f178f3ad |  |
|  | 9f4a6c6fe2 |  |
|  | c915916b62 |  |
|  | dff7ba7598 |  |
|  | fe34ebf770 |  |
|  | a1c51a5885 |  |
|  | 63a8d293a1 |  |
|  | 6c621b7fcf |  |
|  | 529e344450 |  |
|  | 2a169f9364 |  |
|  | 97eb196699 |  |
|  | cfae276d37 |  |
|  | 09129a911e |  |
|  | 15d7b9755e |  |
|  | 72897a20e3 |  |
|  | c04d02460f |  |
|  | 4ca7ac7632 |  |
|  | a260ba3ee7 |  |
|  | efd3f04b7c |  |
|  | f16ce3ca27 |  |
|  | 6214180ecd |  |
|  | 00e21e2021 |  |
|  | 494ce65729 |  |
|  | e15294db41 |  |
|  | be1eb4efb7 |  |
|  | 9d12496aaf |  |
|  | 5d8084a32f |  |
|  | 60eb5de3f1 |  |
|  | a0be7198f9 |  |
|  | 6ab3aeb142 |  |
|  | 590aedd466 |  |
|  | 27e376e892 |  |
|  | 36c41d129c |  |
|  | 89da42dbc1 |  |
|  | 04852aa27e |  |
|  | d0820bb26d |  |
|  | fa6c371380 |  |
|  | 9aa2182cb2 |  |
|  | bca2e393bf |  |
|  | b1ef327bac |  |
|  | 115c74791d |  |
|  | aec5cca2c7 |  |
|  | 06e1c43743 |  |
|  | 9d36c31209 |  |
|  | c91132bd14 |  |
|  | 25e9076f5b |  |
|  | 08945f128b |  |
|  | 5a0629eaa0 |  |
@@ -1,7 +1,7 @@
---
name: Bug report
description: Is something not working? Help us fix it!
labels: [ "bug" ]
labels: [ "C-bug" ]
body:
- type: markdown
attributes:
2 .github/ISSUE_TEMPLATE/config.yml vendored
@@ -4,5 +4,5 @@ contact_links:
url: https://greptime.com/slack
about: Get free help from the Greptime community
- name: Greptime Community Discussion
url: https://github.com/greptimeTeam/greptimedb/discussions
url: https://github.com/greptimeTeam/discussions
about: Get free help from the Greptime community
2 .github/ISSUE_TEMPLATE/enhancement.yml vendored
@@ -1,7 +1,7 @@
---
name: Enhancement
description: Suggest an enhancement to existing functionality
labels: [ "enhancement" ]
labels: [ "C-enhancement" ]
body:
- type: dropdown
id: type
@@ -1,7 +1,7 @@
---
name: Feature request
name: New Feature
description: Suggest a new feature for GreptimeDB
labels: [ "feature request" ]
labels: [ "C-feature" ]
body:
- type: markdown
id: info
18 .github/actions/build-and-push-ci-image/action.yml vendored Normal file
@@ -0,0 +1,18 @@
name: Build and push CI Docker image
description: Build and push CI Docker image to local registry
inputs:
binary_path:
default: "./bin"
description: "Binary path"
runs:
using: composite
steps:
- name: Build and push to local registry
uses: docker/build-push-action@v5
with:
context: .
file: ./docker/ci/ubuntu/Dockerfile.fuzztests
push: true
tags: localhost:5001/greptime/greptimedb:latest
build-args: |
BINARY_PATH=${{ inputs.binary_path }}
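The action above is a thin wrapper around docker/build-push-action. A minimal sketch of the equivalent docker CLI invocation, assuming a checkout that contains the Dockerfile and a pre-built binary under `./bin`:

```bash
# Rough local equivalent of the composite action above (a sketch, not the
# official build path): build the fuzz-test CI image from the pre-built
# binary and push it to the kind-local registry on localhost:5001.
docker build \
  --build-arg BINARY_PATH=./bin \
  -f ./docker/ci/ubuntu/Dockerfile.fuzztests \
  -t localhost:5001/greptime/greptimedb:latest \
  .
docker push localhost:5001/greptime/greptimedb:latest
```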
16 .github/actions/build-greptime-binary/action.yml vendored
@@ -24,6 +24,14 @@ inputs:
|
||||
description: Build android artifacts
|
||||
required: false
|
||||
default: 'false'
|
||||
image-namespace:
|
||||
description: Image Namespace
|
||||
required: false
|
||||
default: 'greptime'
|
||||
image-registry:
|
||||
description: Image Registry
|
||||
required: false
|
||||
default: 'docker.io'
|
||||
runs:
|
||||
using: composite
|
||||
steps:
|
||||
@@ -35,7 +43,9 @@ runs:
|
||||
make build-by-dev-builder \
|
||||
CARGO_PROFILE=${{ inputs.cargo-profile }} \
|
||||
FEATURES=${{ inputs.features }} \
|
||||
BASE_IMAGE=${{ inputs.base-image }}
|
||||
BASE_IMAGE=${{ inputs.base-image }} \
|
||||
IMAGE_NAMESPACE=${{ inputs.image-namespace }} \
|
||||
IMAGE_REGISTRY=${{ inputs.image-registry }}
|
||||
|
||||
- name: Upload artifacts
|
||||
uses: ./.github/actions/upload-artifacts
|
||||
@@ -53,7 +63,9 @@ runs:
|
||||
shell: bash
|
||||
if: ${{ inputs.build-android-artifacts == 'true' }}
|
||||
run: |
|
||||
cd ${{ inputs.working-dir }} && make strip-android-bin
|
||||
cd ${{ inputs.working-dir }} && make strip-android-bin \
|
||||
IMAGE_NAMESPACE=${{ inputs.image-namespace }} \
|
||||
IMAGE_REGISTRY=${{ inputs.image-registry }}
|
||||
|
||||
- name: Upload android artifacts
|
||||
uses: ./.github/actions/upload-artifacts
|
||||
|
||||
12 .github/actions/build-linux-artifacts/action.yml vendored
@@ -30,7 +30,9 @@ runs:
|
||||
# NOTE: If the BUILD_JOBS > 4, it's always OOM in EC2 instance.
|
||||
run: |
|
||||
cd ${{ inputs.working-dir }} && \
|
||||
make run-it-in-container BUILD_JOBS=4
|
||||
make run-it-in-container BUILD_JOBS=4 \
|
||||
IMAGE_NAMESPACE=i8k6a5e1/greptime \
|
||||
IMAGE_REGISTRY=public.ecr.aws
|
||||
|
||||
- name: Upload sqlness logs
|
||||
if: ${{ failure() && inputs.disable-run-tests == 'false' }} # Only upload logs when the integration tests failed.
|
||||
@@ -49,6 +51,8 @@ runs:
|
||||
artifacts-dir: greptime-linux-${{ inputs.arch }}-pyo3-${{ inputs.version }}
|
||||
version: ${{ inputs.version }}
|
||||
working-dir: ${{ inputs.working-dir }}
|
||||
image-registry: public.ecr.aws
|
||||
image-namespace: i8k6a5e1/greptime
|
||||
|
||||
- name: Build greptime without pyo3
|
||||
if: ${{ inputs.dev-mode == 'false' }}
|
||||
@@ -60,6 +64,8 @@ runs:
|
||||
artifacts-dir: greptime-linux-${{ inputs.arch }}-${{ inputs.version }}
|
||||
version: ${{ inputs.version }}
|
||||
working-dir: ${{ inputs.working-dir }}
|
||||
image-registry: public.ecr.aws
|
||||
image-namespace: i8k6a5e1/greptime
|
||||
|
||||
- name: Clean up the target directory # Clean up the target directory for the centos7 base image, or it will still use the objects of last build.
|
||||
shell: bash
|
||||
@@ -76,6 +82,8 @@ runs:
|
||||
artifacts-dir: greptime-linux-${{ inputs.arch }}-centos-${{ inputs.version }}
|
||||
version: ${{ inputs.version }}
|
||||
working-dir: ${{ inputs.working-dir }}
|
||||
image-registry: public.ecr.aws
|
||||
image-namespace: i8k6a5e1/greptime
|
||||
|
||||
- name: Build greptime on android base image
|
||||
uses: ./.github/actions/build-greptime-binary
|
||||
@@ -86,3 +94,5 @@ runs:
|
||||
version: ${{ inputs.version }}
|
||||
working-dir: ${{ inputs.working-dir }}
|
||||
build-android-artifacts: true
|
||||
image-registry: public.ecr.aws
|
||||
image-namespace: i8k6a5e1/greptime
|
||||
|
||||
@@ -59,9 +59,15 @@ runs:
|
||||
if: ${{ inputs.disable-run-tests == 'false' }}
|
||||
uses: taiki-e/install-action@nextest
|
||||
|
||||
# Get proper backtraces in mac Sonoma. Currently there's an issue with the new
|
||||
# linker that prevents backtraces from getting printed correctly.
|
||||
#
|
||||
# <https://github.com/rust-lang/rust/issues/113783>
|
||||
- name: Run integration tests
|
||||
if: ${{ inputs.disable-run-tests == 'false' }}
|
||||
shell: bash
|
||||
env:
|
||||
CARGO_BUILD_RUSTFLAGS: "-Clink-arg=-Wl,-ld_classic"
|
||||
run: |
|
||||
make test sqlness-test
|
||||
|
||||
@@ -75,6 +81,8 @@ runs:
|
||||
|
||||
- name: Build greptime binary
|
||||
shell: bash
|
||||
env:
|
||||
CARGO_BUILD_RUSTFLAGS: "-Clink-arg=-Wl,-ld_classic"
|
||||
run: |
|
||||
make build \
|
||||
CARGO_PROFILE=${{ inputs.cargo-profile }} \
|
||||
|
||||
@@ -59,6 +59,9 @@ runs:
|
||||
if: ${{ inputs.disable-run-tests == 'false' }}
|
||||
shell: pwsh
|
||||
run: make test sqlness-test
|
||||
env:
|
||||
RUSTUP_WINDOWS_PATH_ADD_BIN: 1 # Workaround for https://github.com/nextest-rs/nextest/issues/1493
|
||||
RUST_BACKTRACE: 1
|
||||
|
||||
- name: Upload sqlness logs
|
||||
if: ${{ failure() }} # Only upload logs when the integration tests failed.
|
||||
|
||||
17 .github/actions/setup-chaos/action.yml vendored Normal file
@@ -0,0 +1,17 @@
name: Setup Kind
description: Deploy Kind
runs:
using: composite
steps:
- uses: actions/checkout@v4
- name: Create kind cluster
shell: bash
run: |
helm repo add chaos-mesh https://charts.chaos-mesh.org
kubectl create ns chaos-mesh
helm install chaos-mesh chaos-mesh/chaos-mesh -n=chaos-mesh --version 2.6.3
- name: Print Chaos-mesh
if: always()
shell: bash
run: |
kubectl get po -n chaos-mesh
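The action above installs Chaos Mesh but does not wait for it to become ready before the chaos-based fuzz jobs start. A hedged sketch of a readiness check that could run before applying experiments; the label selector is an assumption based on the chart's standard Helm labels, not taken from the workflow itself:

```bash
# Wait for the Chaos Mesh pods installed above to report Ready, then list them.
kubectl wait \
  --for=condition=Ready \
  pod -l app.kubernetes.io/instance=chaos-mesh \
  --timeout=120s \
  -n chaos-mesh
kubectl get po -n chaos-mesh
```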
16 .github/actions/setup-cyborg/action.yml vendored Normal file
@@ -0,0 +1,16 @@
name: Setup cyborg environment
description: Setup cyborg environment
runs:
using: composite
steps:
- uses: actions/setup-node@v4
with:
node-version: 22
- uses: pnpm/action-setup@v3
with:
package_json_file: 'cyborg/package.json'
run_install: true
- name: Describe the Environment
working-directory: cyborg
shell: bash
run: pnpm tsx -v
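The cyborg environment above is what the notification jobs later in this diff use to run `pnpm tsx bin/report-ci-failure.ts`. A sketch of reproducing that locally, assuming a checkout with the `cyborg/` directory; the token and status values are placeholders you supply yourself:

```bash
# Run the CI-report script outside Actions (a sketch; environment values are
# assumptions mirroring the workflow steps shown further down).
cd cyborg
pnpm install
GITHUB_TOKEN=<your-token> CI_REPORT_STATUS=true pnpm tsx bin/report-ci-failure.ts
```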
25 .github/actions/setup-etcd-cluster/action.yml vendored Normal file
@@ -0,0 +1,25 @@
name: Setup Etcd cluster
description: Deploy Etcd cluster on Kubernetes
inputs:
etcd-replicas:
default: 3
description: "Etcd replicas"
namespace:
default: "etcd-cluster"
runs:
using: composite
steps:
- name: Install Etcd cluster
shell: bash
run: |
helm upgrade \
--install etcd oci://registry-1.docker.io/bitnamicharts/etcd \
--set replicaCount=${{ inputs.etcd-replicas }} \
--set resources.requests.cpu=50m \
--set resources.requests.memory=128Mi \
--set auth.rbac.create=false \
--set auth.rbac.token.enabled=false \
--set persistence.size=2Gi \
--create-namespace \
-n ${{ inputs.namespace }}
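The helm command above returns before the etcd pods are actually Ready; the develop.yml workflow later waits for them explicitly. A sketch of that wait plus a health probe; the pod name `etcd-0` and the in-pod `etcdctl` call are assumptions about the Bitnami chart's defaults:

```bash
# Mirror the "Wait for etcd" step from develop.yml, then probe one member.
kubectl wait \
  --for=condition=Ready \
  pod -l app.kubernetes.io/instance=etcd \
  --timeout=120s \
  -n etcd-cluster
kubectl -n etcd-cluster exec etcd-0 -- etcdctl endpoint health
```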
91 .github/actions/setup-greptimedb-cluster/action.yml vendored Normal file
@@ -0,0 +1,91 @@
|
||||
name: Setup GreptimeDB cluster
|
||||
description: Deploy GreptimeDB cluster on Kubernetes
|
||||
inputs:
|
||||
frontend-replicas:
|
||||
default: 2
|
||||
description: "Number of Frontend replicas"
|
||||
datanode-replicas:
|
||||
default: 2
|
||||
description: "Number of Datanode replicas"
|
||||
meta-replicas:
|
||||
default: 3
|
||||
description: "Number of Metasrv replicas"
|
||||
image-registry:
|
||||
default: "docker.io"
|
||||
description: "Image registry"
|
||||
image-repository:
|
||||
default: "greptime/greptimedb"
|
||||
description: "Image repository"
|
||||
image-tag:
|
||||
default: "latest"
|
||||
description: 'Image tag'
|
||||
etcd-endpoints:
|
||||
default: "etcd.etcd-cluster.svc.cluster.local:2379"
|
||||
description: "Etcd endpoints"
|
||||
values-filename:
|
||||
default: "with-minio.yaml"
|
||||
enable-region-failover:
|
||||
default: false
|
||||
|
||||
runs:
|
||||
using: composite
|
||||
steps:
|
||||
- name: Install GreptimeDB operator
|
||||
shell: bash
|
||||
run: |
|
||||
helm repo add greptime https://greptimeteam.github.io/helm-charts/
|
||||
helm repo update
|
||||
helm upgrade \
|
||||
--install \
|
||||
--create-namespace \
|
||||
greptimedb-operator greptime/greptimedb-operator \
|
||||
-n greptimedb-admin \
|
||||
--wait \
|
||||
--wait-for-jobs
|
||||
- name: Install GreptimeDB cluster
|
||||
shell: bash
|
||||
run: |
|
||||
helm upgrade \
|
||||
--install my-greptimedb \
|
||||
--set meta.etcdEndpoints=${{ inputs.etcd-endpoints }} \
|
||||
--set meta.enableRegionFailover=${{ inputs.enable-region-failover }} \
|
||||
--set image.registry=${{ inputs.image-registry }} \
|
||||
--set image.repository=${{ inputs.image-repository }} \
|
||||
--set image.tag=${{ inputs.image-tag }} \
|
||||
--set base.podTemplate.main.resources.requests.cpu=50m \
|
||||
--set base.podTemplate.main.resources.requests.memory=256Mi \
|
||||
--set base.podTemplate.main.resources.limits.cpu=1000m \
|
||||
--set base.podTemplate.main.resources.limits.memory=2Gi \
|
||||
--set frontend.replicas=${{ inputs.frontend-replicas }} \
|
||||
--set datanode.replicas=${{ inputs.datanode-replicas }} \
|
||||
--set meta.replicas=${{ inputs.meta-replicas }} \
|
||||
greptime/greptimedb-cluster \
|
||||
--create-namespace \
|
||||
-n my-greptimedb \
|
||||
--values ./.github/actions/setup-greptimedb-cluster/${{ inputs.values-filename }} \
|
||||
--wait \
|
||||
--wait-for-jobs
|
||||
- name: Wait for GreptimeDB
|
||||
shell: bash
|
||||
run: |
|
||||
while true; do
|
||||
PHASE=$(kubectl -n my-greptimedb get gtc my-greptimedb -o jsonpath='{.status.clusterPhase}')
|
||||
if [ "$PHASE" == "Running" ]; then
|
||||
echo "Cluster is ready"
|
||||
break
|
||||
else
|
||||
echo "Cluster is not ready yet: Current phase: $PHASE"
|
||||
kubectl get pods -n my-greptimedb
|
||||
sleep 5 # wait for 5 seconds before check again.
|
||||
fi
|
||||
done
|
||||
- name: Print GreptimeDB info
|
||||
if: always()
|
||||
shell: bash
|
||||
run: |
|
||||
kubectl get all --show-labels -n my-greptimedb
|
||||
- name: Describe Nodes
|
||||
if: always()
|
||||
shell: bash
|
||||
run: |
|
||||
kubectl describe nodes
|
||||
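Once the setup-greptimedb-cluster action above reports the cluster phase as Running, the fuzz jobs later in this diff reach the frontend over the MySQL protocol via a port-forward. A hedged sketch of doing the same check by hand; the `mysql` client invocation is an assumption (any MySQL-protocol client pointed at port 4002 would do):

```bash
# Mirror the "Port forward (mysql)" step from develop.yml and confirm the
# frontend answers on the MySQL port.
kubectl port-forward service/my-greptimedb-frontend 4002:4002 -n my-greptimedb &
sleep 3
mysql -h 127.0.0.1 -P 4002 -e "SHOW DATABASES;"
```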
18 .github/actions/setup-greptimedb-cluster/with-disk.yaml vendored Normal file
@@ -0,0 +1,18 @@
|
||||
meta:
|
||||
config: |-
|
||||
[runtime]
|
||||
read_rt_size = 8
|
||||
write_rt_size = 8
|
||||
bg_rt_size = 8
|
||||
datanode:
|
||||
config: |-
|
||||
[runtime]
|
||||
read_rt_size = 8
|
||||
write_rt_size = 8
|
||||
bg_rt_size = 8
|
||||
frontend:
|
||||
config: |-
|
||||
[runtime]
|
||||
read_rt_size = 8
|
||||
write_rt_size = 8
|
||||
bg_rt_size = 8
|
||||
38 .github/actions/setup-greptimedb-cluster/with-minio-and-cache.yaml vendored Normal file
@@ -0,0 +1,38 @@
|
||||
meta:
|
||||
config: |-
|
||||
[runtime]
|
||||
read_rt_size = 8
|
||||
write_rt_size = 8
|
||||
bg_rt_size = 8
|
||||
|
||||
[datanode]
|
||||
[datanode.client]
|
||||
timeout = "60s"
|
||||
datanode:
|
||||
config: |-
|
||||
[runtime]
|
||||
read_rt_size = 8
|
||||
write_rt_size = 8
|
||||
bg_rt_size = 8
|
||||
|
||||
[storage]
|
||||
cache_path = "/data/greptimedb/s3cache"
|
||||
cache_capacity = "256MB"
|
||||
frontend:
|
||||
config: |-
|
||||
[runtime]
|
||||
read_rt_size = 8
|
||||
write_rt_size = 8
|
||||
bg_rt_size = 8
|
||||
|
||||
[meta_client]
|
||||
ddl_timeout = "60s"
|
||||
objectStorage:
|
||||
s3:
|
||||
bucket: default
|
||||
region: us-west-2
|
||||
root: test-root
|
||||
endpoint: http://minio.minio.svc.cluster.local
|
||||
credentials:
|
||||
accessKeyId: rootuser
|
||||
secretAccessKey: rootpass123
|
||||
34 .github/actions/setup-greptimedb-cluster/with-minio.yaml vendored Normal file
@@ -0,0 +1,34 @@
|
||||
meta:
|
||||
config: |-
|
||||
[runtime]
|
||||
read_rt_size = 8
|
||||
write_rt_size = 8
|
||||
bg_rt_size = 8
|
||||
|
||||
[datanode]
|
||||
[datanode.client]
|
||||
timeout = "60s"
|
||||
datanode:
|
||||
config: |-
|
||||
[runtime]
|
||||
read_rt_size = 8
|
||||
write_rt_size = 8
|
||||
bg_rt_size = 8
|
||||
frontend:
|
||||
config: |-
|
||||
[runtime]
|
||||
read_rt_size = 8
|
||||
write_rt_size = 8
|
||||
bg_rt_size = 8
|
||||
|
||||
[meta_client]
|
||||
ddl_timeout = "60s"
|
||||
objectStorage:
|
||||
s3:
|
||||
bucket: default
|
||||
region: us-west-2
|
||||
root: test-root
|
||||
endpoint: http://minio.minio.svc.cluster.local
|
||||
credentials:
|
||||
accessKeyId: rootuser
|
||||
secretAccessKey: rootpass123
|
||||
45 .github/actions/setup-greptimedb-cluster/with-remote-wal.yaml vendored Normal file
@@ -0,0 +1,45 @@
|
||||
meta:
|
||||
config: |-
|
||||
[runtime]
|
||||
read_rt_size = 8
|
||||
write_rt_size = 8
|
||||
bg_rt_size = 8
|
||||
|
||||
[wal]
|
||||
provider = "kafka"
|
||||
broker_endpoints = ["kafka.kafka-cluster.svc.cluster.local:9092"]
|
||||
num_topics = 3
|
||||
|
||||
|
||||
[datanode]
|
||||
[datanode.client]
|
||||
timeout = "60s"
|
||||
datanode:
|
||||
config: |-
|
||||
[runtime]
|
||||
read_rt_size = 8
|
||||
write_rt_size = 8
|
||||
bg_rt_size = 8
|
||||
|
||||
[wal]
|
||||
provider = "kafka"
|
||||
broker_endpoints = ["kafka.kafka-cluster.svc.cluster.local:9092"]
|
||||
linger = "2ms"
|
||||
frontend:
|
||||
config: |-
|
||||
[runtime]
|
||||
read_rt_size = 8
|
||||
write_rt_size = 8
|
||||
bg_rt_size = 8
|
||||
|
||||
[meta_client]
|
||||
ddl_timeout = "60s"
|
||||
objectStorage:
|
||||
s3:
|
||||
bucket: default
|
||||
region: us-west-2
|
||||
root: test-root
|
||||
endpoint: http://minio.minio.svc.cluster.local
|
||||
credentials:
|
||||
accessKeyId: rootuser
|
||||
secretAccessKey: rootpass123
|
||||
24 .github/actions/setup-kafka-cluster/action.yml vendored Normal file
@@ -0,0 +1,24 @@
name: Setup Kafka cluster
description: Deploy Kafka cluster on Kubernetes
inputs:
controller-replicas:
default: 3
description: "Kafka controller replicas"
namespace:
default: "kafka-cluster"
runs:
using: composite
steps:
- name: Install Kafka cluster
shell: bash
run: |
helm upgrade \
--install kafka oci://registry-1.docker.io/bitnamicharts/kafka \
--set controller.replicaCount=${{ inputs.controller-replicas }} \
--set controller.resources.requests.cpu=50m \
--set controller.resources.requests.memory=128Mi \
--set listeners.controller.protocol=PLAINTEXT \
--set listeners.client.protocol=PLAINTEXT \
--set --create-namespace \
-n ${{ inputs.namespace }}
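The workflows below only wait for the Kafka pods to become Ready before running the remote-WAL fuzz targets. A sketch of that wait plus a quick topic listing from inside a broker pod; the pod name `kafka-controller-0` and the on-PATH `kafka-topics.sh` script are assumptions about the Bitnami image, not something the workflow does:

```bash
# Mirror the "Wait for kafka" step from develop.yml, then list topics.
kubectl wait \
  --for=condition=Ready \
  pod -l app.kubernetes.io/instance=kafka \
  --timeout=120s \
  -n kafka-cluster
kubectl -n kafka-cluster exec kafka-controller-0 -- \
  kafka-topics.sh --bootstrap-server localhost:9092 --list
```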
10 .github/actions/setup-kind/action.yml vendored Normal file
@@ -0,0 +1,10 @@
name: Setup Kind
description: Deploy Kind
runs:
using: composite
steps:
- uses: actions/checkout@v4
- name: Create kind cluster
shell: bash
run: |
./.github/scripts/kind-with-registry.sh
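The action above delegates all the work to `kind-with-registry.sh` (shown further down in this diff), which creates the cluster and a local registry on port 5001. A small sanity-check sketch for the result; the context name `kind-kind` assumes kind's default cluster name:

```bash
# Confirm the kind cluster is up and the local registry answers the
# Docker Registry v2 API.
kubectl cluster-info --context kind-kind
curl -s http://localhost:5001/v2/_catalog
```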
24 .github/actions/setup-minio/action.yml vendored Normal file
@@ -0,0 +1,24 @@
name: Setup Minio cluster
description: Deploy Minio cluster on Kubernetes
inputs:
replicas:
default: 1
description: "replicas"
runs:
using: composite
steps:
- name: Install Etcd cluster
shell: bash
run: |
helm repo add minio https://charts.min.io/
helm upgrade --install minio \
--set resources.requests.memory=128Mi \
--set replicas=${{ inputs.replicas }} \
--set mode=standalone \
--set rootUser=rootuser,rootPassword=rootpass123 \
--set buckets[0].name=default \
--set service.port=80,service.targetPort=9000 \
minio/minio \
--create-namespace \
-n minio
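The chart values above expose MinIO's S3 API on service port 80 (target port 9000) inside the cluster, with the `default` bucket that the GreptimeDB values files point at. A hedged sketch of checking it from the host; the service name `minio`, the port mapping, and the use of the aws CLI are assumptions, while the credentials come from the chart values above:

```bash
# Forward local 9000 to the MinIO service (port 80 -> targetPort 9000) and
# list the "default" bucket created by the chart.
kubectl -n minio port-forward service/minio 9000:80 &
sleep 3
AWS_ACCESS_KEY_ID=rootuser AWS_SECRET_ACCESS_KEY=rootpass123 \
  aws --endpoint-url http://127.0.0.1:9000 s3 ls s3://default
```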
11 .github/actions/sqlness-test/action.yml vendored
@@ -57,3 +57,14 @@ runs:
AWS_SECRET_ACCESS_KEY: ${{ inputs.aws-secret-access-key }}
run: |
aws s3 rm s3://${{ inputs.aws-ci-test-bucket }}/${{ inputs.data-root }} --recursive
- name: Export kind logs
if: failure()
shell: bash
run: kind export logs -n greptimedb-operator-e2e /tmp/kind
- name: Upload logs
if: failure()
uses: actions/upload-artifact@v4
with:
name: kind-logs
path: /tmp/kind
retention-days: 3
4 .github/doc-label-config.yml vendored
@@ -1,4 +0,0 @@
Doc not needed:
- '- \[x\] This PR does not require documentation updates.'
Doc update required:
- '- \[ \] This PR does not require documentation updates.'
6 .github/pull_request_template.md vendored
@@ -15,6 +15,6 @@ Please explain IN DETAIL what the changes are in this PR and why they are needed
## Checklist
- [ ] I have written the necessary rustdoc comments.
- [ ] I have added the necessary unit tests and integration tests.
- [x] This PR does not require documentation updates.
- [ ] I have written the necessary rustdoc comments.
- [ ] I have added the necessary unit tests and integration tests.
- [ ] This PR requires documentation updates.
66 .github/scripts/kind-with-registry.sh vendored Executable file
@@ -0,0 +1,66 @@
|
||||
#!/usr/bin/env bash
|
||||
|
||||
set -e
|
||||
set -o pipefail
|
||||
|
||||
# 1. Create registry container unless it already exists
|
||||
reg_name='kind-registry'
|
||||
reg_port='5001'
|
||||
if [ "$(docker inspect -f '{{.State.Running}}' "${reg_name}" 2>/dev/null || true)" != 'true' ]; then
|
||||
docker run \
|
||||
-d --restart=always -p "127.0.0.1:${reg_port}:5000" --network bridge --name "${reg_name}" \
|
||||
registry:2
|
||||
fi
|
||||
|
||||
# 2. Create kind cluster with containerd registry config dir enabled
|
||||
# TODO: kind will eventually enable this by default and this patch will
|
||||
# be unnecessary.
|
||||
#
|
||||
# See:
|
||||
# https://github.com/kubernetes-sigs/kind/issues/2875
|
||||
# https://github.com/containerd/containerd/blob/main/docs/cri/config.md#registry-configuration
|
||||
# See: https://github.com/containerd/containerd/blob/main/docs/hosts.md
|
||||
cat <<EOF | kind create cluster --wait 2m --config=-
|
||||
kind: Cluster
|
||||
apiVersion: kind.x-k8s.io/v1alpha4
|
||||
containerdConfigPatches:
|
||||
- |-
|
||||
[plugins."io.containerd.grpc.v1.cri".registry]
|
||||
config_path = "/etc/containerd/certs.d"
|
||||
EOF
|
||||
|
||||
# 3. Add the registry config to the nodes
|
||||
#
|
||||
# This is necessary because localhost resolves to loopback addresses that are
|
||||
# network-namespace local.
|
||||
# In other words: localhost in the container is not localhost on the host.
|
||||
#
|
||||
# We want a consistent name that works from both ends, so we tell containerd to
|
||||
# alias localhost:${reg_port} to the registry container when pulling images
|
||||
REGISTRY_DIR="/etc/containerd/certs.d/localhost:${reg_port}"
|
||||
for node in $(kind get nodes); do
|
||||
docker exec "${node}" mkdir -p "${REGISTRY_DIR}"
|
||||
cat <<EOF | docker exec -i "${node}" cp /dev/stdin "${REGISTRY_DIR}/hosts.toml"
|
||||
[host."http://${reg_name}:5000"]
|
||||
EOF
|
||||
done
|
||||
|
||||
# 4. Connect the registry to the cluster network if not already connected
|
||||
# This allows kind to bootstrap the network but ensures they're on the same network
|
||||
if [ "$(docker inspect -f='{{json .NetworkSettings.Networks.kind}}' "${reg_name}")" = 'null' ]; then
|
||||
docker network connect "kind" "${reg_name}"
|
||||
fi
|
||||
|
||||
# 5. Document the local registry
|
||||
# https://github.com/kubernetes/enhancements/tree/master/keps/sig-cluster-lifecycle/generic/1755-communicating-a-local-registry
|
||||
cat <<EOF | kubectl apply -f -
|
||||
apiVersion: v1
|
||||
kind: ConfigMap
|
||||
metadata:
|
||||
name: local-registry-hosting
|
||||
namespace: kube-public
|
||||
data:
|
||||
localRegistryHosting.v1: |
|
||||
host: "localhost:${reg_port}"
|
||||
help: "https://kind.sigs.k8s.io/docs/user/local-registry/"
|
||||
EOF
|
||||
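The script above wires a `localhost:5001` registry into every kind node and documents it via the `local-registry-hosting` ConfigMap. A hedged smoke-test sketch for that wiring; the busybox image choice and the test pod name are arbitrary placeholders, not part of the script:

```bash
# Push an image through the host-side registry, then pull it from inside the
# cluster under the same localhost:5001 name.
docker pull busybox:latest
docker tag busybox:latest localhost:5001/busybox:latest
docker push localhost:5001/busybox:latest
kubectl run registry-smoke-test --image=localhost:5001/busybox:latest --restart=Never -- sleep 60
kubectl wait --for=condition=Ready pod/registry-smoke-test --timeout=60s
kubectl delete pod registry-smoke-test
```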
2 .github/workflows/apidoc.yml vendored
@@ -13,7 +13,7 @@ on:
|
||||
name: Build API docs
|
||||
|
||||
env:
|
||||
RUST_TOOLCHAIN: nightly-2024-04-18
|
||||
RUST_TOOLCHAIN: nightly-2024-04-20
|
||||
|
||||
jobs:
|
||||
apidoc:
|
||||
|
||||
20 .github/workflows/dev-build.yml vendored
@@ -82,6 +82,9 @@ env:
|
||||
# The source code will check out in the following path: '${WORKING_DIR}/dev/greptime'.
|
||||
CHECKOUT_GREPTIMEDB_PATH: dev/greptimedb
|
||||
|
||||
permissions:
|
||||
issues: write
|
||||
|
||||
jobs:
|
||||
allocate-runners:
|
||||
name: Allocate runners
|
||||
@@ -321,7 +324,7 @@ jobs:
|
||||
github-token: ${{ secrets.GH_PERSONAL_ACCESS_TOKEN }}
|
||||
|
||||
notification:
|
||||
if: ${{ always() }} # Not requiring successful dependent jobs, always run.
|
||||
if: ${{ github.repository == 'GreptimeTeam/greptimedb' && always() }} # Not requiring successful dependent jobs, always run.
|
||||
name: Send notification to Greptime team
|
||||
needs: [
|
||||
release-images-to-dockerhub
|
||||
@@ -330,16 +333,25 @@ jobs:
|
||||
env:
|
||||
SLACK_WEBHOOK_URL: ${{ secrets.SLACK_WEBHOOK_URL_DEVELOP_CHANNEL }}
|
||||
steps:
|
||||
- name: Notifiy dev build successful result
|
||||
- uses: actions/checkout@v4
|
||||
- uses: ./.github/actions/setup-cyborg
|
||||
- name: Report CI status
|
||||
id: report-ci-status
|
||||
working-directory: cyborg
|
||||
run: pnpm tsx bin/report-ci-failure.ts
|
||||
env:
|
||||
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
|
||||
CI_REPORT_STATUS: ${{ needs.release-images-to-dockerhub.outputs.build-result == 'success' }}
|
||||
- name: Notify dev build successful result
|
||||
uses: slackapi/slack-github-action@v1.23.0
|
||||
if: ${{ needs.release-images-to-dockerhub.outputs.build-result == 'success' }}
|
||||
with:
|
||||
payload: |
|
||||
{"text": "GreptimeDB's ${{ env.NEXT_RELEASE_VERSION }} build has completed successfully."}
|
||||
|
||||
- name: Notifiy dev build failed result
|
||||
- name: Notify dev build failed result
|
||||
uses: slackapi/slack-github-action@v1.23.0
|
||||
if: ${{ needs.release-images-to-dockerhub.outputs.build-result != 'success' }}
|
||||
with:
|
||||
payload: |
|
||||
{"text": "GreptimeDB's ${{ env.NEXT_RELEASE_VERSION }} build has failed, please check 'https://github.com/GreptimeTeam/greptimedb/actions/workflows/${{ env.NEXT_RELEASE_VERSION }}-build.yml'."}
|
||||
{"text": "GreptimeDB's ${{ env.NEXT_RELEASE_VERSION }} build has failed, please check ${{ steps.report-ci-status.outputs.html_url }}."}
|
||||
|
||||
358 .github/workflows/develop.yml vendored
@@ -30,7 +30,7 @@ concurrency:
|
||||
cancel-in-progress: true
|
||||
|
||||
env:
|
||||
RUST_TOOLCHAIN: nightly-2024-04-18
|
||||
RUST_TOOLCHAIN: nightly-2024-04-20
|
||||
|
||||
jobs:
|
||||
check-typos-and-docs:
|
||||
@@ -57,7 +57,7 @@ jobs:
|
||||
runs-on: ${{ matrix.os }}
|
||||
strategy:
|
||||
matrix:
|
||||
os: [ windows-latest, ubuntu-20.04 ]
|
||||
os: [ windows-2022, ubuntu-20.04 ]
|
||||
timeout-minutes: 60
|
||||
steps:
|
||||
- uses: actions/checkout@v4
|
||||
@@ -160,14 +160,16 @@ jobs:
|
||||
run: |
|
||||
sudo apt-get install -y libfuzzer-14-dev
|
||||
rustup install nightly
|
||||
cargo +nightly install cargo-fuzz
|
||||
cargo +nightly install cargo-fuzz cargo-gc-bin
|
||||
- name: Download pre-built binaries
|
||||
uses: actions/download-artifact@v4
|
||||
with:
|
||||
name: bins
|
||||
path: .
|
||||
- name: Unzip binaries
|
||||
run: tar -xvf ./bins.tar.gz
|
||||
run: |
|
||||
tar -xvf ./bins.tar.gz
|
||||
rm ./bins.tar.gz
|
||||
- name: Run GreptimeDB
|
||||
run: |
|
||||
./bins/greptime standalone start&
|
||||
@@ -182,7 +184,7 @@ jobs:
|
||||
|
||||
unstable-fuzztest:
|
||||
name: Unstable Fuzz Test
|
||||
needs: build
|
||||
needs: build-greptime-ci
|
||||
runs-on: ubuntu-latest
|
||||
strategy:
|
||||
matrix:
|
||||
@@ -204,20 +206,22 @@ jobs:
|
||||
shell: bash
|
||||
run: |
|
||||
sudo apt update && sudo apt install -y libfuzzer-14-dev
|
||||
cargo install cargo-fuzz
|
||||
- name: Download pre-built binaries
|
||||
cargo install cargo-fuzz cargo-gc-bin
|
||||
- name: Download pre-built binariy
|
||||
uses: actions/download-artifact@v4
|
||||
with:
|
||||
name: bins
|
||||
name: bin
|
||||
path: .
|
||||
- name: Unzip binaries
|
||||
run: tar -xvf ./bins.tar.gz
|
||||
- name: Fuzz Test
|
||||
- name: Unzip bianry
|
||||
run: |
|
||||
tar -xvf ./bin.tar.gz
|
||||
rm ./bin.tar.gz
|
||||
- name: Run Fuzz Test
|
||||
uses: ./.github/actions/fuzz-test
|
||||
env:
|
||||
CUSTOM_LIBFUZZER_PATH: /usr/lib/llvm-14/lib/libFuzzer.a
|
||||
GT_MYSQL_ADDR: 127.0.0.1:4002
|
||||
GT_FUZZ_BINARY_PATH: ./bins/greptime
|
||||
GT_FUZZ_BINARY_PATH: ./bin/greptime
|
||||
GT_FUZZ_INSTANCE_ROOT_DIR: /tmp/unstable-greptime/
|
||||
with:
|
||||
target: ${{ matrix.target }}
|
||||
@@ -231,10 +235,8 @@ jobs:
|
||||
path: /tmp/unstable-greptime/
|
||||
retention-days: 3
|
||||
|
||||
|
||||
sqlness:
|
||||
name: Sqlness Test
|
||||
needs: build
|
||||
build-greptime-ci:
|
||||
name: Build GreptimeDB binary (profile-CI)
|
||||
runs-on: ${{ matrix.os }}
|
||||
strategy:
|
||||
matrix:
|
||||
@@ -242,30 +244,311 @@ jobs:
|
||||
timeout-minutes: 60
|
||||
steps:
|
||||
- uses: actions/checkout@v4
|
||||
- name: Download pre-built binaries
|
||||
- uses: arduino/setup-protoc@v3
|
||||
with:
|
||||
repo-token: ${{ secrets.GITHUB_TOKEN }}
|
||||
- uses: dtolnay/rust-toolchain@master
|
||||
with:
|
||||
toolchain: ${{ env.RUST_TOOLCHAIN }}
|
||||
- uses: Swatinem/rust-cache@v2
|
||||
with:
|
||||
# Shares across multiple jobs
|
||||
shared-key: "build-greptime-ci"
|
||||
- name: Install cargo-gc-bin
|
||||
shell: bash
|
||||
run: cargo install cargo-gc-bin
|
||||
- name: Build greptime bianry
|
||||
shell: bash
|
||||
# `cargo gc` will invoke `cargo build` with specified args
|
||||
run: cargo gc --profile ci -- --bin greptime
|
||||
- name: Pack greptime binary
|
||||
shell: bash
|
||||
run: |
|
||||
mkdir bin && \
|
||||
mv ./target/ci/greptime bin
|
||||
- name: Print greptime binaries info
|
||||
run: ls -lh bin
|
||||
- name: Upload artifacts
|
||||
uses: ./.github/actions/upload-artifacts
|
||||
with:
|
||||
artifacts-dir: bin
|
||||
version: current
|
||||
|
||||
distributed-fuzztest:
|
||||
name: Fuzz Test (Distributed, ${{ matrix.mode.name }}, ${{ matrix.target }})
|
||||
runs-on: ubuntu-latest
|
||||
needs: build-greptime-ci
|
||||
strategy:
|
||||
matrix:
|
||||
target: [ "fuzz_create_table", "fuzz_alter_table", "fuzz_create_database", "fuzz_create_logical_table", "fuzz_alter_logical_table", "fuzz_insert", "fuzz_insert_logical_table" ]
|
||||
mode:
|
||||
- name: "Disk"
|
||||
minio: false
|
||||
kafka: false
|
||||
values: "with-disk.yaml"
|
||||
- name: "Minio"
|
||||
minio: true
|
||||
kafka: false
|
||||
values: "with-minio.yaml"
|
||||
- name: "Minio with Cache"
|
||||
minio: true
|
||||
kafka: false
|
||||
values: "with-minio-and-cache.yaml"
|
||||
- name: "Remote WAL"
|
||||
minio: true
|
||||
kafka: true
|
||||
values: "with-remote-wal.yaml"
|
||||
steps:
|
||||
- uses: actions/checkout@v4
|
||||
- name: Setup Kind
|
||||
uses: ./.github/actions/setup-kind
|
||||
- if: matrix.mode.minio
|
||||
name: Setup Minio
|
||||
uses: ./.github/actions/setup-minio
|
||||
- if: matrix.mode.kafka
|
||||
name: Setup Kafka cluser
|
||||
uses: ./.github/actions/setup-kafka-cluster
|
||||
- name: Setup Etcd cluser
|
||||
uses: ./.github/actions/setup-etcd-cluster
|
||||
# Prepares for fuzz tests
|
||||
- uses: arduino/setup-protoc@v3
|
||||
with:
|
||||
repo-token: ${{ secrets.GITHUB_TOKEN }}
|
||||
- uses: dtolnay/rust-toolchain@master
|
||||
with:
|
||||
toolchain: ${{ env.RUST_TOOLCHAIN }}
|
||||
- name: Rust Cache
|
||||
uses: Swatinem/rust-cache@v2
|
||||
with:
|
||||
# Shares across multiple jobs
|
||||
shared-key: "fuzz-test-targets"
|
||||
- name: Set Rust Fuzz
|
||||
shell: bash
|
||||
run: |
|
||||
sudo apt-get install -y libfuzzer-14-dev
|
||||
rustup install nightly
|
||||
cargo +nightly install cargo-fuzz cargo-gc-bin
|
||||
# Downloads ci image
|
||||
- name: Download pre-built binariy
|
||||
uses: actions/download-artifact@v4
|
||||
with:
|
||||
name: bins
|
||||
name: bin
|
||||
path: .
|
||||
- name: Unzip binaries
|
||||
run: tar -xvf ./bins.tar.gz
|
||||
- name: Run sqlness
|
||||
run: RUST_BACKTRACE=1 ./bins/sqlness-runner -c ./tests/cases --bins-dir ./bins --preserve-state
|
||||
- name: Upload sqlness logs
|
||||
if: always()
|
||||
- name: Unzip binary
|
||||
run: |
|
||||
tar -xvf ./bin.tar.gz
|
||||
rm ./bin.tar.gz
|
||||
- name: Build and push GreptimeDB image
|
||||
uses: ./.github/actions/build-and-push-ci-image
|
||||
- name: Wait for etcd
|
||||
run: |
|
||||
kubectl wait \
|
||||
--for=condition=Ready \
|
||||
pod -l app.kubernetes.io/instance=etcd \
|
||||
--timeout=120s \
|
||||
-n etcd-cluster
|
||||
- if: matrix.mode.minio
|
||||
name: Wait for minio
|
||||
run: |
|
||||
kubectl wait \
|
||||
--for=condition=Ready \
|
||||
pod -l app=minio \
|
||||
--timeout=120s \
|
||||
-n minio
|
||||
- if: matrix.mode.kafka
|
||||
name: Wait for kafka
|
||||
run: |
|
||||
kubectl wait \
|
||||
--for=condition=Ready \
|
||||
pod -l app.kubernetes.io/instance=kafka \
|
||||
--timeout=120s \
|
||||
-n kafka-cluster
|
||||
- name: Print etcd info
|
||||
shell: bash
|
||||
run: kubectl get all --show-labels -n etcd-cluster
|
||||
# Setup cluster for test
|
||||
- name: Setup GreptimeDB cluster
|
||||
uses: ./.github/actions/setup-greptimedb-cluster
|
||||
with:
|
||||
image-registry: localhost:5001
|
||||
values-filename: ${{ matrix.mode.values }}
|
||||
- name: Port forward (mysql)
|
||||
run: |
|
||||
kubectl port-forward service/my-greptimedb-frontend 4002:4002 -n my-greptimedb&
|
||||
- name: Fuzz Test
|
||||
uses: ./.github/actions/fuzz-test
|
||||
env:
|
||||
CUSTOM_LIBFUZZER_PATH: /usr/lib/llvm-14/lib/libFuzzer.a
|
||||
GT_MYSQL_ADDR: 127.0.0.1:4002
|
||||
with:
|
||||
target: ${{ matrix.target }}
|
||||
max-total-time: 120
|
||||
- name: Describe Nodes
|
||||
if: failure()
|
||||
shell: bash
|
||||
run: |
|
||||
kubectl describe nodes
|
||||
- name: Export kind logs
|
||||
if: failure()
|
||||
shell: bash
|
||||
run: |
|
||||
kind export logs /tmp/kind
|
||||
- name: Upload logs
|
||||
if: failure()
|
||||
uses: actions/upload-artifact@v4
|
||||
with:
|
||||
name: sqlness-logs
|
||||
path: /tmp/sqlness-*
|
||||
name: fuzz-tests-kind-logs-${{ matrix.mode.name }}-${{ matrix.target }}
|
||||
path: /tmp/kind
|
||||
retention-days: 3
|
||||
- name: Delete cluster
|
||||
if: success()
|
||||
shell: bash
|
||||
run: |
|
||||
kind delete cluster
|
||||
docker stop $(docker ps -a -q)
|
||||
docker rm $(docker ps -a -q)
|
||||
docker system prune -f
|
||||
|
||||
sqlness-kafka-wal:
|
||||
name: Sqlness Test with Kafka Wal
|
||||
distributed-fuzztest-with-chaos:
|
||||
name: Fuzz Test with Chaos (Distributed, ${{ matrix.mode.name }}, ${{ matrix.target }})
|
||||
runs-on: ubuntu-latest
|
||||
needs: build-greptime-ci
|
||||
strategy:
|
||||
matrix:
|
||||
target: ["fuzz_failover_mito_regions"]
|
||||
mode:
|
||||
- name: "Remote WAL"
|
||||
minio: true
|
||||
kafka: true
|
||||
values: "with-remote-wal.yaml"
|
||||
steps:
|
||||
- uses: actions/checkout@v4
|
||||
- name: Setup Kind
|
||||
uses: ./.github/actions/setup-kind
|
||||
- name: Setup Chaos Mesh
|
||||
uses: ./.github/actions/setup-chaos
|
||||
- if: matrix.mode.minio
|
||||
name: Setup Minio
|
||||
uses: ./.github/actions/setup-minio
|
||||
- if: matrix.mode.kafka
|
||||
name: Setup Kafka cluser
|
||||
uses: ./.github/actions/setup-kafka-cluster
|
||||
- name: Setup Etcd cluser
|
||||
uses: ./.github/actions/setup-etcd-cluster
|
||||
# Prepares for fuzz tests
|
||||
- uses: arduino/setup-protoc@v3
|
||||
with:
|
||||
repo-token: ${{ secrets.GITHUB_TOKEN }}
|
||||
- uses: dtolnay/rust-toolchain@master
|
||||
with:
|
||||
toolchain: ${{ env.RUST_TOOLCHAIN }}
|
||||
- name: Rust Cache
|
||||
uses: Swatinem/rust-cache@v2
|
||||
with:
|
||||
# Shares across multiple jobs
|
||||
shared-key: "fuzz-test-targets"
|
||||
- name: Set Rust Fuzz
|
||||
shell: bash
|
||||
run: |
|
||||
sudo apt-get install -y libfuzzer-14-dev
|
||||
rustup install nightly
|
||||
cargo +nightly install cargo-fuzz cargo-gc-bin
|
||||
# Downloads ci image
|
||||
- name: Download pre-built binariy
|
||||
uses: actions/download-artifact@v4
|
||||
with:
|
||||
name: bin
|
||||
path: .
|
||||
- name: Unzip binary
|
||||
run: |
|
||||
tar -xvf ./bin.tar.gz
|
||||
rm ./bin.tar.gz
|
||||
- name: Build and push GreptimeDB image
|
||||
uses: ./.github/actions/build-and-push-ci-image
|
||||
- name: Wait for etcd
|
||||
run: |
|
||||
kubectl wait \
|
||||
--for=condition=Ready \
|
||||
pod -l app.kubernetes.io/instance=etcd \
|
||||
--timeout=120s \
|
||||
-n etcd-cluster
|
||||
- if: matrix.mode.minio
|
||||
name: Wait for minio
|
||||
run: |
|
||||
kubectl wait \
|
||||
--for=condition=Ready \
|
||||
pod -l app=minio \
|
||||
--timeout=120s \
|
||||
-n minio
|
||||
- if: matrix.mode.kafka
|
||||
name: Wait for kafka
|
||||
run: |
|
||||
kubectl wait \
|
||||
--for=condition=Ready \
|
||||
pod -l app.kubernetes.io/instance=kafka \
|
||||
--timeout=120s \
|
||||
-n kafka-cluster
|
||||
- name: Print etcd info
|
||||
shell: bash
|
||||
run: kubectl get all --show-labels -n etcd-cluster
|
||||
# Setup cluster for test
|
||||
- name: Setup GreptimeDB cluster
|
||||
uses: ./.github/actions/setup-greptimedb-cluster
|
||||
with:
|
||||
image-registry: localhost:5001
|
||||
values-filename: ${{ matrix.mode.values }}
|
||||
enable-region-failover: true
|
||||
- name: Port forward (mysql)
|
||||
run: |
|
||||
kubectl port-forward service/my-greptimedb-frontend 4002:4002 -n my-greptimedb&
|
||||
- name: Fuzz Test
|
||||
uses: ./.github/actions/fuzz-test
|
||||
env:
|
||||
CUSTOM_LIBFUZZER_PATH: /usr/lib/llvm-14/lib/libFuzzer.a
|
||||
GT_MYSQL_ADDR: 127.0.0.1:4002
|
||||
with:
|
||||
target: ${{ matrix.target }}
|
||||
max-total-time: 120
|
||||
- name: Describe Nodes
|
||||
if: failure()
|
||||
shell: bash
|
||||
run: |
|
||||
kubectl describe nodes
|
||||
- name: Export kind logs
|
||||
if: failure()
|
||||
shell: bash
|
||||
run: |
|
||||
kind export logs /tmp/kind
|
||||
- name: Upload logs
|
||||
if: failure()
|
||||
uses: actions/upload-artifact@v4
|
||||
with:
|
||||
name: fuzz-tests-kind-logs-${{ matrix.mode.name }}-${{ matrix.target }}
|
||||
path: /tmp/kind
|
||||
retention-days: 3
|
||||
- name: Delete cluster
|
||||
if: success()
|
||||
shell: bash
|
||||
run: |
|
||||
kind delete cluster
|
||||
docker stop $(docker ps -a -q)
|
||||
docker rm $(docker ps -a -q)
|
||||
docker system prune -f
|
||||
|
||||
sqlness:
|
||||
name: Sqlness Test (${{ matrix.mode.name }})
|
||||
needs: build
|
||||
runs-on: ${{ matrix.os }}
|
||||
strategy:
|
||||
matrix:
|
||||
os: [ ubuntu-20.04 ]
|
||||
mode:
|
||||
- name: "Basic"
|
||||
opts: ""
|
||||
kafka: false
|
||||
- name: "Remote WAL"
|
||||
opts: "-w kafka -k 127.0.0.1:9092"
|
||||
kafka: true
|
||||
timeout-minutes: 60
|
||||
steps:
|
||||
- uses: actions/checkout@v4
|
||||
@@ -276,17 +559,18 @@ jobs:
|
||||
path: .
|
||||
- name: Unzip binaries
|
||||
run: tar -xvf ./bins.tar.gz
|
||||
- name: Setup kafka server
|
||||
- if: matrix.mode.kafka
|
||||
name: Setup kafka server
|
||||
working-directory: tests-integration/fixtures/kafka
|
||||
run: docker compose -f docker-compose-standalone.yml up -d --wait
|
||||
- name: Run sqlness
|
||||
run: RUST_BACKTRACE=1 ./bins/sqlness-runner -w kafka -k 127.0.0.1:9092 -c ./tests/cases --bins-dir ./bins --preserve-state
|
||||
run: RUST_BACKTRACE=1 ./bins/sqlness-runner ${{ matrix.mode.opts }} -c ./tests/cases --bins-dir ./bins --preserve-state
|
||||
- name: Upload sqlness logs
|
||||
if: always()
|
||||
if: failure()
|
||||
uses: actions/upload-artifact@v4
|
||||
with:
|
||||
name: sqlness-logs-with-kafka-wal
|
||||
path: /tmp/sqlness-*
|
||||
name: sqlness-logs-${{ matrix.mode.name }}
|
||||
path: /tmp/sqlness*
|
||||
retention-days: 3
|
||||
|
||||
fmt:
|
||||
@@ -374,6 +658,9 @@ jobs:
|
||||
- name: Setup kafka server
|
||||
working-directory: tests-integration/fixtures/kafka
|
||||
run: docker compose -f docker-compose-standalone.yml up -d --wait
|
||||
- name: Setup minio
|
||||
working-directory: tests-integration/fixtures/minio
|
||||
run: docker compose -f docker-compose-standalone.yml up -d --wait
|
||||
- name: Run nextest cases
|
||||
run: cargo llvm-cov nextest --workspace --lcov --output-path lcov.info -F pyo3_backend -F dashboard
|
||||
env:
|
||||
@@ -384,6 +671,11 @@ jobs:
|
||||
GT_S3_ACCESS_KEY_ID: ${{ secrets.AWS_CI_TEST_ACCESS_KEY_ID }}
|
||||
GT_S3_ACCESS_KEY: ${{ secrets.AWS_CI_TEST_SECRET_ACCESS_KEY }}
|
||||
GT_S3_REGION: ${{ vars.AWS_CI_TEST_BUCKET_REGION }}
|
||||
GT_MINIO_BUCKET: greptime
|
||||
GT_MINIO_ACCESS_KEY_ID: superpower_ci_user
|
||||
GT_MINIO_ACCESS_KEY: superpower_password
|
||||
GT_MINIO_REGION: us-west-2
|
||||
GT_MINIO_ENDPOINT_URL: http://127.0.0.1:9000
|
||||
GT_ETCD_ENDPOINTS: http://127.0.0.1:2379
|
||||
GT_KAFKA_ENDPOINTS: 127.0.0.1:9092
|
||||
UNITTEST_LOG_DIR: "__unittest_logs"
|
||||
|
||||
39 .github/workflows/doc-issue.yml vendored
@@ -1,39 +0,0 @@
|
||||
name: Create Issue in downstream repos
|
||||
|
||||
on:
|
||||
issues:
|
||||
types:
|
||||
- labeled
|
||||
pull_request_target:
|
||||
types:
|
||||
- labeled
|
||||
|
||||
jobs:
|
||||
doc_issue:
|
||||
if: github.event.label.name == 'doc update required'
|
||||
runs-on: ubuntu-20.04
|
||||
steps:
|
||||
- name: create an issue in doc repo
|
||||
uses: dacbd/create-issue-action@v1.2.1
|
||||
with:
|
||||
owner: GreptimeTeam
|
||||
repo: docs
|
||||
token: ${{ secrets.DOCS_REPO_TOKEN }}
|
||||
title: Update docs for ${{ github.event.issue.title || github.event.pull_request.title }}
|
||||
body: |
|
||||
A document change request is generated from
|
||||
${{ github.event.issue.html_url || github.event.pull_request.html_url }}
|
||||
cloud_issue:
|
||||
if: github.event.label.name == 'cloud followup required'
|
||||
runs-on: ubuntu-20.04
|
||||
steps:
|
||||
- name: create an issue in cloud repo
|
||||
uses: dacbd/create-issue-action@v1.2.1
|
||||
with:
|
||||
owner: GreptimeTeam
|
||||
repo: greptimedb-cloud
|
||||
token: ${{ secrets.DOCS_REPO_TOKEN }}
|
||||
title: Followup changes in ${{ github.event.issue.title || github.event.pull_request.title }}
|
||||
body: |
|
||||
A followup request is generated from
|
||||
${{ github.event.issue.html_url || github.event.pull_request.html_url }}
|
||||
36 .github/workflows/doc-label.yml vendored
@@ -1,36 +0,0 @@
|
||||
name: "PR Doc Labeler"
|
||||
on:
|
||||
pull_request_target:
|
||||
types: [opened, edited, synchronize, ready_for_review, auto_merge_enabled, labeled, unlabeled]
|
||||
|
||||
permissions:
|
||||
pull-requests: write
|
||||
contents: read
|
||||
|
||||
jobs:
|
||||
triage:
|
||||
if: ${{ github.repository == 'GreptimeTeam/greptimedb' }}
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- uses: github/issue-labeler@v3.4
|
||||
with:
|
||||
configuration-path: .github/doc-label-config.yml
|
||||
enable-versioned-regex: false
|
||||
repo-token: ${{ secrets.GITHUB_TOKEN }}
|
||||
sync-labels: 1
|
||||
- name: create an issue in doc repo
|
||||
uses: dacbd/create-issue-action@v1.2.1
|
||||
if: ${{ github.event.action == 'opened' && contains(github.event.pull_request.body, '- [ ] This PR does not require documentation updates.') }}
|
||||
with:
|
||||
owner: GreptimeTeam
|
||||
repo: docs
|
||||
token: ${{ secrets.DOCS_REPO_TOKEN }}
|
||||
title: Update docs for ${{ github.event.issue.title || github.event.pull_request.title }}
|
||||
body: |
|
||||
A document change request is generated from
|
||||
${{ github.event.issue.html_url || github.event.pull_request.html_url }}
|
||||
- name: Check doc labels
|
||||
uses: docker://agilepathway/pull-request-label-checker:latest
|
||||
with:
|
||||
one_of: Doc update required,Doc not needed
|
||||
repo_token: ${{ secrets.GITHUB_TOKEN }}
|
||||
22 .github/workflows/docbot.yml vendored Normal file
@@ -0,0 +1,22 @@
|
||||
name: Follow Up Docs
|
||||
on:
|
||||
pull_request_target:
|
||||
types: [opened, edited]
|
||||
|
||||
permissions:
|
||||
pull-requests: write
|
||||
contents: read
|
||||
|
||||
jobs:
|
||||
docbot:
|
||||
runs-on: ubuntu-20.04
|
||||
timeout-minutes: 10
|
||||
steps:
|
||||
- uses: actions/checkout@v4
|
||||
- uses: ./.github/actions/setup-cyborg
|
||||
- name: Maybe Follow Up Docs Issue
|
||||
working-directory: cyborg
|
||||
run: pnpm tsx bin/follow-up-docs-issue.ts
|
||||
env:
|
||||
DOCS_REPO_TOKEN: ${{ secrets.DOCS_REPO_TOKEN }}
|
||||
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
|
||||
14 .github/workflows/docs.yml vendored
@@ -67,19 +67,13 @@ jobs:
|
||||
- run: 'echo "No action required"'
|
||||
|
||||
sqlness:
|
||||
name: Sqlness Test
|
||||
runs-on: ${{ matrix.os }}
|
||||
strategy:
|
||||
matrix:
|
||||
os: [ ubuntu-20.04 ]
|
||||
steps:
|
||||
- run: 'echo "No action required"'
|
||||
|
||||
sqlness-kafka-wal:
|
||||
name: Sqlness Test with Kafka Wal
|
||||
name: Sqlness Test (${{ matrix.mode.name }})
|
||||
runs-on: ${{ matrix.os }}
|
||||
strategy:
|
||||
matrix:
|
||||
os: [ ubuntu-20.04 ]
|
||||
mode:
|
||||
- name: "Basic"
|
||||
- name: "Remote WAL"
|
||||
steps:
|
||||
- run: 'echo "No action required"'
|
||||
|
||||
27 .github/workflows/nightly-build.yml vendored
@@ -66,6 +66,13 @@ env:
|
||||
|
||||
NIGHTLY_RELEASE_PREFIX: nightly
|
||||
|
||||
# Use the different image name to avoid conflict with the release images.
|
||||
# The DockerHub image will be greptime/greptimedb-nightly.
|
||||
IMAGE_NAME: greptimedb-nightly
|
||||
|
||||
permissions:
|
||||
issues: write
|
||||
|
||||
jobs:
|
||||
allocate-runners:
|
||||
name: Allocate runners
|
||||
@@ -188,6 +195,7 @@ jobs:
|
||||
with:
|
||||
image-registry: docker.io
|
||||
image-namespace: ${{ vars.IMAGE_NAMESPACE }}
|
||||
image-name: ${{ env.IMAGE_NAME }}
|
||||
image-registry-username: ${{ secrets.DOCKERHUB_USERNAME }}
|
||||
image-registry-password: ${{ secrets.DOCKERHUB_TOKEN }}
|
||||
version: ${{ needs.allocate-runners.outputs.version }}
|
||||
@@ -220,7 +228,7 @@ jobs:
|
||||
with:
|
||||
src-image-registry: docker.io
|
||||
src-image-namespace: ${{ vars.IMAGE_NAMESPACE }}
|
||||
src-image-name: greptimedb
|
||||
src-image-name: ${{ env.IMAGE_NAME }}
|
||||
dst-image-registry-username: ${{ secrets.ALICLOUD_USERNAME }}
|
||||
dst-image-registry-password: ${{ secrets.ALICLOUD_PASSWORD }}
|
||||
dst-image-registry: ${{ vars.ACR_IMAGE_REGISTRY }}
|
||||
@@ -285,7 +293,7 @@ jobs:
|
||||
github-token: ${{ secrets.GH_PERSONAL_ACCESS_TOKEN }}
|
||||
|
||||
notification:
|
||||
if: ${{ always() }} # Not requiring successful dependent jobs, always run.
|
||||
if: ${{ github.repository == 'GreptimeTeam/greptimedb' && always() }} # Not requiring successful dependent jobs, always run.
|
||||
name: Send notification to Greptime team
|
||||
needs: [
|
||||
release-images-to-dockerhub
|
||||
@@ -294,16 +302,25 @@ jobs:
|
||||
env:
|
||||
SLACK_WEBHOOK_URL: ${{ secrets.SLACK_WEBHOOK_URL_DEVELOP_CHANNEL }}
|
||||
steps:
|
||||
- name: Notifiy nightly build successful result
|
||||
- uses: actions/checkout@v4
|
||||
- uses: ./.github/actions/setup-cyborg
|
||||
- name: Report CI status
|
||||
id: report-ci-status
|
||||
working-directory: cyborg
|
||||
run: pnpm tsx bin/report-ci-failure.ts
|
||||
env:
|
||||
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
|
||||
CI_REPORT_STATUS: ${{ needs.release-images-to-dockerhub.outputs.nightly-build-result == 'success' }}
|
||||
- name: Notify nightly build successful result
|
||||
uses: slackapi/slack-github-action@v1.23.0
|
||||
if: ${{ needs.release-images-to-dockerhub.outputs.nightly-build-result == 'success' }}
|
||||
with:
|
||||
payload: |
|
||||
{"text": "GreptimeDB's ${{ env.NEXT_RELEASE_VERSION }} build has completed successfully."}
|
||||
|
||||
- name: Notifiy nightly build failed result
|
||||
- name: Notify nightly build failed result
|
||||
uses: slackapi/slack-github-action@v1.23.0
|
||||
if: ${{ needs.release-images-to-dockerhub.outputs.nightly-build-result != 'success' }}
|
||||
with:
|
||||
payload: |
|
||||
{"text": "GreptimeDB's ${{ env.NEXT_RELEASE_VERSION }} build has failed, please check 'https://github.com/GreptimeTeam/greptimedb/actions/workflows/${{ env.NEXT_RELEASE_VERSION }}-build.yml'."}
|
||||
{"text": "GreptimeDB's ${{ env.NEXT_RELEASE_VERSION }} build has failed, please check ${{ steps.report-ci-status.outputs.html_url }}."}
|
||||
|
||||
81 .github/workflows/nightly-ci.yml vendored
@@ -1,6 +1,6 @@
|
||||
on:
|
||||
schedule:
|
||||
- cron: '0 23 * * 1-5'
|
||||
- cron: "0 23 * * 1-5"
|
||||
workflow_dispatch:
|
||||
|
||||
name: Nightly CI
|
||||
@@ -10,7 +10,10 @@ concurrency:
|
||||
cancel-in-progress: true
|
||||
|
||||
env:
|
||||
RUST_TOOLCHAIN: nightly-2024-04-18
|
||||
RUST_TOOLCHAIN: nightly-2024-04-20
|
||||
|
||||
permissions:
|
||||
issues: write
|
||||
|
||||
jobs:
|
||||
sqlness-test:
|
||||
@@ -22,7 +25,6 @@ jobs:
|
||||
uses: actions/checkout@v4
|
||||
with:
|
||||
fetch-depth: 0
|
||||
|
||||
- name: Run sqlness test
|
||||
uses: ./.github/actions/sqlness-test
|
||||
with:
|
||||
@@ -35,10 +37,11 @@ jobs:
|
||||
sqlness-windows:
|
||||
name: Sqlness tests on Windows
|
||||
if: ${{ github.repository == 'GreptimeTeam/greptimedb' }}
runs-on: windows-latest-8-cores
runs-on: windows-2022-8-cores
timeout-minutes: 60
steps:
- uses: actions/checkout@v4
- uses: ./.github/actions/setup-cyborg
- uses: arduino/setup-protoc@v3
with:
repo-token: ${{ secrets.GITHUB_TOKEN }}
@@ -49,14 +52,6 @@ jobs:
uses: Swatinem/rust-cache@v2
- name: Run sqlness
run: cargo sqlness
- name: Notify slack if failed
if: failure()
uses: slackapi/slack-github-action@v1.23.0
env:
SLACK_WEBHOOK_URL: ${{ secrets.SLACK_WEBHOOK_URL_DEVELOP_CHANNEL }}
with:
payload: |
{"text": "Nightly CI failed for sqlness tests"}
- name: Upload sqlness logs
if: always()
uses: actions/upload-artifact@v4
@@ -68,14 +63,18 @@ jobs:
test-on-windows:
name: Run tests on Windows
if: ${{ github.repository == 'GreptimeTeam/greptimedb' }}
runs-on: windows-latest-8-cores
runs-on: windows-2022-8-cores
timeout-minutes: 60
steps:
- run: git config --global core.autocrlf false
- uses: actions/checkout@v4
- uses: ./.github/actions/setup-cyborg
- uses: arduino/setup-protoc@v3
with:
repo-token: ${{ secrets.GITHUB_TOKEN }}
- uses: KyleMayes/install-llvm-action@v1
with:
version: "14.0"
- name: Install Rust toolchain
uses: dtolnay/rust-toolchain@master
with:
@@ -88,7 +87,7 @@ jobs:
- name: Install Python
uses: actions/setup-python@v5
with:
python-version: '3.10'
python-version: "3.10"
- name: Install PyArrow Package
run: pip install pyarrow
- name: Install WSL distribution
@@ -98,18 +97,62 @@ jobs:
- name: Running tests
run: cargo nextest run -F pyo3_backend,dashboard
env:
CARGO_BUILD_RUSTFLAGS: "-C linker=lld-link"
RUST_BACKTRACE: 1
CARGO_INCREMENTAL: 0
RUSTUP_WINDOWS_PATH_ADD_BIN: 1 # Workaround for https://github.com/nextest-rs/nextest/issues/1493
GT_S3_BUCKET: ${{ vars.AWS_CI_TEST_BUCKET }}
GT_S3_ACCESS_KEY_ID: ${{ secrets.AWS_CI_TEST_ACCESS_KEY_ID }}
GT_S3_ACCESS_KEY: ${{ secrets.AWS_CI_TEST_SECRET_ACCESS_KEY }}
GT_S3_REGION: ${{ vars.AWS_CI_TEST_BUCKET_REGION }}
UNITTEST_LOG_DIR: "__unittest_logs"
- name: Notify slack if failed
if: failure()
uses: slackapi/slack-github-action@v1.23.0

check-status:
name: Check status
needs: [
sqlness-test,
sqlness-windows,
test-on-windows,
]
if: ${{ github.repository == 'GreptimeTeam/greptimedb' }}
runs-on: ubuntu-20.04
outputs:
check-result: ${{ steps.set-check-result.outputs.check-result }}
steps:
- name: Set check result
id: set-check-result
run: |
echo "check-result=success" >> $GITHUB_OUTPUT

notification:
if: ${{ github.repository == 'GreptimeTeam/greptimedb' && always() }} # Not requiring successful dependent jobs, always run.
name: Send notification to Greptime team
needs: [
check-status
]
runs-on: ubuntu-20.04
env:
SLACK_WEBHOOK_URL: ${{ secrets.SLACK_WEBHOOK_URL_DEVELOP_CHANNEL }}
steps:
- uses: actions/checkout@v4
- uses: ./.github/actions/setup-cyborg
- name: Report CI status
id: report-ci-status
working-directory: cyborg
run: pnpm tsx bin/report-ci-failure.ts
env:
SLACK_WEBHOOK_URL: ${{ secrets.SLACK_WEBHOOK_URL_DEVELOP_CHANNEL }}
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
CI_REPORT_STATUS: ${{ needs.check-status.outputs.check-result == 'success' }}
- name: Notify dev build successful result
uses: slackapi/slack-github-action@v1.23.0
if: ${{ needs.check-status.outputs.check-result == 'success' }}
with:
payload: |
{"text": "Nightly CI failed for cargo test"}
{"text": "Nightly CI has completed successfully."}

- name: Notify dev build failed result
uses: slackapi/slack-github-action@v1.23.0
if: ${{ needs.check-status.outputs.check-result != 'success' }}
with:
payload: |
{"text": "Nightly CI failed has failed, please check ${{ steps.report-ci-status.outputs.html_url }}."}
32 .github/workflows/release.yml vendored
@@ -82,7 +82,7 @@ on:
# Use env variables to control all the release process.
env:
# The arguments of building greptime.
RUST_TOOLCHAIN: nightly-2024-04-18
RUST_TOOLCHAIN: nightly-2024-04-20
CARGO_PROFILE: nightly

# Controls whether to run tests, include unit-test, integration-test and sqlness.
@@ -91,7 +91,12 @@ env:
# The scheduled version is '${{ env.NEXT_RELEASE_VERSION }}-nightly-YYYYMMDD', like v0.2.0-nigthly-20230313;
NIGHTLY_RELEASE_PREFIX: nightly
# Note: The NEXT_RELEASE_VERSION should be modified manually by every formal release.
NEXT_RELEASE_VERSION: v0.8.0
NEXT_RELEASE_VERSION: v0.9.0

# Permission reference: https://docs.github.com/en/actions/using-jobs/assigning-permissions-to-jobs
permissions:
issues: write # Allows the action to create issues for cyborg.
contents: write # Allows the action to create a release.

jobs:
allocate-runners:
@@ -102,7 +107,7 @@ jobs:
linux-amd64-runner: ${{ steps.start-linux-amd64-runner.outputs.label }}
linux-arm64-runner: ${{ steps.start-linux-arm64-runner.outputs.label }}
macos-runner: ${{ inputs.macos_runner || vars.DEFAULT_MACOS_RUNNER }}
windows-runner: windows-latest-8-cores
windows-runner: windows-2022-8-cores

# The following EC2 resource id will be used for resource releasing.
linux-amd64-ec2-runner-label: ${{ steps.start-linux-amd64-runner.outputs.label }}
@@ -245,7 +250,7 @@ jobs:
- name: Set build macos result
id: set-build-macos-result
run: |
echo "build-macos-result=success" >> $GITHUB_OUTPUT
echo "build-macos-result=success" >> $GITHUB_OUTPUT

build-windows-artifacts:
name: Build Windows artifacts
@@ -318,7 +323,7 @@ jobs:
- name: Set build image result
id: set-build-image-result
run: |
echo "build-image-result=success" >> $GITHUB_OUTPUT
echo "build-image-result=success" >> $GITHUB_OUTPUT

release-cn-artifacts:
name: Release artifacts to CN region
@@ -436,7 +441,7 @@ jobs:
github-token: ${{ secrets.GH_PERSONAL_ACCESS_TOKEN }}

notification:
if: ${{ always() || github.repository == 'GreptimeTeam/greptimedb' }}
if: ${{ github.repository == 'GreptimeTeam/greptimedb' && (github.event_name == 'push' || github.event_name == 'schedule') && always() }}
name: Send notification to Greptime team
needs: [
release-images-to-dockerhub,
@@ -447,16 +452,25 @@ jobs:
env:
SLACK_WEBHOOK_URL: ${{ secrets.SLACK_WEBHOOK_URL_DEVELOP_CHANNEL }}
steps:
- name: Notifiy release successful result
- uses: actions/checkout@v4
- uses: ./.github/actions/setup-cyborg
- name: Report CI status
id: report-ci-status
working-directory: cyborg
run: pnpm tsx bin/report-ci-failure.ts
env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
CI_REPORT_STATUS: ${{ needs.release-images-to-dockerhub.outputs.build-image-result == 'success' && needs.build-windows-artifacts.outputs.build-windows-result == 'success' && needs.build-macos-artifacts.outputs.build-macos-result == 'success' }}
- name: Notify release successful result
uses: slackapi/slack-github-action@v1.25.0
if: ${{ needs.release-images-to-dockerhub.outputs.build-image-result == 'success' && needs.build-windows-artifacts.outputs.build-windows-result == 'success' && needs.build-macos-artifacts.outputs.build-macos-result == 'success' }}
with:
payload: |
{"text": "GreptimeDB's release version has completed successfully."}

- name: Notifiy release failed result
- name: Notify release failed result
uses: slackapi/slack-github-action@v1.25.0
if: ${{ needs.release-images-to-dockerhub.outputs.build-image-result != 'success' || needs.build-windows-artifacts.outputs.build-windows-result != 'success' || needs.build-macos-artifacts.outputs.build-macos-result != 'success' }}
with:
payload: |
{"text": "GreptimeDB's release version has failed, please check 'https://github.com/GreptimeTeam/greptimedb/actions/workflows/release.yml'."}
{"text": "GreptimeDB's release version has failed, please check ${{ steps.report-ci-status.outputs.html_url }}."}
11 .github/workflows/schedule.yml vendored
@@ -16,16 +16,7 @@ jobs:
if: ${{ github.repository == 'GreptimeTeam/greptimedb' }}
steps:
- uses: actions/checkout@v4
- uses: actions/setup-node@v4
with:
node-version: 22
- uses: pnpm/action-setup@v3
with:
package_json_file: 'cyborg/package.json'
run_install: true
- name: Describe the Environment
working-directory: cyborg
run: pnpm tsx -v
- uses: ./.github/actions/setup-cyborg
- name: Do Maintenance
working-directory: cyborg
run: pnpm tsx bin/schedule.ts
11 .github/workflows/semantic-pull-request.yml vendored
@@ -13,16 +13,7 @@ jobs:
timeout-minutes: 10
steps:
- uses: actions/checkout@v4
- uses: actions/setup-node@v4
with:
node-version: 22
- uses: pnpm/action-setup@v3
with:
package_json_file: 'cyborg/package.json'
run_install: true
- name: Describe the Environment
working-directory: cyborg
run: pnpm tsx -v
- uses: ./.github/actions/setup-cyborg
- name: Check Pull Request
working-directory: cyborg
run: pnpm tsx bin/check-pull-request.ts
@@ -2,7 +2,14 @@

Thanks a lot for considering contributing to GreptimeDB. We believe people like you would make GreptimeDB a great product. We intend to build a community where individuals can have open talks, show respect for one another, and speak with true ❤️. Meanwhile, we are to keep transparency and make your effort count here.

Please read the guidelines, and they can help you get started. Communicate with respect to developers maintaining and developing the project. In return, they should reciprocate that respect by addressing your issue, reviewing changes, as well as helping finalize and merge your pull requests.
You can find our contributors at https://github.com/GreptimeTeam/greptimedb/graphs/contributors. When you dedicate to GreptimeDB for a few months and keep bringing high-quality contributions (code, docs, advocate, etc.), you will be a candidate of a committer.

A committer will be granted both read & write access to GreptimeDB repos. Here is a list of current committers except GreptimeDB team members:

* [Eugene Tolbakov](https://github.com/etolbakov): PromQL support, SQL engine, InfluxDB APIs, and more.
* [@NiwakaDev](https://github.com/NiwakaDev): SQL engine and storage layer.

Please read the guidelines, and they can help you get started. Communicate respectfully with the developers maintaining and developing the project. In return, they should reciprocate that respect by addressing your issue, reviewing changes, as well as helping finalize and merge your pull requests.

Follow our [README](https://github.com/GreptimeTeam/greptimedb#readme) to get the whole picture of the project. To learn about the design of GreptimeDB, please refer to the [design docs](https://github.com/GrepTimeTeam/docs).
3287 Cargo.lock generated — file diff suppressed because it is too large.
51 Cargo.toml
@@ -1,9 +1,9 @@
[workspace]
members = [
"benchmarks",
"src/api",
"src/auth",
"src/catalog",
"src/cache",
"src/client",
"src/cmd",
"src/common/base",
@@ -45,6 +45,7 @@ members = [
"src/object-store",
"src/operator",
"src/partition",
"src/pipeline",
"src/plugins",
"src/promql",
"src/puffin",
@@ -63,13 +64,14 @@ members = [
resolver = "2"

[workspace.package]
version = "0.7.2"
version = "0.8.2"
edition = "2021"
license = "Apache-2.0"

[workspace.lints]
clippy.print_stdout = "warn"
clippy.print_stderr = "warn"
clippy.dbg_macro = "warn"
clippy.implicit_clone = "warn"
clippy.readonly_write_lock = "allow"
rust.unknown_lints = "deny"
@@ -99,16 +101,18 @@ bytemuck = "1.12"
bytes = { version = "1.5", features = ["serde"] }
chrono = { version = "0.4", features = ["serde"] }
clap = { version = "4.4", features = ["derive"] }
config = "0.13.0"
crossbeam-utils = "0.8"
dashmap = "5.4"
datafusion = { git = "https://github.com/apache/arrow-datafusion.git", rev = "34eda15b73a9e278af8844b30ed2f1c21c10359c" }
datafusion-common = { git = "https://github.com/apache/arrow-datafusion.git", rev = "34eda15b73a9e278af8844b30ed2f1c21c10359c" }
datafusion-expr = { git = "https://github.com/apache/arrow-datafusion.git", rev = "34eda15b73a9e278af8844b30ed2f1c21c10359c" }
datafusion-functions = { git = "https://github.com/apache/arrow-datafusion.git", rev = "34eda15b73a9e278af8844b30ed2f1c21c10359c" }
datafusion-optimizer = { git = "https://github.com/apache/arrow-datafusion.git", rev = "34eda15b73a9e278af8844b30ed2f1c21c10359c" }
datafusion-physical-expr = { git = "https://github.com/apache/arrow-datafusion.git", rev = "34eda15b73a9e278af8844b30ed2f1c21c10359c" }
datafusion-sql = { git = "https://github.com/apache/arrow-datafusion.git", rev = "34eda15b73a9e278af8844b30ed2f1c21c10359c" }
datafusion-substrait = { git = "https://github.com/apache/arrow-datafusion.git", rev = "34eda15b73a9e278af8844b30ed2f1c21c10359c" }
datafusion = { git = "https://github.com/apache/datafusion.git", rev = "729b356ef543ffcda6813c7b5373507a04ae0109" }
datafusion-common = { git = "https://github.com/apache/datafusion.git", rev = "729b356ef543ffcda6813c7b5373507a04ae0109" }
datafusion-expr = { git = "https://github.com/apache/datafusion.git", rev = "729b356ef543ffcda6813c7b5373507a04ae0109" }
datafusion-functions = { git = "https://github.com/apache/datafusion.git", rev = "729b356ef543ffcda6813c7b5373507a04ae0109" }
datafusion-optimizer = { git = "https://github.com/apache/datafusion.git", rev = "729b356ef543ffcda6813c7b5373507a04ae0109" }
datafusion-physical-expr = { git = "https://github.com/apache/datafusion.git", rev = "729b356ef543ffcda6813c7b5373507a04ae0109" }
datafusion-physical-plan = { git = "https://github.com/apache/datafusion.git", rev = "729b356ef543ffcda6813c7b5373507a04ae0109" }
datafusion-sql = { git = "https://github.com/apache/datafusion.git", rev = "729b356ef543ffcda6813c7b5373507a04ae0109" }
datafusion-substrait = { git = "https://github.com/apache/datafusion.git", rev = "729b356ef543ffcda6813c7b5373507a04ae0109" }
derive_builder = "0.12"
dotenv = "0.15"
# TODO(LFC): Wait for https://github.com/etcdv3/etcd-client/pull/76
@@ -116,7 +120,7 @@ etcd-client = { git = "https://github.com/MichaelScofield/etcd-client.git", rev
fst = "0.4.7"
futures = "0.3"
futures-util = "0.3"
greptime-proto = { git = "https://github.com/GreptimeTeam/greptime-proto.git", rev = "219b2409bb701f75b43fc0ba64967d2ed8e75491" }
greptime-proto = { git = "https://github.com/GreptimeTeam/greptime-proto.git", rev = "a70a6af9c69e40f9a918936a48717343402b4393" }
humantime = "2.1"
humantime-serde = "1.1"
itertools = "0.10"
@@ -136,28 +140,31 @@ parquet = { version = "51.0.0", default-features = false, features = ["arrow", "
paste = "1.0"
pin-project = "1.0"
prometheus = { version = "0.13.3", features = ["process"] }
promql-parser = { version = "0.4" }
prost = "0.12"
raft-engine = { version = "0.4.1", default-features = false }
rand = "0.8"
regex = "1.8"
regex-automata = { version = "0.4" }
reqwest = { version = "0.11", default-features = false, features = [
reqwest = { version = "0.12", default-features = false, features = [
"json",
"rustls-tls-native-roots",
"stream",
"multipart",
] }
rskafka = "0.5"
rstest = "0.21"
rstest_reuse = "0.7"
rust_decimal = "1.33"
schemars = "0.8"
serde = { version = "1.0", features = ["derive"] }
serde_json = { version = "1.0", features = ["float_roundtrip"] }
serde_with = "3"
smallvec = { version = "1", features = ["serde"] }
snafu = "0.7"
snafu = "0.8"
sysinfo = "0.30"
# on branch v0.44.x
sqlparser = { git = "https://github.com/GreptimeTeam/sqlparser-rs.git", rev = "c919990bf62ad38d2b0c0a3bc90b26ad919d51b0", features = [
sqlparser = { git = "https://github.com/GreptimeTeam/sqlparser-rs.git", rev = "54a267ac89c09b11c0c88934690530807185d3e7", features = [
"visitor",
] }
strum = { version = "0.25", features = ["derive"] }
@@ -166,13 +173,15 @@ tokio = { version = "1.36", features = ["full"] }
tokio-stream = { version = "0.1" }
tokio-util = { version = "0.7", features = ["io-util", "compat"] }
toml = "0.8.8"
tonic = { version = "0.11", features = ["tls"] }
tonic = { version = "0.11", features = ["tls", "gzip", "zstd"] }
tower = { version = "0.4" }
uuid = { version = "1.7", features = ["serde", "v4", "fast-rng"] }
zstd = "0.13"

## workspaces members
api = { path = "src/api" }
auth = { path = "src/auth" }
cache = { path = "src/cache" }
catalog = { path = "src/catalog" }
client = { path = "src/client" }
cmd = { path = "src/cmd" }
@@ -204,6 +213,7 @@ common-wal = { path = "src/common/wal" }
datanode = { path = "src/datanode" }
datatypes = { path = "src/datatypes" }
file-engine = { path = "src/file-engine" }
flow = { path = "src/flow" }
frontend = { path = "src/frontend" }
index = { path = "src/index" }
log-store = { path = "src/log-store" }
@@ -214,6 +224,7 @@ mito2 = { path = "src/mito2" }
object-store = { path = "src/object-store" }
operator = { path = "src/operator" }
partition = { path = "src/partition" }
pipeline = { path = "src/pipeline" }
plugins = { path = "src/plugins" }
promql = { path = "src/promql" }
puffin = { path = "src/puffin" }
@@ -225,8 +236,6 @@ sql = { path = "src/sql" }
store-api = { path = "src/store-api" }
substrait = { path = "src/common/substrait" }
table = { path = "src/table" }
# TODO some code depends on this
tests-integration = { path = "tests-integration" }

[workspace.dependencies.meter-macros]
git = "https://github.com/GreptimeTeam/greptime-meter.git"
@@ -242,6 +251,14 @@ lto = "thin"
debug = false
incremental = false

[profile.ci]
inherits = "dev"
strip = true

[profile.dev.package.sqlness-runner]
debug = false
strip = true

[profile.dev.package.tests-fuzz]
debug = false
strip = true
17 Makefile
@@ -163,6 +163,17 @@ nextest: ## Install nextest tools.
sqlness-test: ## Run sqlness test.
	cargo sqlness

# Run fuzz test ${FUZZ_TARGET}.
RUNS ?= 1
FUZZ_TARGET ?= fuzz_alter_table
.PHONY: fuzz
fuzz:
	cargo fuzz run ${FUZZ_TARGET} --fuzz-dir tests-fuzz -D -s none -- -runs=${RUNS}

.PHONY: fuzz-ls
fuzz-ls:
	cargo fuzz list --fuzz-dir tests-fuzz

.PHONY: check
check: ## Cargo check all the targets.
	cargo check --workspace --all-targets --all-features
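The `fuzz` target in the hunk above simply shells out to `cargo fuzz` with a named libFuzzer harness such as `fuzz_alter_table`. As a rough, hedged illustration only (not the repository's actual harness), a minimal cargo-fuzz target built on the `libfuzzer-sys` and `arbitrary` crates could look like the sketch below; the `AlterTableInput` type is a hypothetical stand-in for whatever structured input the real target decodes.

```rust
#![no_main]
// Minimal sketch of a cargo-fuzz harness; `libfuzzer-sys` supplies the entry
// point and `arbitrary` turns raw fuzz bytes into structured input.
use arbitrary::Arbitrary;
use libfuzzer_sys::fuzz_target;

// Hypothetical structured input; the real `fuzz_alter_table` target defines its own.
#[derive(Debug, Arbitrary)]
struct AlterTableInput {
    table_name: String,
    add_column: bool,
}

fuzz_target!(|input: AlterTableInput| {
    // Exercise the component under test with the generated input;
    // panics and crashes are reported by libFuzzer as findings.
    let _ = (&input.table_name, input.add_column);
});
```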
@@ -194,12 +205,16 @@ run-it-in-container: start-etcd ## Run integration tests in dev-builder.
	-w /greptimedb ${IMAGE_REGISTRY}/${IMAGE_NAMESPACE}/dev-builder-${BASE_IMAGE}:latest \
	make test sqlness-test BUILD_JOBS=${BUILD_JOBS}

.PHONY: run-cluster-with-etcd
run-cluster-with-etcd: ## Run greptime cluster with etcd in docker-compose.
	docker compose -f ./docker/docker-compose/cluster-with-etcd.yaml up

##@ Docs
config-docs: ## Generate configuration documentation from toml files.
	docker run --rm \
	-v ${PWD}:/greptimedb \
	-w /greptimedb/config \
	toml2docs/toml2docs:latest \
	toml2docs/toml2docs:v0.1.1 \
	-p '##' \
	-t ./config-docs-template.md \
	-o ./config.md
@@ -1,40 +0,0 @@
[package]
name = "benchmarks"
version.workspace = true
edition.workspace = true
license.workspace = true

[lints]
workspace = true

[dependencies]
api.workspace = true
arrow.workspace = true
chrono.workspace = true
clap.workspace = true
client.workspace = true
common-base.workspace = true
common-telemetry.workspace = true
common-wal.workspace = true
dotenv.workspace = true
futures.workspace = true
futures-util.workspace = true
humantime.workspace = true
humantime-serde.workspace = true
indicatif = "0.17.1"
itertools.workspace = true
lazy_static.workspace = true
log-store.workspace = true
mito2.workspace = true
num_cpus.workspace = true
parquet.workspace = true
prometheus.workspace = true
rand.workspace = true
rskafka.workspace = true
serde.workspace = true
store-api.workspace = true
# TODO depend `Database` client
tests-integration.workspace = true
tokio.workspace = true
toml.workspace = true
uuid.workspace = true
@@ -1,11 +0,0 @@
Benchmarkers for GreptimeDB
--------------------------------

## Wal Benchmarker
The wal benchmarker serves to evaluate the performance of GreptimeDB's Write-Ahead Log (WAL) component. It meticulously assesses the read/write performance of the WAL under diverse workloads generated by the benchmarker.

### How to use
To compile the benchmarker, navigate to the `greptimedb/benchmarks` directory and execute `cargo build --release`. Subsequently, you'll find the compiled target located at `greptimedb/target/release/wal_bench`.

The `./wal_bench -h` command reveals numerous arguments that the target accepts. Among these, a notable one is the `cfg-file` argument. By utilizing a configuration file in the TOML format, you can bypass the need to repeatedly specify cumbersome arguments.
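For readers unfamiliar with how a flag like `cfg-file` is wired up, a minimal sketch of a clap-derived argument struct in the spirit of the benchmarker's `Args` (which appears further down in this diff) is shown below; only the `--cfg-file`/`-c` flag is reproduced, the empty-string default and the print statements are illustrative assumptions.

```rust
// Sketch of a clap v4 derive-based CLI in the style of the benchmarker's `Args`.
use clap::Parser;

#[derive(Parser, Debug)]
struct Args {
    /// Path to a TOML configuration file; when set, it replaces the other flags.
    #[clap(long, short = 'c', default_value = "")]
    cfg_file: String,
}

fn main() {
    let args = Args::parse();
    if args.cfg_file.is_empty() {
        println!("no config file given, falling back to CLI flags");
    } else {
        println!("loading benchmark config from {}", args.cfg_file);
    }
}
```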
@@ -1,21 +0,0 @@
# Refers to the documents of `Args` in benchmarks/src/wal.rs`.
wal_provider = "kafka"
bootstrap_brokers = ["localhost:9092"]
num_workers = 10
num_topics = 32
num_regions = 1000
num_scrapes = 1000
num_rows = 5
col_types = "ifs"
max_batch_size = "512KB"
linger = "1ms"
backoff_init = "10ms"
backoff_max = "1ms"
backoff_base = 2
backoff_deadline = "3s"
compression = "zstd"
rng_seed = 42
skip_read = false
skip_write = false
random_topics = true
report_metrics = false
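The benchmarker consumes a file like this by deserializing it straight into its `Config` struct with `toml` and `serde` (durations handled via `humantime_serde`), as the `main` function later in this diff shows. A stripped-down, self-contained sketch of that flow, assuming the `serde`, `toml` and `humantime_serde` crates and reproducing only a handful of the fields, might read:

```rust
// Sketch only: deserialize a subset of wal_bench.example.toml into a typed struct.
use std::time::Duration;

use serde::Deserialize;

#[derive(Debug, Deserialize)]
struct BenchConfig {
    wal_provider: String,
    bootstrap_brokers: Vec<String>,
    num_workers: u32,
    num_regions: u32,
    #[serde(with = "humantime_serde")]
    linger: Duration,
    #[serde(with = "humantime_serde")]
    backoff_deadline: Duration,
}

fn main() -> Result<(), Box<dyn std::error::Error>> {
    let raw = std::fs::read_to_string("benchmarks/config/wal_bench.example.toml")?;
    // Unknown keys in the file are ignored by default, so a partial struct is fine.
    let cfg: BenchConfig = toml::from_str(&raw)?;
    println!("{cfg:?}");
    Ok(())
}
```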
@@ -1,326 +0,0 @@
|
||||
// Copyright 2023 Greptime Team
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
#![feature(int_roundings)]
|
||||
|
||||
use std::fs;
|
||||
use std::sync::Arc;
|
||||
use std::time::Instant;
|
||||
|
||||
use api::v1::{ColumnDataType, ColumnSchema, SemanticType};
|
||||
use benchmarks::metrics;
|
||||
use benchmarks::wal_bench::{Args, Config, Region, WalProvider};
|
||||
use clap::Parser;
|
||||
use common_telemetry::info;
|
||||
use common_wal::config::kafka::common::BackoffConfig;
|
||||
use common_wal::config::kafka::DatanodeKafkaConfig as KafkaConfig;
|
||||
use common_wal::config::raft_engine::RaftEngineConfig;
|
||||
use common_wal::options::{KafkaWalOptions, WalOptions};
|
||||
use itertools::Itertools;
|
||||
use log_store::kafka::log_store::KafkaLogStore;
|
||||
use log_store::raft_engine::log_store::RaftEngineLogStore;
|
||||
use mito2::wal::Wal;
|
||||
use prometheus::{Encoder, TextEncoder};
|
||||
use rand::distributions::{Alphanumeric, DistString};
|
||||
use rand::rngs::SmallRng;
|
||||
use rand::SeedableRng;
|
||||
use rskafka::client::partition::Compression;
|
||||
use rskafka::client::ClientBuilder;
|
||||
use store_api::logstore::LogStore;
|
||||
use store_api::storage::RegionId;
|
||||
|
||||
async fn run_benchmarker<S: LogStore>(cfg: &Config, topics: &[String], wal: Arc<Wal<S>>) {
|
||||
let chunk_size = cfg.num_regions.div_ceil(cfg.num_workers);
|
||||
let region_chunks = (0..cfg.num_regions)
|
||||
.map(|id| {
|
||||
build_region(
|
||||
id as u64,
|
||||
topics,
|
||||
&mut SmallRng::seed_from_u64(cfg.rng_seed),
|
||||
cfg,
|
||||
)
|
||||
})
|
||||
.chunks(chunk_size as usize)
|
||||
.into_iter()
|
||||
.map(|chunk| Arc::new(chunk.collect::<Vec<_>>()))
|
||||
.collect::<Vec<_>>();
|
||||
|
||||
let mut write_elapsed = 0;
|
||||
let mut read_elapsed = 0;
|
||||
|
||||
if !cfg.skip_write {
|
||||
info!("Benchmarking write ...");
|
||||
|
||||
let num_scrapes = cfg.num_scrapes;
|
||||
let timer = Instant::now();
|
||||
futures::future::join_all((0..cfg.num_workers).map(|i| {
|
||||
let wal = wal.clone();
|
||||
let regions = region_chunks[i as usize].clone();
|
||||
tokio::spawn(async move {
|
||||
for _ in 0..num_scrapes {
|
||||
let mut wal_writer = wal.writer();
|
||||
regions
|
||||
.iter()
|
||||
.for_each(|region| region.add_wal_entry(&mut wal_writer));
|
||||
wal_writer.write_to_wal().await.unwrap();
|
||||
}
|
||||
})
|
||||
}))
|
||||
.await;
|
||||
write_elapsed += timer.elapsed().as_millis();
|
||||
}
|
||||
|
||||
if !cfg.skip_read {
|
||||
info!("Benchmarking read ...");
|
||||
|
||||
let timer = Instant::now();
|
||||
futures::future::join_all((0..cfg.num_workers).map(|i| {
|
||||
let wal = wal.clone();
|
||||
let regions = region_chunks[i as usize].clone();
|
||||
tokio::spawn(async move {
|
||||
for region in regions.iter() {
|
||||
region.replay(&wal).await;
|
||||
}
|
||||
})
|
||||
}))
|
||||
.await;
|
||||
read_elapsed = timer.elapsed().as_millis();
|
||||
}
|
||||
|
||||
dump_report(cfg, write_elapsed, read_elapsed);
|
||||
}
|
||||
|
||||
fn build_region(id: u64, topics: &[String], rng: &mut SmallRng, cfg: &Config) -> Region {
|
||||
let wal_options = match cfg.wal_provider {
|
||||
WalProvider::Kafka => {
|
||||
assert!(!topics.is_empty());
|
||||
WalOptions::Kafka(KafkaWalOptions {
|
||||
topic: topics.get(id as usize % topics.len()).cloned().unwrap(),
|
||||
})
|
||||
}
|
||||
WalProvider::RaftEngine => WalOptions::RaftEngine,
|
||||
};
|
||||
Region::new(
|
||||
RegionId::from_u64(id),
|
||||
build_schema(&parse_col_types(&cfg.col_types), rng),
|
||||
wal_options,
|
||||
cfg.num_rows,
|
||||
cfg.rng_seed,
|
||||
)
|
||||
}
|
||||
|
||||
fn build_schema(col_types: &[ColumnDataType], mut rng: &mut SmallRng) -> Vec<ColumnSchema> {
|
||||
col_types
|
||||
.iter()
|
||||
.map(|col_type| ColumnSchema {
|
||||
column_name: Alphanumeric.sample_string(&mut rng, 5),
|
||||
datatype: *col_type as i32,
|
||||
semantic_type: SemanticType::Field as i32,
|
||||
datatype_extension: None,
|
||||
})
|
||||
.chain(vec![ColumnSchema {
|
||||
column_name: "ts".to_string(),
|
||||
datatype: ColumnDataType::TimestampMillisecond as i32,
|
||||
semantic_type: SemanticType::Tag as i32,
|
||||
datatype_extension: None,
|
||||
}])
|
||||
.collect()
|
||||
}
|
||||
|
||||
fn dump_report(cfg: &Config, write_elapsed: u128, read_elapsed: u128) {
|
||||
let cost_report = format!(
|
||||
"write costs: {} ms, read costs: {} ms",
|
||||
write_elapsed, read_elapsed,
|
||||
);
|
||||
|
||||
let total_written_bytes = metrics::METRIC_WAL_WRITE_BYTES_TOTAL.get() as u128;
|
||||
let write_throughput = if write_elapsed > 0 {
|
||||
(total_written_bytes * 1000).div_floor(write_elapsed)
|
||||
} else {
|
||||
0
|
||||
};
|
||||
let total_read_bytes = metrics::METRIC_WAL_READ_BYTES_TOTAL.get() as u128;
|
||||
let read_throughput = if read_elapsed > 0 {
|
||||
(total_read_bytes * 1000).div_floor(read_elapsed)
|
||||
} else {
|
||||
0
|
||||
};
|
||||
|
||||
let throughput_report = format!(
|
||||
"total written bytes: {} bytes, total read bytes: {} bytes, write throuput: {} bytes/s ({} mb/s), read throughput: {} bytes/s ({} mb/s)",
|
||||
total_written_bytes,
|
||||
total_read_bytes,
|
||||
write_throughput,
|
||||
write_throughput.div_floor(1 << 20),
|
||||
read_throughput,
|
||||
read_throughput.div_floor(1 << 20),
|
||||
);
|
||||
|
||||
let metrics_report = if cfg.report_metrics {
|
||||
let mut buffer = Vec::new();
|
||||
let encoder = TextEncoder::new();
|
||||
let metrics = prometheus::gather();
|
||||
encoder.encode(&metrics, &mut buffer).unwrap();
|
||||
String::from_utf8(buffer).unwrap()
|
||||
} else {
|
||||
String::new()
|
||||
};
|
||||
|
||||
info!(
|
||||
r#"
|
||||
Benchmark config:
|
||||
{cfg:?}
|
||||
|
||||
Benchmark report:
|
||||
{cost_report}
|
||||
{throughput_report}
|
||||
{metrics_report}"#
|
||||
);
|
||||
}
|
||||
|
||||
async fn create_topics(cfg: &Config) -> Vec<String> {
|
||||
// Creates topics.
|
||||
let client = ClientBuilder::new(cfg.bootstrap_brokers.clone())
|
||||
.build()
|
||||
.await
|
||||
.unwrap();
|
||||
let ctrl_client = client.controller_client().unwrap();
|
||||
let (topics, tasks): (Vec<_>, Vec<_>) = (0..cfg.num_topics)
|
||||
.map(|i| {
|
||||
let topic = if cfg.random_topics {
|
||||
format!(
|
||||
"greptime_wal_bench_topic_{}_{}",
|
||||
uuid::Uuid::new_v4().as_u128(),
|
||||
i
|
||||
)
|
||||
} else {
|
||||
format!("greptime_wal_bench_topic_{}", i)
|
||||
};
|
||||
let task = ctrl_client.create_topic(
|
||||
topic.clone(),
|
||||
1,
|
||||
cfg.bootstrap_brokers.len() as i16,
|
||||
2000,
|
||||
);
|
||||
(topic, task)
|
||||
})
|
||||
.unzip();
|
||||
// Must ignore errors since we allow topics being created more than once.
|
||||
let _ = futures::future::try_join_all(tasks).await;
|
||||
|
||||
topics
|
||||
}
|
||||
|
||||
fn parse_compression(comp: &str) -> Compression {
|
||||
match comp {
|
||||
"no" => Compression::NoCompression,
|
||||
"gzip" => Compression::Gzip,
|
||||
"lz4" => Compression::Lz4,
|
||||
"snappy" => Compression::Snappy,
|
||||
"zstd" => Compression::Zstd,
|
||||
other => unreachable!("Unrecognized compression {other}"),
|
||||
}
|
||||
}
|
||||
|
||||
fn parse_col_types(col_types: &str) -> Vec<ColumnDataType> {
|
||||
let parts = col_types.split('x').collect::<Vec<_>>();
|
||||
assert!(parts.len() <= 2);
|
||||
|
||||
let pattern = parts[0];
|
||||
let repeat = parts
|
||||
.get(1)
|
||||
.map(|r| r.parse::<usize>().unwrap())
|
||||
.unwrap_or(1);
|
||||
|
||||
pattern
|
||||
.chars()
|
||||
.map(|c| match c {
|
||||
'i' | 'I' => ColumnDataType::Int64,
|
||||
'f' | 'F' => ColumnDataType::Float64,
|
||||
's' | 'S' => ColumnDataType::String,
|
||||
other => unreachable!("Cannot parse {other} as a column data type"),
|
||||
})
|
||||
.cycle()
|
||||
.take(pattern.len() * repeat)
|
||||
.collect()
|
||||
}
|
||||
|
||||
fn main() {
|
||||
// Sets the global logging to INFO and suppress loggings from rskafka other than ERROR and upper ones.
|
||||
std::env::set_var("UNITTEST_LOG_LEVEL", "info,rskafka=error");
|
||||
common_telemetry::init_default_ut_logging();
|
||||
|
||||
let args = Args::parse();
|
||||
let cfg = if !args.cfg_file.is_empty() {
|
||||
toml::from_str(&fs::read_to_string(&args.cfg_file).unwrap()).unwrap()
|
||||
} else {
|
||||
Config::from(args)
|
||||
};
|
||||
|
||||
// Validates arguments.
|
||||
if cfg.num_regions < cfg.num_workers {
|
||||
panic!("num_regions must be greater than or equal to num_workers");
|
||||
}
|
||||
if cfg
|
||||
.num_workers
|
||||
.min(cfg.num_topics)
|
||||
.min(cfg.num_regions)
|
||||
.min(cfg.num_scrapes)
|
||||
.min(cfg.max_batch_size.as_bytes() as u32)
|
||||
.min(cfg.bootstrap_brokers.len() as u32)
|
||||
== 0
|
||||
{
|
||||
panic!("Invalid arguments");
|
||||
}
|
||||
|
||||
tokio::runtime::Builder::new_multi_thread()
|
||||
.enable_all()
|
||||
.build()
|
||||
.unwrap()
|
||||
.block_on(async {
|
||||
match cfg.wal_provider {
|
||||
WalProvider::Kafka => {
|
||||
let topics = create_topics(&cfg).await;
|
||||
let kafka_cfg = KafkaConfig {
|
||||
broker_endpoints: cfg.bootstrap_brokers.clone(),
|
||||
max_batch_size: cfg.max_batch_size,
|
||||
linger: cfg.linger,
|
||||
backoff: BackoffConfig {
|
||||
init: cfg.backoff_init,
|
||||
max: cfg.backoff_max,
|
||||
base: cfg.backoff_base,
|
||||
deadline: Some(cfg.backoff_deadline),
|
||||
},
|
||||
compression: parse_compression(&cfg.compression),
|
||||
..Default::default()
|
||||
};
|
||||
let store = Arc::new(KafkaLogStore::try_new(&kafka_cfg).await.unwrap());
|
||||
let wal = Arc::new(Wal::new(store));
|
||||
run_benchmarker(&cfg, &topics, wal).await;
|
||||
}
|
||||
WalProvider::RaftEngine => {
|
||||
// The benchmarker assumes the raft engine directory exists.
|
||||
let store = RaftEngineLogStore::try_new(
|
||||
"/tmp/greptimedb/raft-engine-wal".to_string(),
|
||||
RaftEngineConfig::default(),
|
||||
)
|
||||
.await
|
||||
.map(Arc::new)
|
||||
.unwrap();
|
||||
let wal = Arc::new(Wal::new(store));
|
||||
run_benchmarker(&cfg, &[], wal).await;
|
||||
}
|
||||
}
|
||||
});
|
||||
}
|
||||
@@ -1,39 +0,0 @@
// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

use lazy_static::lazy_static;
use prometheus::*;

/// Logstore label.
pub const LOGSTORE_LABEL: &str = "logstore";
/// Operation type label.
pub const OPTYPE_LABEL: &str = "optype";

lazy_static! {
    /// Counters of bytes of each operation on a logstore.
    pub static ref METRIC_WAL_OP_BYTES_TOTAL: IntCounterVec = register_int_counter_vec!(
        "greptime_bench_wal_op_bytes_total",
        "wal operation bytes total",
        &[OPTYPE_LABEL],
    )
    .unwrap();
    /// Counter of bytes of the append_batch operation.
    pub static ref METRIC_WAL_WRITE_BYTES_TOTAL: IntCounter = METRIC_WAL_OP_BYTES_TOTAL.with_label_values(
        &["write"],
    );
    /// Counter of bytes of the read operation.
    pub static ref METRIC_WAL_READ_BYTES_TOTAL: IntCounter = METRIC_WAL_OP_BYTES_TOTAL.with_label_values(
        &["read"],
    );
}
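These counters are bumped from the benchmark loop (`inc_by` with the estimated entry size) and rendered at the end of a run via `prometheus::gather()` and `TextEncoder`, as `dump_report` does elsewhere in this diff. A compact, self-contained sketch of that register/increment/gather pattern, independent of the GreptimeDB crates and using an illustrative metric name, is:

```rust
// Sketch of the register / increment / gather pattern used by the benchmark metrics.
use lazy_static::lazy_static;
use prometheus::{register_int_counter_vec, Encoder, IntCounterVec, TextEncoder};

lazy_static! {
    static ref OP_BYTES_TOTAL: IntCounterVec = register_int_counter_vec!(
        "example_wal_op_bytes_total",
        "wal operation bytes total",
        &["optype"],
    )
    .unwrap();
}

fn main() {
    // Record some bytes against the "write" and "read" label values.
    OP_BYTES_TOTAL.with_label_values(&["write"]).inc_by(4096);
    OP_BYTES_TOTAL.with_label_values(&["read"]).inc_by(1024);

    // Render everything in the default registry as the Prometheus text format.
    let mut buffer = Vec::new();
    TextEncoder::new()
        .encode(&prometheus::gather(), &mut buffer)
        .unwrap();
    println!("{}", String::from_utf8(buffer).unwrap());
}
```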
@@ -1,361 +0,0 @@
|
||||
// Copyright 2023 Greptime Team
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
use std::mem::size_of;
|
||||
use std::sync::atomic::{AtomicI64, AtomicU64, Ordering};
|
||||
use std::sync::{Arc, Mutex};
|
||||
use std::time::Duration;
|
||||
|
||||
use api::v1::value::ValueData;
|
||||
use api::v1::{ColumnDataType, ColumnSchema, Mutation, OpType, Row, Rows, Value, WalEntry};
|
||||
use clap::{Parser, ValueEnum};
|
||||
use common_base::readable_size::ReadableSize;
|
||||
use common_wal::options::WalOptions;
|
||||
use futures::StreamExt;
|
||||
use mito2::wal::{Wal, WalWriter};
|
||||
use rand::distributions::{Alphanumeric, DistString, Uniform};
|
||||
use rand::rngs::SmallRng;
|
||||
use rand::{Rng, SeedableRng};
|
||||
use serde::{Deserialize, Serialize};
|
||||
use store_api::logstore::LogStore;
|
||||
use store_api::storage::RegionId;
|
||||
|
||||
use crate::metrics;
|
||||
|
||||
/// The wal provider.
|
||||
#[derive(Clone, ValueEnum, Default, Debug, PartialEq, Serialize, Deserialize)]
|
||||
#[serde(rename_all = "snake_case")]
|
||||
pub enum WalProvider {
|
||||
#[default]
|
||||
RaftEngine,
|
||||
Kafka,
|
||||
}
|
||||
|
||||
#[derive(Parser)]
|
||||
pub struct Args {
|
||||
/// The provided configuration file.
|
||||
/// The example configuration file can be found at `greptimedb/benchmarks/config/wal_bench.example.toml`.
|
||||
#[clap(long, short = 'c')]
|
||||
pub cfg_file: String,
|
||||
|
||||
/// The wal provider.
|
||||
#[clap(long, value_enum, default_value_t = WalProvider::default())]
|
||||
pub wal_provider: WalProvider,
|
||||
|
||||
/// The advertised addresses of the kafka brokers.
|
||||
/// If there're multiple bootstrap brokers, their addresses should be separated by comma, for e.g. "localhost:9092,localhost:9093".
|
||||
#[clap(long, short = 'b', default_value = "localhost:9092")]
|
||||
pub bootstrap_brokers: String,
|
||||
|
||||
/// The number of workers each running in a dedicated thread.
|
||||
#[clap(long, default_value_t = num_cpus::get() as u32)]
|
||||
pub num_workers: u32,
|
||||
|
||||
/// The number of kafka topics to be created.
|
||||
#[clap(long, default_value_t = 32)]
|
||||
pub num_topics: u32,
|
||||
|
||||
/// The number of regions.
|
||||
#[clap(long, default_value_t = 1000)]
|
||||
pub num_regions: u32,
|
||||
|
||||
/// The number of times each region is scraped.
|
||||
#[clap(long, default_value_t = 1000)]
|
||||
pub num_scrapes: u32,
|
||||
|
||||
/// The number of rows in each wal entry.
|
||||
/// Each time a region is scraped, a wal entry containing will be produced.
|
||||
#[clap(long, default_value_t = 5)]
|
||||
pub num_rows: u32,
|
||||
|
||||
/// The column types of the schema for each region.
|
||||
/// Currently, three column types are supported:
|
||||
/// - i = ColumnDataType::Int64
|
||||
/// - f = ColumnDataType::Float64
|
||||
/// - s = ColumnDataType::String
|
||||
/// For e.g., "ifs" will be parsed as three columns: i64, f64, and string.
|
||||
///
|
||||
/// Additionally, a "x" sign can be provided to repeat the column types for a given number of times.
|
||||
/// For e.g., "iix2" will be parsed as 4 columns: i64, i64, i64, and i64.
|
||||
/// This feature is useful if you want to specify many columns.
|
||||
#[clap(long, default_value = "ifs")]
|
||||
pub col_types: String,
|
||||
|
||||
/// The maximum size of a batch of kafka records.
|
||||
/// The default value is 1mb.
|
||||
#[clap(long, default_value = "512KB")]
|
||||
pub max_batch_size: ReadableSize,
|
||||
|
||||
/// The minimum latency the kafka client issues a batch of kafka records.
|
||||
/// However, a batch of kafka records would be immediately issued if a record cannot be fit into the batch.
|
||||
#[clap(long, default_value = "1ms")]
|
||||
pub linger: String,
|
||||
|
||||
/// The initial backoff delay of the kafka consumer.
|
||||
#[clap(long, default_value = "10ms")]
|
||||
pub backoff_init: String,
|
||||
|
||||
/// The maximum backoff delay of the kafka consumer.
|
||||
#[clap(long, default_value = "1s")]
|
||||
pub backoff_max: String,
|
||||
|
||||
/// The exponential backoff rate of the kafka consumer. The next back off = base * the current backoff.
|
||||
#[clap(long, default_value_t = 2)]
|
||||
pub backoff_base: u32,
|
||||
|
||||
/// The deadline of backoff. The backoff ends if the total backoff delay reaches the deadline.
|
||||
#[clap(long, default_value = "3s")]
|
||||
pub backoff_deadline: String,
|
||||
|
||||
/// The client-side compression algorithm for kafka records.
|
||||
#[clap(long, default_value = "zstd")]
|
||||
pub compression: String,
|
||||
|
||||
/// The seed of random number generators.
|
||||
#[clap(long, default_value_t = 42)]
|
||||
pub rng_seed: u64,
|
||||
|
||||
/// Skips the read phase, aka. region replay, if set to true.
|
||||
#[clap(long, default_value_t = false)]
|
||||
pub skip_read: bool,
|
||||
|
||||
/// Skips the write phase if set to true.
|
||||
#[clap(long, default_value_t = false)]
|
||||
pub skip_write: bool,
|
||||
|
||||
/// Randomly generates topic names if set to true.
|
||||
/// Useful when you want to run the benchmarker without worrying about the topics created before.
|
||||
#[clap(long, default_value_t = false)]
|
||||
pub random_topics: bool,
|
||||
|
||||
/// Logs out the gathered prometheus metrics when the benchmarker ends.
|
||||
#[clap(long, default_value_t = false)]
|
||||
pub report_metrics: bool,
|
||||
}
|
||||
|
||||
/// Benchmarker config.
|
||||
#[derive(Debug, Clone, Serialize, Deserialize)]
|
||||
pub struct Config {
|
||||
pub wal_provider: WalProvider,
|
||||
pub bootstrap_brokers: Vec<String>,
|
||||
pub num_workers: u32,
|
||||
pub num_topics: u32,
|
||||
pub num_regions: u32,
|
||||
pub num_scrapes: u32,
|
||||
pub num_rows: u32,
|
||||
pub col_types: String,
|
||||
pub max_batch_size: ReadableSize,
|
||||
#[serde(with = "humantime_serde")]
|
||||
pub linger: Duration,
|
||||
#[serde(with = "humantime_serde")]
|
||||
pub backoff_init: Duration,
|
||||
#[serde(with = "humantime_serde")]
|
||||
pub backoff_max: Duration,
|
||||
pub backoff_base: u32,
|
||||
#[serde(with = "humantime_serde")]
|
||||
pub backoff_deadline: Duration,
|
||||
pub compression: String,
|
||||
pub rng_seed: u64,
|
||||
pub skip_read: bool,
|
||||
pub skip_write: bool,
|
||||
pub random_topics: bool,
|
||||
pub report_metrics: bool,
|
||||
}
|
||||
|
||||
impl From<Args> for Config {
|
||||
fn from(args: Args) -> Self {
|
||||
let cfg = Self {
|
||||
wal_provider: args.wal_provider,
|
||||
bootstrap_brokers: args
|
||||
.bootstrap_brokers
|
||||
.split(',')
|
||||
.map(ToString::to_string)
|
||||
.collect::<Vec<_>>(),
|
||||
num_workers: args.num_workers.min(num_cpus::get() as u32),
|
||||
num_topics: args.num_topics,
|
||||
num_regions: args.num_regions,
|
||||
num_scrapes: args.num_scrapes,
|
||||
num_rows: args.num_rows,
|
||||
col_types: args.col_types,
|
||||
max_batch_size: args.max_batch_size,
|
||||
linger: humantime::parse_duration(&args.linger).unwrap(),
|
||||
backoff_init: humantime::parse_duration(&args.backoff_init).unwrap(),
|
||||
backoff_max: humantime::parse_duration(&args.backoff_max).unwrap(),
|
||||
backoff_base: args.backoff_base,
|
||||
backoff_deadline: humantime::parse_duration(&args.backoff_deadline).unwrap(),
|
||||
compression: args.compression,
|
||||
rng_seed: args.rng_seed,
|
||||
skip_read: args.skip_read,
|
||||
skip_write: args.skip_write,
|
||||
random_topics: args.random_topics,
|
||||
report_metrics: args.report_metrics,
|
||||
};
|
||||
|
||||
cfg
|
||||
}
|
||||
}
|
||||
|
||||
/// The region used for wal benchmarker.
|
||||
pub struct Region {
|
||||
id: RegionId,
|
||||
schema: Vec<ColumnSchema>,
|
||||
wal_options: WalOptions,
|
||||
next_sequence: AtomicU64,
|
||||
next_entry_id: AtomicU64,
|
||||
next_timestamp: AtomicI64,
|
||||
rng: Mutex<Option<SmallRng>>,
|
||||
num_rows: u32,
|
||||
}
|
||||
|
||||
impl Region {
|
||||
/// Creates a new region.
|
||||
pub fn new(
|
||||
id: RegionId,
|
||||
schema: Vec<ColumnSchema>,
|
||||
wal_options: WalOptions,
|
||||
num_rows: u32,
|
||||
rng_seed: u64,
|
||||
) -> Self {
|
||||
Self {
|
||||
id,
|
||||
schema,
|
||||
wal_options,
|
||||
next_sequence: AtomicU64::new(1),
|
||||
next_entry_id: AtomicU64::new(1),
|
||||
next_timestamp: AtomicI64::new(1655276557000),
|
||||
rng: Mutex::new(Some(SmallRng::seed_from_u64(rng_seed))),
|
||||
num_rows,
|
||||
}
|
||||
}
|
||||
|
||||
/// Scrapes the region and adds the generated entry to wal.
|
||||
pub fn add_wal_entry<S: LogStore>(&self, wal_writer: &mut WalWriter<S>) {
|
||||
let mutation = Mutation {
|
||||
op_type: OpType::Put as i32,
|
||||
sequence: self
|
||||
.next_sequence
|
||||
.fetch_add(self.num_rows as u64, Ordering::Relaxed),
|
||||
rows: Some(self.build_rows()),
|
||||
};
|
||||
let entry = WalEntry {
|
||||
mutations: vec![mutation],
|
||||
};
|
||||
metrics::METRIC_WAL_WRITE_BYTES_TOTAL.inc_by(Self::entry_estimated_size(&entry) as u64);
|
||||
|
||||
wal_writer
|
||||
.add_entry(
|
||||
self.id,
|
||||
self.next_entry_id.fetch_add(1, Ordering::Relaxed),
|
||||
&entry,
|
||||
&self.wal_options,
|
||||
)
|
||||
.unwrap();
|
||||
}
|
||||
|
||||
/// Replays the region.
|
||||
pub async fn replay<S: LogStore>(&self, wal: &Arc<Wal<S>>) {
|
||||
let mut wal_stream = wal.scan(self.id, 0, &self.wal_options).unwrap();
|
||||
while let Some(res) = wal_stream.next().await {
|
||||
let (_, entry) = res.unwrap();
|
||||
metrics::METRIC_WAL_READ_BYTES_TOTAL.inc_by(Self::entry_estimated_size(&entry) as u64);
|
||||
}
|
||||
}
|
||||
|
||||
/// Computes the estimated size in bytes of the entry.
|
||||
pub fn entry_estimated_size(entry: &WalEntry) -> usize {
|
||||
let wrapper_size = size_of::<WalEntry>()
|
||||
+ entry.mutations.capacity() * size_of::<Mutation>()
|
||||
+ size_of::<Rows>();
|
||||
|
||||
let rows = entry.mutations[0].rows.as_ref().unwrap();
|
||||
|
||||
let schema_size = rows.schema.capacity() * size_of::<ColumnSchema>()
|
||||
+ rows
|
||||
.schema
|
||||
.iter()
|
||||
.map(|s| s.column_name.capacity())
|
||||
.sum::<usize>();
|
||||
let values_size = (rows.rows.capacity() * size_of::<Row>())
|
||||
+ rows
|
||||
.rows
|
||||
.iter()
|
||||
.map(|r| r.values.capacity() * size_of::<Value>())
|
||||
.sum::<usize>();
|
||||
|
||||
wrapper_size + schema_size + values_size
|
||||
}
|
||||
|
||||
fn build_rows(&self) -> Rows {
|
||||
let cols = self
|
||||
.schema
|
||||
.iter()
|
||||
.map(|col_schema| {
|
||||
let col_data_type = ColumnDataType::try_from(col_schema.datatype).unwrap();
|
||||
self.build_col(&col_data_type, self.num_rows)
|
||||
})
|
||||
.collect::<Vec<_>>();
|
||||
|
||||
let rows = (0..self.num_rows)
|
||||
.map(|i| {
|
||||
let values = cols.iter().map(|col| col[i as usize].clone()).collect();
|
||||
Row { values }
|
||||
})
|
||||
.collect();
|
||||
|
||||
Rows {
|
||||
schema: self.schema.clone(),
|
||||
rows,
|
||||
}
|
||||
}
|
||||
|
||||
fn build_col(&self, col_data_type: &ColumnDataType, num_rows: u32) -> Vec<Value> {
|
||||
let mut rng_guard = self.rng.lock().unwrap();
|
||||
let rng = rng_guard.as_mut().unwrap();
|
||||
match col_data_type {
|
||||
ColumnDataType::TimestampMillisecond => (0..num_rows)
|
||||
.map(|_| {
|
||||
let ts = self.next_timestamp.fetch_add(1000, Ordering::Relaxed);
|
||||
Value {
|
||||
value_data: Some(ValueData::TimestampMillisecondValue(ts)),
|
||||
}
|
||||
})
|
||||
.collect(),
|
||||
ColumnDataType::Int64 => (0..num_rows)
|
||||
.map(|_| {
|
||||
let v = rng.sample(Uniform::new(0, 10_000));
|
||||
Value {
|
||||
value_data: Some(ValueData::I64Value(v)),
|
||||
}
|
||||
})
|
||||
.collect(),
|
||||
ColumnDataType::Float64 => (0..num_rows)
|
||||
.map(|_| {
|
||||
let v = rng.sample(Uniform::new(0.0, 5000.0));
|
||||
Value {
|
||||
value_data: Some(ValueData::F64Value(v)),
|
||||
}
|
||||
})
|
||||
.collect(),
|
||||
ColumnDataType::String => (0..num_rows)
|
||||
.map(|_| {
|
||||
let v = Alphanumeric.sample_string(rng, 10);
|
||||
Value {
|
||||
value_data: Some(ValueData::StringValue(v)),
|
||||
}
|
||||
})
|
||||
.collect(),
|
||||
_ => unreachable!(),
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -1,10 +1,16 @@
# Configurations

- [Standalone Mode](#standalone-mode)
- [Distributed Mode](#distributed-mode)
- [Frontend](#frontend)
- [Metasrv](#metasrv)
- [Datanode](#datanode)

## Standalone Mode

{{ toml2docs "./standalone.example.toml" }}

## Cluster Mode
## Distributed Mode

### Frontend

@@ -1,5 +1,11 @@
# Configurations

- [Standalone Mode](#standalone-mode)
- [Distributed Mode](#distributed-mode)
- [Frontend](#frontend)
- [Metasrv](#metasrv)
- [Datanode](#datanode)

## Standalone Mode

| Key | Type | Default | Descriptions |
@@ -7,13 +13,22 @@
|
||||
| `mode` | String | `standalone` | The running mode of the datanode. It can be `standalone` or `distributed`. |
|
||||
| `enable_telemetry` | Bool | `true` | Enable telemetry to collect anonymous usage data. |
|
||||
| `default_timezone` | String | `None` | The default timezone of the server. |
|
||||
| `runtime` | -- | -- | The runtime options. |
|
||||
| `runtime.read_rt_size` | Integer | `8` | The number of threads to execute the runtime for global read operations. |
|
||||
| `runtime.write_rt_size` | Integer | `8` | The number of threads to execute the runtime for global write operations. |
|
||||
| `runtime.bg_rt_size` | Integer | `4` | The number of threads to execute the runtime for global background operations. |
|
||||
| `http` | -- | -- | The HTTP server options. |
|
||||
| `http.addr` | String | `127.0.0.1:4000` | The address to bind the HTTP server. |
|
||||
| `http.timeout` | String | `30s` | HTTP request timeout. |
|
||||
| `http.body_limit` | String | `64MB` | HTTP request body limit.<br/>Support the following units are supported: `B`, `KB`, `KiB`, `MB`, `MiB`, `GB`, `GiB`, `TB`, `TiB`, `PB`, `PiB`. |
|
||||
| `http.timeout` | String | `30s` | HTTP request timeout. Set to 0 to disable timeout. |
|
||||
| `http.body_limit` | String | `64MB` | HTTP request body limit.<br/>The following units are supported: `B`, `KB`, `KiB`, `MB`, `MiB`, `GB`, `GiB`, `TB`, `TiB`, `PB`, `PiB`.<br/>Set to 0 to disable limit. |
|
||||
| `grpc` | -- | -- | The gRPC server options. |
|
||||
| `grpc.addr` | String | `127.0.0.1:4001` | The address to bind the gRPC server. |
|
||||
| `grpc.runtime_size` | Integer | `8` | The number of server worker threads. |
|
||||
| `grpc.tls` | -- | -- | gRPC server TLS options, see `mysql.tls` section. |
|
||||
| `grpc.tls.mode` | String | `disable` | TLS mode. |
|
||||
| `grpc.tls.cert_path` | String | `None` | Certificate file path. |
|
||||
| `grpc.tls.key_path` | String | `None` | Private key file path. |
|
||||
| `grpc.tls.watch` | Bool | `false` | Watch for Certificate and key file change and auto reload.<br/>For now, gRPC tls config does not support auto reload. |
|
||||
| `mysql` | -- | -- | MySQL server options. |
|
||||
| `mysql.enable` | Bool | `true` | Whether to enable. |
|
||||
| `mysql.addr` | String | `127.0.0.1:4002` | The addr to bind the MySQL server. |
|
||||
@@ -27,7 +42,7 @@
|
||||
| `postgres.enable` | Bool | `true` | Whether to enable |
|
||||
| `postgres.addr` | String | `127.0.0.1:4003` | The addr to bind the PostgresSQL server. |
|
||||
| `postgres.runtime_size` | Integer | `2` | The number of server worker threads. |
|
||||
| `postgres.tls` | -- | -- | PostgresSQL server TLS options, see `mysql_options.tls` section. |
|
||||
| `postgres.tls` | -- | -- | PostgresSQL server TLS options, see `mysql.tls` section. |
|
||||
| `postgres.tls.mode` | String | `disable` | TLS mode. |
|
||||
| `postgres.tls.cert_path` | String | `None` | Certificate file path. |
|
||||
| `postgres.tls.key_path` | String | `None` | Private key file path. |
|
||||
@@ -51,8 +66,7 @@
|
||||
| `wal.prefill_log_files` | Bool | `false` | Whether to pre-create log files on start up.<br/>**It's only used when the provider is `raft_engine`**. |
|
||||
| `wal.sync_period` | String | `10s` | Duration for fsyncing log files.<br/>**It's only used when the provider is `raft_engine`**. |
|
||||
| `wal.broker_endpoints` | Array | -- | The Kafka broker endpoints.<br/>**It's only used when the provider is `kafka`**. |
|
||||
| `wal.max_batch_size` | String | `1MB` | The max size of a single producer batch.<br/>Warning: Kafka has a default limit of 1MB per message in a topic.<br/>**It's only used when the provider is `kafka`**. |
|
||||
| `wal.linger` | String | `200ms` | The linger duration of a kafka batch producer.<br/>**It's only used when the provider is `kafka`**. |
|
||||
| `wal.max_batch_bytes` | String | `1MB` | The max size of a single producer batch.<br/>Warning: Kafka has a default limit of 1MB per message in a topic.<br/>**It's only used when the provider is `kafka`**. |
|
||||
| `wal.consumer_wait_timeout` | String | `100ms` | The consumer wait timeout.<br/>**It's only used when the provider is `kafka`**. |
|
||||
| `wal.backoff_init` | String | `500ms` | The initial backoff delay.<br/>**It's only used when the provider is `kafka`**. |
|
||||
| `wal.backoff_max` | String | `10s` | The maximum backoff delay.<br/>**It's only used when the provider is `kafka`**. |
|
||||
@@ -96,6 +110,10 @@
|
||||
| `region_engine.mito.sst_meta_cache_size` | String | `128MB` | Cache size for SST metadata. Setting it to 0 to disable the cache.<br/>If not set, it's default to 1/32 of OS memory with a max limitation of 128MB. |
|
||||
| `region_engine.mito.vector_cache_size` | String | `512MB` | Cache size for vectors and arrow arrays. Setting it to 0 to disable the cache.<br/>If not set, it's default to 1/16 of OS memory with a max limitation of 512MB. |
|
||||
| `region_engine.mito.page_cache_size` | String | `512MB` | Cache size for pages of SST row groups. Setting it to 0 to disable the cache.<br/>If not set, it's default to 1/16 of OS memory with a max limitation of 512MB. |
|
||||
| `region_engine.mito.enable_experimental_write_cache` | Bool | `false` | Whether to enable the experimental write cache. |
|
||||
| `region_engine.mito.experimental_write_cache_path` | String | `""` | File system path for write cache, defaults to `{data_home}/write_cache`. |
|
||||
| `region_engine.mito.experimental_write_cache_size` | String | `512MB` | Capacity for write cache. |
|
||||
| `region_engine.mito.experimental_write_cache_ttl` | String | `1h` | TTL for write cache. |
|
||||
| `region_engine.mito.sst_write_buffer_size` | String | `8MB` | Buffer size for SST writing. |
|
||||
| `region_engine.mito.scan_parallelism` | Integer | `0` | Parallelism to scan a region (default: 1/4 of cpu cores).<br/>- `0`: using the default value (1/4 of cpu cores).<br/>- `1`: scan in current thread.<br/>- `n`: scan in parallelism n. |
|
||||
| `region_engine.mito.parallel_scan_channel_size` | Integer | `32` | Capacity of the channel to send data from parallel scan tasks to the main task. |
|
||||
@@ -127,9 +145,11 @@
|
||||
| `export_metrics.remote_write` | -- | -- | -- |
|
||||
| `export_metrics.remote_write.url` | String | `""` | The url the metrics send to. The url example can be: `http://127.0.0.1:4000/v1/prometheus/write?db=information_schema`. |
|
||||
| `export_metrics.remote_write.headers` | InlineTable | -- | HTTP headers of Prometheus remote-write carry. |
|
||||
| `tracing` | -- | -- | The tracing options. Only effect when compiled with `tokio-console` feature. |
|
||||
| `tracing.tokio_console_addr` | String | `None` | The tokio console address. |
|
||||
|
||||
|
||||
## Cluster Mode
|
||||
## Distributed Mode
|
||||
|
||||
### Frontend
|
||||
|
||||
@@ -137,16 +157,26 @@
|
||||
| --- | -----| ------- | ----------- |
|
||||
| `mode` | String | `standalone` | The running mode of the datanode. It can be `standalone` or `distributed`. |
|
||||
| `default_timezone` | String | `None` | The default timezone of the server. |
|
||||
| `runtime` | -- | -- | The runtime options. |
|
||||
| `runtime.read_rt_size` | Integer | `8` | The number of threads to execute the runtime for global read operations. |
|
||||
| `runtime.write_rt_size` | Integer | `8` | The number of threads to execute the runtime for global write operations. |
|
||||
| `runtime.bg_rt_size` | Integer | `4` | The number of threads to execute the runtime for global background operations. |
|
||||
| `heartbeat` | -- | -- | The heartbeat options. |
|
||||
| `heartbeat.interval` | String | `18s` | Interval for sending heartbeat messages to the metasrv. |
|
||||
| `heartbeat.retry_interval` | String | `3s` | Interval for retrying to send heartbeat messages to the metasrv. |
|
||||
| `http` | -- | -- | The HTTP server options. |
|
||||
| `http.addr` | String | `127.0.0.1:4000` | The address to bind the HTTP server. |
|
||||
| `http.timeout` | String | `30s` | HTTP request timeout. |
|
||||
| `http.body_limit` | String | `64MB` | HTTP request body limit.<br/>Support the following units are supported: `B`, `KB`, `KiB`, `MB`, `MiB`, `GB`, `GiB`, `TB`, `TiB`, `PB`, `PiB`. |
|
||||
| `http.timeout` | String | `30s` | HTTP request timeout. Set to 0 to disable timeout. |
|
||||
| `http.body_limit` | String | `64MB` | HTTP request body limit.<br/>The following units are supported: `B`, `KB`, `KiB`, `MB`, `MiB`, `GB`, `GiB`, `TB`, `TiB`, `PB`, `PiB`.<br/>Set to 0 to disable limit. |
|
||||
| `grpc` | -- | -- | The gRPC server options. |
|
||||
| `grpc.addr` | String | `127.0.0.1:4001` | The address to bind the gRPC server. |
|
||||
| `grpc.hostname` | String | `127.0.0.1` | The hostname advertised to the metasrv,<br/>and used for connections from outside the host |
|
||||
| `grpc.runtime_size` | Integer | `8` | The number of server worker threads. |
|
||||
| `grpc.tls` | -- | -- | gRPC server TLS options, see `mysql.tls` section. |
|
||||
| `grpc.tls.mode` | String | `disable` | TLS mode. |
|
||||
| `grpc.tls.cert_path` | String | `None` | Certificate file path. |
|
||||
| `grpc.tls.key_path` | String | `None` | Private key file path. |
|
||||
| `grpc.tls.watch` | Bool | `false` | Watch for Certificate and key file change and auto reload.<br/>For now, gRPC tls config does not support auto reload. |
|
||||
| `mysql` | -- | -- | MySQL server options. |
|
||||
| `mysql.enable` | Bool | `true` | Whether to enable. |
|
||||
| `mysql.addr` | String | `127.0.0.1:4002` | The addr to bind the MySQL server. |
|
||||
@@ -160,7 +190,7 @@
|
||||
| `postgres.enable` | Bool | `true` | Whether to enable |
|
||||
| `postgres.addr` | String | `127.0.0.1:4003` | The addr to bind the PostgresSQL server. |
|
||||
| `postgres.runtime_size` | Integer | `2` | The number of server worker threads. |
|
||||
| `postgres.tls` | -- | -- | PostgresSQL server TLS options, see `mysql_options.tls` section. |
|
||||
| `postgres.tls` | -- | -- | PostgresSQL server TLS options, see `mysql.tls` section. |
|
||||
| `postgres.tls.mode` | String | `disable` | TLS mode. |
|
||||
| `postgres.tls.cert_path` | String | `None` | Certificate file path. |
|
||||
| `postgres.tls.key_path` | String | `None` | Private key file path. |
|
||||
@@ -184,7 +214,6 @@
|
||||
| `meta_client.metadata_cache_tti` | String | `5m` | -- |
|
||||
| `datanode` | -- | -- | Datanode options. |
|
||||
| `datanode.client` | -- | -- | Datanode client options. |
|
||||
| `datanode.client.timeout` | String | `10s` | -- |
|
||||
| `datanode.client.connect_timeout` | String | `10s` | -- |
|
||||
| `datanode.client.tcp_nodelay` | Bool | `true` | -- |
|
||||
| `logging` | -- | -- | The logging options. |
|
||||
@@ -203,6 +232,8 @@
|
||||
| `export_metrics.remote_write` | -- | -- | -- |
|
||||
| `export_metrics.remote_write.url` | String | `""` | The url the metrics send to. The url example can be: `http://127.0.0.1:4000/v1/prometheus/write?db=information_schema`. |
|
||||
| `export_metrics.remote_write.headers` | InlineTable | -- | HTTP headers of Prometheus remote-write carry. |
|
||||
| `tracing` | -- | -- | The tracing options. Only effect when compiled with `tokio-console` feature. |
|
||||
| `tracing.tokio_console_addr` | String | `None` | The tokio console address. |
|
||||
|
||||
|
||||
### Metasrv
|
||||
@@ -217,6 +248,10 @@
|
||||
| `use_memory_store` | Bool | `false` | Store data in memory. |
|
||||
| `enable_telemetry` | Bool | `true` | Whether to enable greptimedb telemetry. |
|
||||
| `store_key_prefix` | String | `""` | If it's not empty, the metasrv will store all data with this key prefix. |
|
||||
| `runtime` | -- | -- | The runtime options. |
|
||||
| `runtime.read_rt_size` | Integer | `8` | The number of threads to execute the runtime for global read operations. |
|
||||
| `runtime.write_rt_size` | Integer | `8` | The number of threads to execute the runtime for global write operations. |
|
||||
| `runtime.bg_rt_size` | Integer | `4` | The number of threads to execute the runtime for global background operations. |
|
||||
| `procedure` | -- | -- | Procedure storage options. |
|
||||
| `procedure.max_retry_times` | Integer | `12` | Procedure max retry time. |
|
||||
| `procedure.retry_delay` | String | `500ms` | Initial retry delay of procedures, increases exponentially |
|
||||
@@ -259,6 +294,8 @@
|
||||
| `export_metrics.remote_write` | -- | -- | -- |
|
||||
| `export_metrics.remote_write.url` | String | `""` | The url the metrics send to. The url example can be: `http://127.0.0.1:4000/v1/prometheus/write?db=information_schema`. |
|
||||
| `export_metrics.remote_write.headers` | InlineTable | -- | HTTP headers of Prometheus remote-write carry. |
|
||||
| `tracing` | -- | -- | The tracing options. Only effect when compiled with `tokio-console` feature. |
|
||||
| `tracing.tokio_console_addr` | String | `None` | The tokio console address. |
|
||||
|
||||
|
||||
### Datanode
|
||||
@@ -269,12 +306,28 @@
|
||||
| `node_id` | Integer | `None` | The datanode identifier and should be unique in the cluster. |
|
||||
| `require_lease_before_startup` | Bool | `false` | Start services after regions have obtained leases.<br/>It will block the datanode start if it can't receive leases in the heartbeat from metasrv. |
|
||||
| `init_regions_in_background` | Bool | `false` | Initialize all regions in the background during the startup.<br/>By default, it provides services after all regions have been initialized. |
|
||||
| `rpc_addr` | String | `127.0.0.1:3001` | The gRPC address of the datanode. |
|
||||
| `rpc_hostname` | String | `None` | The hostname of the datanode. |
|
||||
| `rpc_runtime_size` | Integer | `8` | The number of gRPC server worker threads. |
|
||||
| `rpc_max_recv_message_size` | String | `512MB` | The maximum receive message size for gRPC server. |
|
||||
| `rpc_max_send_message_size` | String | `512MB` | The maximum send message size for gRPC server. |
|
||||
| `enable_telemetry` | Bool | `true` | Enable telemetry to collect anonymous usage data. |
|
||||
| `init_regions_parallelism` | Integer | `16` | Parallelism of initializing regions. |
|
||||
| `rpc_addr` | String | `None` | Deprecated, use `grpc.addr` instead. |
|
||||
| `rpc_hostname` | String | `None` | Deprecated, use `grpc.hostname` instead. |
|
||||
| `rpc_runtime_size` | Integer | `None` | Deprecated, use `grpc.runtime_size` instead. |
|
||||
| `rpc_max_recv_message_size` | String | `None` | Deprecated, use `grpc.rpc_max_recv_message_size` instead. |
|
||||
| `rpc_max_send_message_size` | String | `None` | Deprecated, use `grpc.rpc_max_send_message_size` instead. |
|
||||
| `grpc` | -- | -- | The gRPC server options. |
|
||||
| `grpc.addr` | String | `127.0.0.1:3001` | The address to bind the gRPC server. |
|
||||
| `grpc.hostname` | String | `127.0.0.1` | The hostname advertised to the metasrv,<br/>and used for connections from outside the host |
|
||||
| `grpc.runtime_size` | Integer | `8` | The number of server worker threads. |
|
||||
| `grpc.max_recv_message_size` | String | `512MB` | The maximum receive message size for gRPC server. |
|
||||
| `grpc.max_send_message_size` | String | `512MB` | The maximum send message size for gRPC server. |
|
||||
| `grpc.tls` | -- | -- | gRPC server TLS options, see `mysql.tls` section. |
|
||||
| `grpc.tls.mode` | String | `disable` | TLS mode. |
|
||||
| `grpc.tls.cert_path` | String | `None` | Certificate file path. |
|
||||
| `grpc.tls.key_path` | String | `None` | Private key file path. |
|
||||
| `grpc.tls.watch` | Bool | `false` | Watch for Certificate and key file change and auto reload.<br/>For now, gRPC tls config does not support auto reload. |
|
||||
| `runtime` | -- | -- | The runtime options. |
|
||||
| `runtime.read_rt_size` | Integer | `8` | The number of threads to execute the runtime for global read operations. |
|
||||
| `runtime.write_rt_size` | Integer | `8` | The number of threads to execute the runtime for global write operations. |
|
||||
| `runtime.bg_rt_size` | Integer | `4` | The number of threads to execute the runtime for global background operations. |
|
||||
| `heartbeat` | -- | -- | The heartbeat options. |
|
||||
| `heartbeat.interval` | String | `3s` | Interval for sending heartbeat messages to the metasrv. |
|
||||
| `heartbeat.retry_interval` | String | `3s` | Interval for retrying to send heartbeat messages to the metasrv. |
|
||||
@@ -300,8 +353,7 @@
|
||||
| `wal.prefill_log_files` | Bool | `false` | Whether to pre-create log files on start up.<br/>**It's only used when the provider is `raft_engine`**. |
|
||||
| `wal.sync_period` | String | `10s` | Duration for fsyncing log files.<br/>**It's only used when the provider is `raft_engine`**. |
|
||||
| `wal.broker_endpoints` | Array | -- | The Kafka broker endpoints.<br/>**It's only used when the provider is `kafka`**. |
|
||||
| `wal.max_batch_size` | String | `1MB` | The max size of a single producer batch.<br/>Warning: Kafka has a default limit of 1MB per message in a topic.<br/>**It's only used when the provider is `kafka`**. |
|
||||
| `wal.linger` | String | `200ms` | The linger duration of a kafka batch producer.<br/>**It's only used when the provider is `kafka`**. |
|
||||
| `wal.max_batch_bytes` | String | `1MB` | The max size of a single producer batch.<br/>Warning: Kafka has a default limit of 1MB per message in a topic.<br/>**It's only used when the provider is `kafka`**. |
|
||||
| `wal.consumer_wait_timeout` | String | `100ms` | The consumer wait timeout.<br/>**It's only used when the provider is `kafka`**. |
|
||||
| `wal.backoff_init` | String | `500ms` | The initial backoff delay.<br/>**It's only used when the provider is `kafka`**. |
|
||||
| `wal.backoff_max` | String | `10s` | The maximum backoff delay.<br/>**It's only used when the provider is `kafka`**. |
|
||||
@@ -339,6 +391,10 @@
|
||||
| `region_engine.mito.sst_meta_cache_size` | String | `128MB` | Cache size for SST metadata. Setting it to 0 to disable the cache.<br/>If not set, it's default to 1/32 of OS memory with a max limitation of 128MB. |
|
||||
| `region_engine.mito.vector_cache_size` | String | `512MB` | Cache size for vectors and arrow arrays. Setting it to 0 to disable the cache.<br/>If not set, it's default to 1/16 of OS memory with a max limitation of 512MB. |
|
||||
| `region_engine.mito.page_cache_size` | String | `512MB` | Cache size for pages of SST row groups. Setting it to 0 to disable the cache.<br/>If not set, it's default to 1/16 of OS memory with a max limitation of 512MB. |
|
||||
| `region_engine.mito.enable_experimental_write_cache` | Bool | `false` | Whether to enable the experimental write cache. |
|
||||
| `region_engine.mito.experimental_write_cache_path` | String | `""` | File system path for write cache, defaults to `{data_home}/write_cache`. |
|
||||
| `region_engine.mito.experimental_write_cache_size` | String | `512MB` | Capacity for write cache. |
|
||||
| `region_engine.mito.experimental_write_cache_ttl` | String | `1h` | TTL for write cache. |
|
||||
| `region_engine.mito.sst_write_buffer_size` | String | `8MB` | Buffer size for SST writing. |
|
||||
| `region_engine.mito.scan_parallelism` | Integer | `0` | Parallelism to scan a region (default: 1/4 of cpu cores).<br/>- `0`: using the default value (1/4 of cpu cores).<br/>- `1`: scan in current thread.<br/>- `n`: scan in parallelism n. |
|
||||
| `region_engine.mito.parallel_scan_channel_size` | Integer | `32` | Capacity of the channel to send data from parallel scan tasks to the main task. |
|
||||
@@ -370,3 +426,5 @@
|
||||
| `export_metrics.remote_write` | -- | -- | -- |
|
||||
| `export_metrics.remote_write.url` | String | `""` | The url the metrics send to. The url example can be: `http://127.0.0.1:4000/v1/prometheus/write?db=information_schema`. |
|
||||
| `export_metrics.remote_write.headers` | InlineTable | -- | HTTP headers of Prometheus remote-write carry. |
|
||||
| `tracing` | -- | -- | The tracing options. Only effect when compiled with `tokio-console` feature. |
|
||||
| `tracing.tokio_console_addr` | String | `None` | The tokio console address. |
|
||||
|
||||
@@ -13,24 +13,71 @@ require_lease_before_startup = false
|
||||
## By default, it provides services after all regions have been initialized.
|
||||
init_regions_in_background = false
|
||||
|
||||
## The gRPC address of the datanode.
|
||||
## Enable telemetry to collect anonymous usage data.
|
||||
enable_telemetry = true
|
||||
|
||||
## Parallelism of initializing regions.
|
||||
init_regions_parallelism = 16
|
||||
|
||||
## Deprecated, use `grpc.addr` instead.
|
||||
## +toml2docs:none-default
|
||||
rpc_addr = "127.0.0.1:3001"
|
||||
|
||||
## The hostname of the datanode.
|
||||
## Deprecated, use `grpc.hostname` instead.
|
||||
## +toml2docs:none-default
|
||||
rpc_hostname = "127.0.0.1"
|
||||
|
||||
## The number of gRPC server worker threads.
|
||||
## Deprecated, use `grpc.runtime_size` instead.
|
||||
## +toml2docs:none-default
|
||||
rpc_runtime_size = 8
|
||||
|
||||
## The maximum receive message size for gRPC server.
|
||||
## Deprecated, use `grpc.rpc_max_recv_message_size` instead.
|
||||
## +toml2docs:none-default
|
||||
rpc_max_recv_message_size = "512MB"
|
||||
|
||||
## The maximum send message size for gRPC server.
|
||||
## Deprecated, use `grpc.rpc_max_send_message_size` instead.
|
||||
## +toml2docs:none-default
|
||||
rpc_max_send_message_size = "512MB"
|
||||
|
||||
## Enable telemetry to collect anonymous usage data.
|
||||
enable_telemetry = true
|
||||
## The gRPC server options.
|
||||
[grpc]
|
||||
## The address to bind the gRPC server.
|
||||
addr = "127.0.0.1:3001"
|
||||
## The hostname advertised to the metasrv,
|
||||
## and used for connections from outside the host
|
||||
hostname = "127.0.0.1"
|
||||
## The number of server worker threads.
|
||||
runtime_size = 8
|
||||
## The maximum receive message size for gRPC server.
|
||||
max_recv_message_size = "512MB"
|
||||
## The maximum send message size for gRPC server.
|
||||
max_send_message_size = "512MB"
|
||||
|
||||
## gRPC server TLS options, see `mysql.tls` section.
|
||||
[grpc.tls]
|
||||
## TLS mode.
|
||||
mode = "disable"
|
||||
|
||||
## Certificate file path.
|
||||
## +toml2docs:none-default
|
||||
cert_path = ""
|
||||
|
||||
## Private key file path.
|
||||
## +toml2docs:none-default
|
||||
key_path = ""
|
||||
|
||||
## Watch for Certificate and key file change and auto reload.
|
||||
## For now, gRPC tls config does not support auto reload.
|
||||
watch = false
|
||||
|
||||
## The runtime options.
|
||||
[runtime]
|
||||
## The number of threads to execute the runtime for global read operations.
|
||||
read_rt_size = 8
|
||||
## The number of threads to execute the runtime for global write operations.
|
||||
write_rt_size = 8
|
||||
## The number of threads to execute the runtime for global background operations.
|
||||
bg_rt_size = 4
|
||||
|
||||
## The heartbeat options.
|
||||
[heartbeat]
|
||||
@@ -120,11 +167,7 @@ broker_endpoints = ["127.0.0.1:9092"]
|
||||
## The max size of a single producer batch.
|
||||
## Warning: Kafka has a default limit of 1MB per message in a topic.
|
||||
## **It's only used when the provider is `kafka`**.
|
||||
max_batch_size = "1MB"
|
||||
|
||||
## The linger duration of a kafka batch producer.
|
||||
## **It's only used when the provider is `kafka`**.
|
||||
linger = "200ms"
|
||||
max_batch_bytes = "1MB"
|
||||
|
||||
## The consumer wait timeout.
|
||||
## **It's only used when the provider is `kafka`**.
|
||||
@@ -324,6 +367,18 @@ vector_cache_size = "512MB"
|
||||
## If not set, it's default to 1/16 of OS memory with a max limitation of 512MB.
|
||||
page_cache_size = "512MB"
|
||||
|
||||
## Whether to enable the experimental write cache.
|
||||
enable_experimental_write_cache = false
|
||||
|
||||
## File system path for write cache, defaults to `{data_home}/write_cache`.
|
||||
experimental_write_cache_path = ""
|
||||
|
||||
## Capacity for write cache.
|
||||
experimental_write_cache_size = "512MB"
|
||||
|
||||
## TTL for write cache.
|
||||
experimental_write_cache_ttl = "1h"
|
||||
|
||||
## Buffer size for SST writing.
|
||||
sst_write_buffer_size = "8MB"
|
||||
|
||||
@@ -428,3 +483,9 @@ url = ""
|
||||
|
||||
## HTTP headers of Prometheus remote-write carry.
|
||||
headers = { }
|
||||
|
||||
## The tracing options. Only effect when compiled with `tokio-console` feature.
|
||||
[tracing]
|
||||
## The tokio console address.
|
||||
## +toml2docs:none-default
|
||||
tokio_console_addr = "127.0.0.1"
|
||||
|
||||
@@ -5,6 +5,15 @@ mode = "standalone"
|
||||
## +toml2docs:none-default
|
||||
default_timezone = "UTC"
|
||||
|
||||
## The runtime options.
|
||||
[runtime]
|
||||
## The number of threads to execute the runtime for global read operations.
|
||||
read_rt_size = 8
|
||||
## The number of threads to execute the runtime for global write operations.
|
||||
write_rt_size = 8
|
||||
## The number of threads to execute the runtime for global background operations.
|
||||
bg_rt_size = 4
|
||||
|
||||
## The heartbeat options.
|
||||
[heartbeat]
|
||||
## Interval for sending heartbeat messages to the metasrv.
|
||||
@@ -17,19 +26,40 @@ retry_interval = "3s"
|
||||
[http]
|
||||
## The address to bind the HTTP server.
|
||||
addr = "127.0.0.1:4000"
|
||||
## HTTP request timeout.
|
||||
## HTTP request timeout. Set to 0 to disable timeout.
|
||||
timeout = "30s"
|
||||
## HTTP request body limit.
|
||||
## Support the following units are supported: `B`, `KB`, `KiB`, `MB`, `MiB`, `GB`, `GiB`, `TB`, `TiB`, `PB`, `PiB`.
|
||||
## The following units are supported: `B`, `KB`, `KiB`, `MB`, `MiB`, `GB`, `GiB`, `TB`, `TiB`, `PB`, `PiB`.
|
||||
## Set to 0 to disable limit.
|
||||
body_limit = "64MB"
|
||||
|
||||
## The gRPC server options.
|
||||
[grpc]
|
||||
## The address to bind the gRPC server.
|
||||
addr = "127.0.0.1:4001"
|
||||
## The hostname advertised to the metasrv,
|
||||
## and used for connections from outside the host
|
||||
hostname = "127.0.0.1"
|
||||
## The number of server worker threads.
|
||||
runtime_size = 8
|
||||
|
||||
## gRPC server TLS options, see `mysql.tls` section.
|
||||
[grpc.tls]
|
||||
## TLS mode.
|
||||
mode = "disable"
|
||||
|
||||
## Certificate file path.
|
||||
## +toml2docs:none-default
|
||||
cert_path = ""
|
||||
|
||||
## Private key file path.
|
||||
## +toml2docs:none-default
|
||||
key_path = ""
|
||||
|
||||
## Watch for Certificate and key file change and auto reload.
|
||||
## For now, gRPC tls config does not support auto reload.
|
||||
watch = false
|
||||
|
||||
## MySQL server options.
|
||||
[mysql]
|
||||
## Whether to enable.
|
||||
@@ -70,7 +100,7 @@ addr = "127.0.0.1:4003"
|
||||
## The number of server worker threads.
|
||||
runtime_size = 2
|
||||
|
||||
## PostgresSQL server TLS options, see `mysql_options.tls` section.
|
||||
## PostgresSQL server TLS options, see `mysql.tls` section.
|
||||
[postgres.tls]
|
||||
## TLS mode.
|
||||
mode = "disable"
|
||||
@@ -136,7 +166,6 @@ metadata_cache_tti = "5m"
|
||||
[datanode]
|
||||
## Datanode client options.
|
||||
[datanode.client]
|
||||
timeout = "10s"
|
||||
connect_timeout = "10s"
|
||||
tcp_nodelay = true
|
||||
|
||||
@@ -186,3 +215,9 @@ url = ""
|
||||
|
||||
## HTTP headers of Prometheus remote-write carry.
|
||||
headers = { }
|
||||
|
||||
## The tracing options. Only effect when compiled with `tokio-console` feature.
|
||||
[tracing]
|
||||
## The tokio console address.
|
||||
## +toml2docs:none-default
|
||||
tokio_console_addr = "127.0.0.1"
|
||||
|
||||
@@ -25,6 +25,15 @@ enable_telemetry = true
|
||||
## If it's not empty, the metasrv will store all data with this key prefix.
|
||||
store_key_prefix = ""
|
||||
|
||||
## The runtime options.
|
||||
[runtime]
|
||||
## The number of threads to execute the runtime for global read operations.
|
||||
read_rt_size = 8
|
||||
## The number of threads to execute the runtime for global write operations.
|
||||
write_rt_size = 8
|
||||
## The number of threads to execute the runtime for global background operations.
|
||||
bg_rt_size = 4
|
||||
|
||||
## Procedure storage options.
|
||||
[procedure]
|
||||
|
||||
@@ -141,3 +150,9 @@ url = ""
|
||||
|
||||
## HTTP headers of Prometheus remote-write carry.
|
||||
headers = { }
|
||||
|
||||
## The tracing options. Only effect when compiled with `tokio-console` feature.
|
||||
[tracing]
|
||||
## The tokio console address.
|
||||
## +toml2docs:none-default
|
||||
tokio_console_addr = "127.0.0.1"
|
||||
|
||||
@@ -8,14 +8,24 @@ enable_telemetry = true
|
||||
## +toml2docs:none-default
|
||||
default_timezone = "UTC"
|
||||
|
||||
## The runtime options.
|
||||
[runtime]
|
||||
## The number of threads to execute the runtime for global read operations.
|
||||
read_rt_size = 8
|
||||
## The number of threads to execute the runtime for global write operations.
|
||||
write_rt_size = 8
|
||||
## The number of threads to execute the runtime for global background operations.
|
||||
bg_rt_size = 4
|
||||
|
||||
## The HTTP server options.
|
||||
[http]
|
||||
## The address to bind the HTTP server.
|
||||
addr = "127.0.0.1:4000"
|
||||
## HTTP request timeout.
|
||||
## HTTP request timeout. Set to 0 to disable timeout.
|
||||
timeout = "30s"
|
||||
## HTTP request body limit.
|
||||
## Support the following units are supported: `B`, `KB`, `KiB`, `MB`, `MiB`, `GB`, `GiB`, `TB`, `TiB`, `PB`, `PiB`.
|
||||
## The following units are supported: `B`, `KB`, `KiB`, `MB`, `MiB`, `GB`, `GiB`, `TB`, `TiB`, `PB`, `PiB`.
|
||||
## Set to 0 to disable limit.
|
||||
body_limit = "64MB"
|
||||
|
||||
## The gRPC server options.
|
||||
@@ -25,6 +35,23 @@ addr = "127.0.0.1:4001"
|
||||
## The number of server worker threads.
|
||||
runtime_size = 8
|
||||
|
||||
## gRPC server TLS options, see `mysql.tls` section.
|
||||
[grpc.tls]
|
||||
## TLS mode.
|
||||
mode = "disable"
|
||||
|
||||
## Certificate file path.
|
||||
## +toml2docs:none-default
|
||||
cert_path = ""
|
||||
|
||||
## Private key file path.
|
||||
## +toml2docs:none-default
|
||||
key_path = ""
|
||||
|
||||
## Watch for Certificate and key file change and auto reload.
|
||||
## For now, gRPC tls config does not support auto reload.
|
||||
watch = false
|
||||
|
||||
## MySQL server options.
|
||||
[mysql]
|
||||
## Whether to enable.
|
||||
@@ -65,7 +92,7 @@ addr = "127.0.0.1:4003"
|
||||
## The number of server worker threads.
|
||||
runtime_size = 2
|
||||
|
||||
## PostgresSQL server TLS options, see `mysql_options.tls` section.
|
||||
## PostgresSQL server TLS options, see `mysql.tls` section.
|
||||
[postgres.tls]
|
||||
## TLS mode.
|
||||
mode = "disable"
|
||||
@@ -149,11 +176,7 @@ broker_endpoints = ["127.0.0.1:9092"]
|
||||
## The max size of a single producer batch.
|
||||
## Warning: Kafka has a default limit of 1MB per message in a topic.
|
||||
## **It's only used when the provider is `kafka`**.
|
||||
max_batch_size = "1MB"
|
||||
|
||||
## The linger duration of a kafka batch producer.
|
||||
## **It's only used when the provider is `kafka`**.
|
||||
linger = "200ms"
|
||||
max_batch_bytes = "1MB"
|
||||
|
||||
## The consumer wait timeout.
|
||||
## **It's only used when the provider is `kafka`**.
|
||||
@@ -367,6 +390,18 @@ vector_cache_size = "512MB"
|
||||
## If not set, it's default to 1/16 of OS memory with a max limitation of 512MB.
|
||||
page_cache_size = "512MB"
|
||||
|
||||
## Whether to enable the experimental write cache.
|
||||
enable_experimental_write_cache = false
|
||||
|
||||
## File system path for write cache, defaults to `{data_home}/write_cache`.
|
||||
experimental_write_cache_path = ""
|
||||
|
||||
## Capacity for write cache.
|
||||
experimental_write_cache_size = "512MB"
|
||||
|
||||
## TTL for write cache.
|
||||
experimental_write_cache_ttl = "1h"
|
||||
|
||||
## Buffer size for SST writing.
|
||||
sst_write_buffer_size = "8MB"
|
||||
|
||||
@@ -471,3 +506,9 @@ url = ""
|
||||
|
||||
## HTTP headers of Prometheus remote-write carry.
|
||||
headers = { }
|
||||
|
||||
## The tracing options. Only effect when compiled with `tokio-console` feature.
|
||||
[tracing]
|
||||
## The tokio console address.
|
||||
## +toml2docs:none-default
|
||||
tokio_console_addr = "127.0.0.1"
|
||||
|
||||
106
cyborg/bin/follow-up-docs-issue.ts
Normal file
@@ -0,0 +1,106 @@
|
||||
/*
|
||||
* Copyright 2023 Greptime Team
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
import * as core from '@actions/core'
|
||||
import {handleError, obtainClient} from "@/common";
|
||||
import {context} from "@actions/github";
|
||||
import {PullRequestEditedEvent, PullRequestEvent, PullRequestOpenedEvent} from "@octokit/webhooks-types";
|
||||
// @ts-expect-error moduleResolution:nodenext issue 54523
|
||||
import {RequestError} from "@octokit/request-error";
|
||||
|
||||
const needFollowUpDocs = "[x] This PR requires documentation updates."
|
||||
const labelDocsNotRequired = "docs-not-required"
|
||||
const labelDocsRequired = "docs-required"
|
||||
|
||||
async function main() {
|
||||
if (!context.payload.pull_request) {
|
||||
throw new Error(`Only pull request event supported. ${context.eventName} is unsupported.`)
|
||||
}
|
||||
|
||||
const client = obtainClient("GITHUB_TOKEN")
|
||||
const docsClient = obtainClient("DOCS_REPO_TOKEN")
|
||||
const payload = context.payload as PullRequestEvent
|
||||
const { owner, repo, number, actor, title, html_url } = {
|
||||
owner: payload.pull_request.base.user.login,
|
||||
repo: payload.pull_request.base.repo.name,
|
||||
number: payload.pull_request.number,
|
||||
title: payload.pull_request.title,
|
||||
html_url: payload.pull_request.html_url,
|
||||
actor: payload.pull_request.user.login,
|
||||
}
|
||||
const followUpDocs = checkPullRequestEvent(payload)
|
||||
if (followUpDocs) {
|
||||
core.info("Follow up docs.")
|
||||
await client.rest.issues.removeLabel({
|
||||
owner, repo, issue_number: number, name: labelDocsNotRequired,
|
||||
}).catch((e: RequestError) => {
|
||||
if (e.status != 404) {
|
||||
throw e;
|
||||
}
|
||||
core.debug(`Label ${labelDocsNotRequired} not exist.`)
|
||||
})
|
||||
await client.rest.issues.addLabels({
|
||||
owner, repo, issue_number: number, labels: [labelDocsRequired],
|
||||
})
|
||||
await docsClient.rest.issues.create({
|
||||
owner: 'GreptimeTeam',
|
||||
repo: 'docs',
|
||||
title: `Update docs for ${title}`,
|
||||
body: `A document change request is generated from ${html_url}`,
|
||||
assignee: actor,
|
||||
}).then((res) => {
|
||||
core.info(`Created issue ${res.data}`)
|
||||
})
|
||||
} else {
|
||||
core.info("No need to follow up docs.")
|
||||
await client.rest.issues.removeLabel({
|
||||
owner, repo, issue_number: number, name: labelDocsRequired
|
||||
}).catch((e: RequestError) => {
|
||||
if (e.status != 404) {
|
||||
throw e;
|
||||
}
|
||||
core.debug(`Label ${labelDocsRequired} not exist.`)
|
||||
})
|
||||
await client.rest.issues.addLabels({
|
||||
owner, repo, issue_number: number, labels: [labelDocsNotRequired],
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
function checkPullRequestEvent(payload: PullRequestEvent) {
|
||||
switch (payload.action) {
|
||||
case "opened":
|
||||
return checkPullRequestOpenedEvent(payload as PullRequestOpenedEvent)
|
||||
case "edited":
|
||||
return checkPullRequestEditedEvent(payload as PullRequestEditedEvent)
|
||||
default:
|
||||
throw new Error(`${payload.action} is unsupported.`)
|
||||
}
|
||||
}
|
||||
|
||||
function checkPullRequestOpenedEvent(event: PullRequestOpenedEvent): boolean {
|
||||
// @ts-ignore
|
||||
return event.pull_request.body?.includes(needFollowUpDocs)
|
||||
}
|
||||
|
||||
function checkPullRequestEditedEvent(event: PullRequestEditedEvent): boolean {
|
||||
const previous = event.changes.body?.from.includes(needFollowUpDocs)
|
||||
const current = event.pull_request.body?.includes(needFollowUpDocs)
|
||||
// from docs-not-need to docs-required
|
||||
return (!previous) && current
|
||||
}
|
||||
|
||||
main().catch(handleError)
|
||||
83
cyborg/bin/report-ci-failure.ts
Normal file
@@ -0,0 +1,83 @@
|
||||
/*
|
||||
* Copyright 2023 Greptime Team
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
import * as core from '@actions/core'
|
||||
import {handleError, obtainClient} from "@/common"
|
||||
import {context} from "@actions/github"
|
||||
import _ from "lodash"
|
||||
|
||||
async function main() {
|
||||
const success = process.env["CI_REPORT_STATUS"] === "true"
|
||||
core.info(`CI_REPORT_STATUS=${process.env["CI_REPORT_STATUS"]}, resolved to ${success}`)
|
||||
|
||||
const client = obtainClient("GITHUB_TOKEN")
|
||||
const title = `Workflow run '${context.workflow}' failed`
|
||||
const url = `${process.env["GITHUB_SERVER_URL"]}/${process.env["GITHUB_REPOSITORY"]}/actions/runs/${process.env["GITHUB_RUN_ID"]}`
|
||||
const failure_comment = `@GreptimeTeam/db-approver\nNew failure: ${url} `
|
||||
const success_comment = `@GreptimeTeam/db-approver\nBack to success: ${url}`
|
||||
|
||||
const {owner, repo} = context.repo
|
||||
const labels = ['O-ci-failure']
|
||||
|
||||
const issues = await client.paginate(client.rest.issues.listForRepo, {
|
||||
owner,
|
||||
repo,
|
||||
labels: labels.join(','),
|
||||
state: "open",
|
||||
sort: "created",
|
||||
direction: "desc",
|
||||
});
|
||||
const issue = _.find(issues, (i) => i.title === title);
|
||||
|
||||
if (issue) { // exist issue
|
||||
core.info(`Found previous issue ${issue.html_url}`)
|
||||
if (!success) {
|
||||
await client.rest.issues.createComment({
|
||||
owner,
|
||||
repo,
|
||||
issue_number: issue.number,
|
||||
body: failure_comment,
|
||||
})
|
||||
} else {
|
||||
await client.rest.issues.createComment({
|
||||
owner,
|
||||
repo,
|
||||
issue_number: issue.number,
|
||||
body: success_comment,
|
||||
})
|
||||
await client.rest.issues.update({
|
||||
owner,
|
||||
repo,
|
||||
issue_number: issue.number,
|
||||
state: "closed",
|
||||
state_reason: "completed",
|
||||
})
|
||||
}
|
||||
core.setOutput("html_url", issue.html_url)
|
||||
} else if (!success) { // create new issue for failure
|
||||
const issue = await client.rest.issues.create({
|
||||
owner,
|
||||
repo,
|
||||
title,
|
||||
labels,
|
||||
body: failure_comment,
|
||||
})
|
||||
core.info(`Created issue ${issue.data.html_url}`)
|
||||
core.setOutput("html_url", issue.data.html_url)
|
||||
}
|
||||
}
|
||||
|
||||
main().catch(handleError)
|
||||
@@ -7,6 +7,7 @@
|
||||
"dependencies": {
|
||||
"@actions/core": "^1.10.1",
|
||||
"@actions/github": "^6.0.0",
|
||||
"@octokit/request-error": "^6.1.1",
|
||||
"@octokit/webhooks-types": "^7.5.1",
|
||||
"conventional-commit-types": "^3.0.0",
|
||||
"conventional-commits-parser": "^5.0.0",
|
||||
|
||||
10
cyborg/pnpm-lock.yaml
generated
@@ -11,6 +11,9 @@ dependencies:
|
||||
'@actions/github':
|
||||
specifier: ^6.0.0
|
||||
version: 6.0.0
|
||||
'@octokit/request-error':
|
||||
specifier: ^6.1.1
|
||||
version: 6.1.1
|
||||
'@octokit/webhooks-types':
|
||||
specifier: ^7.5.1
|
||||
version: 7.5.1
|
||||
@@ -359,6 +362,13 @@ packages:
|
||||
once: 1.4.0
|
||||
dev: false
|
||||
|
||||
/@octokit/request-error@6.1.1:
|
||||
resolution: {integrity: sha512-1mw1gqT3fR/WFvnoVpY/zUM2o/XkMs/2AszUUG9I69xn0JFLv6PGkPhNk5lbfvROs79wiS0bqiJNxfCZcRJJdg==}
|
||||
engines: {node: '>= 18'}
|
||||
dependencies:
|
||||
'@octokit/types': 13.5.0
|
||||
dev: false
|
||||
|
||||
/@octokit/request@8.4.0:
|
||||
resolution: {integrity: sha512-9Bb014e+m2TgBeEJGEbdplMVWwPmL1FPtggHQRkV+WVsMggPtEkLKPlcVYm/o8xKLkpJ7B+6N8WfQMtDLX2Dpw==}
|
||||
engines: {node: '>= 18'}
|
||||
|
||||
16
docker/ci/ubuntu/Dockerfile.fuzztests
Normal file
@@ -0,0 +1,16 @@
|
||||
FROM ubuntu:22.04
|
||||
|
||||
# The binary name of GreptimeDB executable.
|
||||
# Defaults to "greptime", but sometimes in other projects it might be different.
|
||||
ARG TARGET_BIN=greptime
|
||||
|
||||
RUN apt-get update && DEBIAN_FRONTEND=noninteractive apt-get install -y \
|
||||
ca-certificates \
|
||||
curl
|
||||
|
||||
ARG BINARY_PATH
|
||||
ADD $BINARY_PATH/$TARGET_BIN /greptime/bin/
|
||||
|
||||
ENV PATH /greptime/bin/:$PATH
|
||||
|
||||
ENTRYPOINT ["greptime"]
|
||||
@@ -34,7 +34,7 @@ RUN rustup toolchain install ${RUST_TOOLCHAIN}
|
||||
RUN rustup target add aarch64-linux-android
|
||||
|
||||
# Install cargo-ndk
|
||||
RUN cargo install cargo-ndk
|
||||
RUN cargo install cargo-ndk@3.5.4
|
||||
ENV ANDROID_NDK_HOME $NDK_ROOT
|
||||
|
||||
# Builder entrypoint.
|
||||
|
||||
102
docker/docker-compose/cluster-with-etcd.yaml
Normal file
@@ -0,0 +1,102 @@
|
||||
x-custom:
|
||||
initial_cluster_token: &initial_cluster_token "--initial-cluster-token=etcd-cluster"
|
||||
common_settings: &common_settings
|
||||
image: quay.io/coreos/etcd:v3.5.10
|
||||
entrypoint: /usr/local/bin/etcd
|
||||
|
||||
services:
|
||||
etcd0:
|
||||
<<: *common_settings
|
||||
container_name: etcd0
|
||||
ports:
|
||||
- 2379:2379
|
||||
- 2380:2380
|
||||
command:
|
||||
- --name=etcd0
|
||||
- --data-dir=/var/lib/etcd
|
||||
- --initial-advertise-peer-urls=http://etcd0:2380
|
||||
- --listen-peer-urls=http://0.0.0.0:2380
|
||||
- --listen-client-urls=http://0.0.0.0:2379
|
||||
- --advertise-client-urls=http://etcd0:2379
|
||||
- --heartbeat-interval=250
|
||||
- --election-timeout=1250
|
||||
- --initial-cluster=etcd0=http://etcd0:2380
|
||||
- --initial-cluster-state=new
|
||||
- *initial_cluster_token
|
||||
volumes:
|
||||
- /tmp/greptimedb-cluster-docker-compose/etcd0:/var/lib/etcd
|
||||
healthcheck:
|
||||
test: [ "CMD", "etcdctl", "--endpoints=http://etcd0:2379", "endpoint", "health" ]
|
||||
interval: 5s
|
||||
timeout: 3s
|
||||
retries: 5
|
||||
networks:
|
||||
- greptimedb
|
||||
|
||||
metasrv:
|
||||
image: docker.io/greptime/greptimedb:latest
|
||||
container_name: metasrv
|
||||
ports:
|
||||
- 3002:3002
|
||||
command:
|
||||
- metasrv
|
||||
- start
|
||||
- --bind-addr=0.0.0.0:3002
|
||||
- --server-addr=metasrv:3002
|
||||
- --store-addrs=etcd0:2379
|
||||
healthcheck:
|
||||
test: [ "CMD", "curl", "-f", "http://metasrv:3002/health" ]
|
||||
interval: 5s
|
||||
timeout: 3s
|
||||
retries: 5
|
||||
depends_on:
|
||||
etcd0:
|
||||
condition: service_healthy
|
||||
networks:
|
||||
- greptimedb
|
||||
|
||||
datanode0:
|
||||
image: docker.io/greptime/greptimedb:latest
|
||||
container_name: datanode0
|
||||
ports:
|
||||
- 3001:3001
|
||||
command:
|
||||
- datanode
|
||||
- start
|
||||
- --node-id=0
|
||||
- --rpc-addr=0.0.0.0:3001
|
||||
- --rpc-hostname=datanode0:3001
|
||||
- --metasrv-addr=metasrv:3002
|
||||
volumes:
|
||||
- /tmp/greptimedb-cluster-docker-compose/datanode0:/tmp/greptimedb
|
||||
depends_on:
|
||||
metasrv:
|
||||
condition: service_healthy
|
||||
networks:
|
||||
- greptimedb
|
||||
|
||||
frontend0:
|
||||
image: docker.io/greptime/greptimedb:latest
|
||||
container_name: frontend0
|
||||
ports:
|
||||
- 4000:4000
|
||||
- 4001:4001
|
||||
- 4002:4002
|
||||
- 4003:4003
|
||||
command:
|
||||
- frontend
|
||||
- start
|
||||
- --metasrv-addrs=metasrv:3002
|
||||
- --http-addr=0.0.0.0:4000
|
||||
- --rpc-addr=0.0.0.0:4001
|
||||
- --mysql-addr=0.0.0.0:4002
|
||||
- --postgres-addr=0.0.0.0:4003
|
||||
depends_on:
|
||||
metasrv:
|
||||
condition: service_healthy
|
||||
networks:
|
||||
- greptimedb
|
||||
|
||||
networks:
|
||||
greptimedb:
|
||||
name: greptimedb
|
||||
253
docs/benchmarks/tsbs/README.md
Normal file
@@ -0,0 +1,253 @@
|
||||
# How to run TSBS Benchmark
|
||||
|
||||
This document contains the steps to run TSBS Benchmark. Our results are listed in other files in the same directory.
|
||||
|
||||
## Prerequisites
|
||||
|
||||
You need the following tools to run TSBS Benchmark:
|
||||
- Go
|
||||
- git
|
||||
- make
|
||||
- rust (optional, if you want to build the DB from source)
|
||||
|
||||
## Build TSBS suite
|
||||
|
||||
Clone our fork of TSBS:
|
||||
|
||||
```shell
|
||||
git clone https://github.com/GreptimeTeam/tsbs.git
|
||||
```
|
||||
|
||||
Then build it:
|
||||
|
||||
```shell
|
||||
cd tsbs
|
||||
make
|
||||
```
|
||||
|
||||
You can check the `bin/` directory for compiled binaries. We will only use some of them.
|
||||
|
||||
```shell
|
||||
ls ./bin/
|
||||
```
|
||||
|
||||
Binaries we will use later:
|
||||
- `tsbs_generate_data`
|
||||
- `tsbs_generate_queries`
|
||||
- `tsbs_load_greptime`
|
||||
- `tsbs_run_queries_influx`
|
||||
|
||||
## Generate test data and queries
|
||||
|
||||
The data is generated by `tsbs_generate_data`:
|
||||
|
||||
```shell
|
||||
mkdir bench-data
|
||||
./bin/tsbs_generate_data --use-case="cpu-only" --seed=123 --scale=4000 \
|
||||
--timestamp-start="2023-06-11T00:00:00Z" \
|
||||
--timestamp-end="2023-06-14T00:00:00Z" \
|
||||
--log-interval="10s" --format="influx" \
|
||||
> ./bench-data/influx-data.lp
|
||||
```
|
||||
|
||||
Here we generate 4000 time series over 3 days at a 10s interval. We'll write via the InfluxDB line protocol, so the target format is `influx`.
|
||||
|
||||
Queries are generated by `tsbs_generate_queries`. You can change the parameters, but make sure they match those used with `tsbs_generate_data`.
|
||||
|
||||
```shell
|
||||
./bin/tsbs_generate_queries \
|
||||
--use-case="devops" --seed=123 --scale=4000 \
|
||||
--timestamp-start="2023-06-11T00:00:00Z" \
|
||||
--timestamp-end="2023-06-14T00:00:01Z" \
|
||||
--queries=100 \
|
||||
--query-type cpu-max-all-1 \
|
||||
--format="greptime" \
|
||||
> ./bench-data/greptime-queries-cpu-max-all-1.dat
|
||||
./bin/tsbs_generate_queries \
|
||||
--use-case="devops" --seed=123 --scale=4000 \
|
||||
--timestamp-start="2023-06-11T00:00:00Z" \
|
||||
--timestamp-end="2023-06-14T00:00:01Z" \
|
||||
--queries=100 \
|
||||
--query-type cpu-max-all-8 \
|
||||
--format="greptime" \
|
||||
> ./bench-data/greptime-queries-cpu-max-all-8.dat
|
||||
./bin/tsbs_generate_queries \
|
||||
--use-case="devops" --seed=123 --scale=4000 \
|
||||
--timestamp-start="2023-06-11T00:00:00Z" \
|
||||
--timestamp-end="2023-06-14T00:00:01Z" \
|
||||
--queries=50 \
|
||||
--query-type double-groupby-1 \
|
||||
--format="greptime" \
|
||||
> ./bench-data/greptime-queries-double-groupby-1.dat
|
||||
./bin/tsbs_generate_queries \
|
||||
--use-case="devops" --seed=123 --scale=4000 \
|
||||
--timestamp-start="2023-06-11T00:00:00Z" \
|
||||
--timestamp-end="2023-06-14T00:00:01Z" \
|
||||
--queries=50 \
|
||||
--query-type double-groupby-5 \
|
||||
--format="greptime" \
|
||||
> ./bench-data/greptime-queries-double-groupby-5.dat
|
||||
./bin/tsbs_generate_queries \
|
||||
--use-case="devops" --seed=123 --scale=4000 \
|
||||
--timestamp-start="2023-06-11T00:00:00Z" \
|
||||
--timestamp-end="2023-06-14T00:00:01Z" \
|
||||
--queries=50 \
|
||||
--query-type double-groupby-all \
|
||||
--format="greptime" \
|
||||
> ./bench-data/greptime-queries-double-groupby-all.dat
|
||||
./bin/tsbs_generate_queries \
|
||||
--use-case="devops" --seed=123 --scale=4000 \
|
||||
--timestamp-start="2023-06-11T00:00:00Z" \
|
||||
--timestamp-end="2023-06-14T00:00:01Z" \
|
||||
--queries=50 \
|
||||
--query-type groupby-orderby-limit \
|
||||
--format="greptime" \
|
||||
> ./bench-data/greptime-queries-groupby-orderby-limit.dat
|
||||
./bin/tsbs_generate_queries \
|
||||
--use-case="devops" --seed=123 --scale=4000 \
|
||||
--timestamp-start="2023-06-11T00:00:00Z" \
|
||||
--timestamp-end="2023-06-14T00:00:01Z" \
|
||||
--queries=100 \
|
||||
--query-type high-cpu-1 \
|
||||
--format="greptime" \
|
||||
> ./bench-data/greptime-queries-high-cpu-1.dat
|
||||
./bin/tsbs_generate_queries \
|
||||
--use-case="devops" --seed=123 --scale=4000 \
|
||||
--timestamp-start="2023-06-11T00:00:00Z" \
|
||||
--timestamp-end="2023-06-14T00:00:01Z" \
|
||||
--queries=50 \
|
||||
--query-type high-cpu-all \
|
||||
--format="greptime" \
|
||||
> ./bench-data/greptime-queries-high-cpu-all.dat
|
||||
./bin/tsbs_generate_queries \
|
||||
--use-case="devops" --seed=123 --scale=4000 \
|
||||
--timestamp-start="2023-06-11T00:00:00Z" \
|
||||
--timestamp-end="2023-06-14T00:00:01Z" \
|
||||
--queries=10 \
|
||||
--query-type lastpoint \
|
||||
--format="greptime" \
|
||||
> ./bench-data/greptime-queries-lastpoint.dat
|
||||
./bin/tsbs_generate_queries \
|
||||
--use-case="devops" --seed=123 --scale=4000 \
|
||||
--timestamp-start="2023-06-11T00:00:00Z" \
|
||||
--timestamp-end="2023-06-14T00:00:01Z" \
|
||||
--queries=100 \
|
||||
--query-type single-groupby-1-1-1 \
|
||||
--format="greptime" \
|
||||
> ./bench-data/greptime-queries-single-groupby-1-1-1.dat
|
||||
./bin/tsbs_generate_queries \
|
||||
--use-case="devops" --seed=123 --scale=4000 \
|
||||
--timestamp-start="2023-06-11T00:00:00Z" \
|
||||
--timestamp-end="2023-06-14T00:00:01Z" \
|
||||
--queries=100 \
|
||||
--query-type single-groupby-1-1-12 \
|
||||
--format="greptime" \
|
||||
> ./bench-data/greptime-queries-single-groupby-1-1-12.dat
|
||||
./bin/tsbs_generate_queries \
|
||||
--use-case="devops" --seed=123 --scale=4000 \
|
||||
--timestamp-start="2023-06-11T00:00:00Z" \
|
||||
--timestamp-end="2023-06-14T00:00:01Z" \
|
||||
--queries=100 \
|
||||
--query-type single-groupby-1-8-1 \
|
||||
--format="greptime" \
|
||||
> ./bench-data/greptime-queries-single-groupby-1-8-1.dat
|
||||
./bin/tsbs_generate_queries \
|
||||
--use-case="devops" --seed=123 --scale=4000 \
|
||||
--timestamp-start="2023-06-11T00:00:00Z" \
|
||||
--timestamp-end="2023-06-14T00:00:01Z" \
|
||||
--queries=100 \
|
||||
--query-type single-groupby-5-1-1 \
|
||||
--format="greptime" \
|
||||
> ./bench-data/greptime-queries-single-groupby-5-1-1.dat
|
||||
./bin/tsbs_generate_queries \
|
||||
--use-case="devops" --seed=123 --scale=4000 \
|
||||
--timestamp-start="2023-06-11T00:00:00Z" \
|
||||
--timestamp-end="2023-06-14T00:00:01Z" \
|
||||
--queries=100 \
|
||||
--query-type single-groupby-5-1-12 \
|
||||
--format="greptime" \
|
||||
> ./bench-data/greptime-queries-single-groupby-5-1-12.dat
|
||||
./bin/tsbs_generate_queries \
|
||||
--use-case="devops" --seed=123 --scale=4000 \
|
||||
--timestamp-start="2023-06-11T00:00:00Z" \
|
||||
--timestamp-end="2023-06-14T00:00:01Z" \
|
||||
--queries=100 \
|
||||
--query-type single-groupby-5-8-1 \
|
||||
--format="greptime" \
|
||||
> ./bench-data/greptime-queries-single-groupby-5-8-1.dat
|
||||
```
|
||||
|
||||
## Start GreptimeDB
|
||||
|
||||
Refer to our [document](https://docs.greptime.com/getting-started/installation/overview) for how to install and start GreptimeDB, or check this [document](https://docs.greptime.com/contributor-guide/getting-started#compile-and-run) for how to build GreptimeDB from source.
|
||||
|
||||
## Write Data
|
||||
|
||||
After the DB is started, we can use `tsbs_load_greptime` to test the write performance.
|
||||
|
||||
```shell
|
||||
./bin/tsbs_load_greptime \
|
||||
--urls=http://localhost:4000 \
|
||||
--file=./bench-data/influx-data.lp \
|
||||
--batch-size=3000 \
|
||||
--gzip=false \
|
||||
--workers=6
|
||||
```
|
||||
|
||||
Parameters here are only provided as an example. You can choose whatever you like or adjust them to match your target scenario.
|
||||
|
||||
Note that if you want to rerun `tsbs_load_greptime`, destroy and restart the DB and clear its previous data first; existing duplicated data will impact write and query performance.
|
||||
|
||||
## Query Data
|
||||
|
||||
After the data is imported, you can then run queries. The following script runs all queries. You can also choose a subset of queries to run.
|
||||
|
||||
```shell
|
||||
./bin/tsbs_run_queries_influx --file=./bench-data/greptime-queries-cpu-max-all-1.dat \
|
||||
--db-name=benchmark \
|
||||
--urls="http://localhost:4000"
|
||||
./bin/tsbs_run_queries_influx --file=./bench-data/greptime-queries-cpu-max-all-8.dat \
|
||||
--db-name=benchmark \
|
||||
--urls="http://localhost:4000"
|
||||
./bin/tsbs_run_queries_influx --file=./bench-data/greptime-queries-double-groupby-1.dat \
|
||||
--db-name=benchmark \
|
||||
--urls="http://localhost:4000"
|
||||
./bin/tsbs_run_queries_influx --file=./bench-data/greptime-queries-double-groupby-5.dat \
|
||||
--db-name=benchmark \
|
||||
--urls="http://localhost:4000"
|
||||
./bin/tsbs_run_queries_influx --file=./bench-data/greptime-queries-double-groupby-all.dat \
|
||||
--db-name=benchmark \
|
||||
--urls="http://localhost:4000"
|
||||
./bin/tsbs_run_queries_influx --file=./bench-data/greptime-queries-groupby-orderby-limit.dat \
|
||||
--db-name=benchmark \
|
||||
--urls="http://localhost:4000"
|
||||
./bin/tsbs_run_queries_influx --file=./bench-data/greptime-queries-high-cpu-1.dat \
|
||||
--db-name=benchmark \
|
||||
--urls="http://localhost:4000"
|
||||
./bin/tsbs_run_queries_influx --file=./bench-data/greptime-queries-high-cpu-all.dat \
|
||||
--db-name=benchmark \
|
||||
--urls="http://localhost:4000"
|
||||
./bin/tsbs_run_queries_influx --file=./bench-data/greptime-queries-lastpoint.dat \
|
||||
--db-name=benchmark \
|
||||
--urls="http://localhost:4000"
|
||||
./bin/tsbs_run_queries_influx --file=./bench-data/greptime-queries-single-groupby-1-1-1.dat \
|
||||
--db-name=benchmark \
|
||||
--urls="http://localhost:4000"
|
||||
./bin/tsbs_run_queries_influx --file=./bench-data/greptime-queries-single-groupby-1-1-12.dat \
|
||||
--db-name=benchmark \
|
||||
--urls="http://localhost:4000"
|
||||
./bin/tsbs_run_queries_influx --file=./bench-data/greptime-queries-single-groupby-1-8-1.dat \
|
||||
--db-name=benchmark \
|
||||
--urls="http://localhost:4000"
|
||||
./bin/tsbs_run_queries_influx --file=./bench-data/greptime-queries-single-groupby-5-1-1.dat \
|
||||
--db-name=benchmark \
|
||||
--urls="http://localhost:4000"
|
||||
./bin/tsbs_run_queries_influx --file=./bench-data/greptime-queries-single-groupby-5-1-12.dat \
|
||||
--db-name=benchmark \
|
||||
--urls="http://localhost:4000"
|
||||
./bin/tsbs_run_queries_influx --file=./bench-data/greptime-queries-single-groupby-5-8-1.dat \
|
||||
--db-name=benchmark \
|
||||
--urls="http://localhost:4000"
|
||||
```
|
||||
|
||||
Rerunning queries does not require re-importing the data; just execute the corresponding command again.
|
||||
@@ -23,28 +23,28 @@
|
||||
|
||||
## Write performance
|
||||
|
||||
| Environment | Ingest rate (rows/s) |
|
||||
| ------------------ | --------------------- |
|
||||
| Local | 3695814.64 |
|
||||
| EC2 c5d.2xlarge | 2987166.64 |
|
||||
| Environment | Ingest rate (rows/s) |
|
||||
| --------------- | -------------------- |
|
||||
| Local | 369581.464 |
|
||||
| EC2 c5d.2xlarge | 298716.664 |
|
||||
|
||||
|
||||
## Query performance
|
||||
|
||||
| Query type | Local (ms) | EC2 c5d.2xlarge (ms) |
|
||||
| --------------------- | ---------- | ---------------------- |
|
||||
| cpu-max-all-1 | 30.56 | 54.74 |
|
||||
| cpu-max-all-8 | 52.69 | 70.50 |
|
||||
| double-groupby-1 | 664.30 | 1366.63 |
|
||||
| double-groupby-5 | 1391.26 | 2141.71 |
|
||||
| double-groupby-all | 2828.94 | 3389.59 |
|
||||
| groupby-orderby-limit | 718.92 | 1213.90 |
|
||||
| high-cpu-1 | 29.21 | 52.98 |
|
||||
| high-cpu-all | 5514.12 | 7194.91 |
|
||||
| lastpoint | 7571.40 | 9423.41 |
|
||||
| single-groupby-1-1-1 | 19.09 | 7.77 |
|
||||
| single-groupby-1-1-12 | 27.28 | 51.64 |
|
||||
| single-groupby-1-8-1 | 31.85 | 11.64 |
|
||||
| single-groupby-5-1-1 | 16.14 | 9.67 |
|
||||
| single-groupby-5-1-12 | 27.21 | 53.62 |
|
||||
| single-groupby-5-8-1 | 39.62 | 14.96 |
|
||||
| Query type | Local (ms) | EC2 c5d.2xlarge (ms) |
|
||||
| --------------------- | ---------- | -------------------- |
|
||||
| cpu-max-all-1 | 30.56 | 54.74 |
|
||||
| cpu-max-all-8 | 52.69 | 70.50 |
|
||||
| double-groupby-1 | 664.30 | 1366.63 |
|
||||
| double-groupby-5 | 1391.26 | 2141.71 |
|
||||
| double-groupby-all | 2828.94 | 3389.59 |
|
||||
| groupby-orderby-limit | 718.92 | 1213.90 |
|
||||
| high-cpu-1 | 29.21 | 52.98 |
|
||||
| high-cpu-all | 5514.12 | 7194.91 |
|
||||
| lastpoint | 7571.40 | 9423.41 |
|
||||
| single-groupby-1-1-1 | 19.09 | 7.77 |
|
||||
| single-groupby-1-1-12 | 27.28 | 51.64 |
|
||||
| single-groupby-1-8-1 | 31.85 | 11.64 |
|
||||
| single-groupby-5-1-1 | 16.14 | 9.67 |
|
||||
| single-groupby-5-1-12 | 27.21 | 53.62 |
|
||||
| single-groupby-5-8-1 | 39.62 | 14.96 |
|
||||
|
||||
58
docs/benchmarks/tsbs/v0.8.0.md
Normal file
@@ -0,0 +1,58 @@
|
||||
# TSBS benchmark - v0.8.0
|
||||
|
||||
## Environment
|
||||
|
||||
### Local
|
||||
|
||||
| | |
|
||||
| ------ | ---------------------------------- |
|
||||
| CPU | AMD Ryzen 7 7735HS (8 core 3.2GHz) |
|
||||
| Memory | 32GB |
|
||||
| Disk | SOLIDIGM SSDPFKNU010TZ |
|
||||
| OS | Ubuntu 22.04.2 LTS |
|
||||
|
||||
### Amazon EC2
|
||||
|
||||
| | |
|
||||
| ------- | -------------- |
|
||||
| Machine | c5d.2xlarge |
|
||||
| CPU | 8 core |
|
||||
| Memory | 16GB |
|
||||
| Disk | 50GB (GP3) |
|
||||
| OS | Ubuntu 22.04.1 |
|
||||
|
||||
## Write performance
|
||||
|
||||
| Environment | Ingest rate (rows/s) |
|
||||
| --------------- | -------------------- |
|
||||
| Local | 315369.66 |
|
||||
| EC2 c5d.2xlarge | 222148.56 |
|
||||
|
||||
## Query performance
|
||||
|
||||
| Query type | Local (ms) | EC2 c5d.2xlarge (ms) |
|
||||
| --------------------- | ---------- | -------------------- |
|
||||
| cpu-max-all-1 | 24.63 | 15.29 |
|
||||
| cpu-max-all-8 | 51.69 | 33.53 |
|
||||
| double-groupby-1 | 673.51 | 1295.38 |
|
||||
| double-groupby-5 | 1244.93 | 1993.91 |
|
||||
| double-groupby-all | 2215.44 | 3056.77 |
|
||||
| groupby-orderby-limit | 754.50 | 1546.49 |
|
||||
| high-cpu-1 | 19.62 | 11.58 |
|
||||
| high-cpu-all | 5402.31 | 8011.43 |
|
||||
| lastpoint | 6756.12 | 9312.67 |
|
||||
| single-groupby-1-1-1 | 15.70 | 7.67 |
|
||||
| single-groupby-1-1-12 | 16.72 | 9.29 |
|
||||
| single-groupby-1-8-1 | 26.72 | 17.97 |
|
||||
| single-groupby-5-1-1 | 18.17 | 10.09 |
|
||||
| single-groupby-5-1-12 | 20.04 | 12.37 |
|
||||
| single-groupby-5-8-1 | 35.63 | 23.13 |
|
||||
|
||||
`single-groupby-1-1-1` query throughput
|
||||
|
||||
| Environment | Client concurrency | mean time (ms) | qps (queries/sec) |
|
||||
| --------------- | ------------------ | -------------- | ----------------- |
|
||||
| Local | 50 | 42.87 | 1165.73 |
|
||||
| Local | 100 | 89.29 | 1119.38 |
|
||||
| EC2 c5d.2xlarge | 50 | 69.25 | 721.73 |
|
||||
| EC2 c5d.2xlarge | 100 | 140.93 | 709.35 |
|
||||
@@ -1,527 +0,0 @@
|
||||
# Schema Structs
|
||||
|
||||
# Common Schemas
|
||||
The `datatypes` crate defines the elementary schema structs that describe the metadata.
|
||||
|
||||
## ColumnSchema
|
||||
[ColumnSchema](https://github.com/GreptimeTeam/greptimedb/blob/9fa871a3fad07f583dc1863a509414da393747f8/src/datatypes/src/schema/column_schema.rs#L36) represents the metadata of a column. It is equivalent to arrow's [Field](https://docs.rs/arrow/latest/arrow/datatypes/struct.Field.html) with additional metadata such as default constraint and whether the column is a time index. The time index is the column with a `TIME INDEX` constraint of a table. We can convert the `ColumnSchema` into an arrow `Field` and convert the `Field` back to the `ColumnSchema` without losing metadata.
|
||||
|
||||
```rust
|
||||
pub struct ColumnSchema {
|
||||
pub name: String,
|
||||
pub data_type: ConcreteDataType,
|
||||
is_nullable: bool,
|
||||
is_time_index: bool,
|
||||
default_constraint: Option<ColumnDefaultConstraint>,
|
||||
metadata: Metadata,
|
||||
}
|
||||
```
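
For intuition, here is a minimal sketch of how such extra metadata can survive a round trip through an arrow `Field`, using arrow's key-value metadata. The `greptime:time_index` key and the helper functions are illustrative assumptions, not the crate's actual conversion code.

```rust
use std::collections::HashMap;

use arrow::datatypes::{DataType, Field, TimeUnit};

// Illustrative only: encode the time-index flag into the Field's metadata so a
// ColumnSchema -> Field -> ColumnSchema round trip can recover it.
fn to_arrow_field(name: &str, data_type: DataType, is_time_index: bool) -> Field {
    let mut metadata = HashMap::new();
    if is_time_index {
        // Hypothetical metadata key; the real crate defines its own constant.
        metadata.insert("greptime:time_index".to_string(), "true".to_string());
    }
    Field::new(name, data_type, false).with_metadata(metadata)
}

fn is_time_index(field: &Field) -> bool {
    field
        .metadata()
        .get("greptime:time_index")
        .map(|v| v == "true")
        .unwrap_or(false)
}

fn main() {
    let ts = to_arrow_field("ts", DataType::Timestamp(TimeUnit::Millisecond, None), true);
    assert!(is_time_index(&ts));
}
```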
|
||||
|
||||
## Schema
|
||||
[Schema](https://github.com/GreptimeTeam/greptimedb/blob/9fa871a3fad07f583dc1863a509414da393747f8/src/datatypes/src/schema.rs#L38) is an ordered sequence of `ColumnSchema`. It is equivalent to arrow's [Schema](https://docs.rs/arrow/latest/arrow/datatypes/struct.Schema.html) with additional metadata including the index of the time index column and the version of this schema. As with `ColumnSchema`, we can convert our `Schema` to and from arrow's `Schema`.
|
||||
|
||||
```rust
|
||||
use arrow::datatypes::Schema as ArrowSchema;
|
||||
|
||||
pub struct Schema {
|
||||
column_schemas: Vec<ColumnSchema>,
|
||||
name_to_index: HashMap<String, usize>,
|
||||
arrow_schema: Arc<ArrowSchema>,
|
||||
timestamp_index: Option<usize>,
|
||||
version: u32,
|
||||
}
|
||||
|
||||
pub type SchemaRef = Arc<Schema>;
|
||||
```
|
||||
|
||||
We alias `Arc<Schema>` as `SchemaRef` since it is used frequently. Mostly, we use our `ColumnSchema` and `Schema` structs instead of Arrow's `Field` and `Schema` unless we need to invoke third-party libraries (like DataFusion or ArrowFlight) that rely on Arrow.
|
||||
|
||||
## RawSchema
|
||||
`Schema` contains derived fields, such as a map from column names to their indices in the `ColumnSchema` sequence and a cached arrow `Schema`. Since these fields can be reconstructed from the `ColumnSchema` sequence, we don't want to serialize them, which is why we don't derive `Serialize` and `Deserialize` for `Schema`. Instead, we introduce a new struct [RawSchema](https://github.com/GreptimeTeam/greptimedb/blob/9fa871a3fad07f583dc1863a509414da393747f8/src/datatypes/src/schema/raw.rs#L24) that keeps all required fields of a `Schema` and derives the serialization traits. To serialize a `Schema`, we first convert it into a `RawSchema` and serialize the `RawSchema`.
|
||||
|
||||
```rust
|
||||
pub struct RawSchema {
|
||||
pub column_schemas: Vec<ColumnSchema>,
|
||||
pub timestamp_index: Option<usize>,
|
||||
pub version: u32,
|
||||
}
|
||||
```
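
As a self-contained sketch of that pattern (with simplified stand-in types, not the crate's real structs, and assuming `serde`/`serde_json` as dependencies): only the raw, reconstructible data is serialized, and the derived lookup map is rebuilt on deserialization.

```rust
use std::collections::HashMap;

use serde::{Deserialize, Serialize};

// Simplified stand-ins for RawSchema/Schema, just to show the serialization pattern.
#[derive(Serialize, Deserialize)]
struct RawSchemaLike {
    column_names: Vec<String>,
    timestamp_index: Option<usize>,
    version: u32,
}

struct SchemaLike {
    raw: RawSchemaLike,
    // Derived field: rebuilt from `raw`, never serialized.
    name_to_index: HashMap<String, usize>,
}

impl From<RawSchemaLike> for SchemaLike {
    fn from(raw: RawSchemaLike) -> Self {
        let name_to_index = raw
            .column_names
            .iter()
            .enumerate()
            .map(|(i, name)| (name.clone(), i))
            .collect();
        SchemaLike { raw, name_to_index }
    }
}

fn main() {
    let raw = RawSchemaLike {
        column_names: vec!["ts".into(), "host".into()],
        timestamp_index: Some(0),
        version: 0,
    };
    // Serialize the raw form, then rebuild the full schema from it.
    let json = serde_json::to_string(&raw).unwrap();
    let schema: SchemaLike = serde_json::from_str::<RawSchemaLike>(&json).unwrap().into();
    assert_eq!(schema.name_to_index["host"], 1);
    assert_eq!(schema.raw.timestamp_index, Some(0));
}
```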
|
||||
|
||||
We want to keep the `Schema` simple and avoid putting too much business-related metadata in it, since many different structs and traits rely on it.
|
||||
|
||||
# Schema of the Table
|
||||
A table maintains its schema in [TableMeta](https://github.com/GreptimeTeam/greptimedb/blob/9fa871a3fad07f583dc1863a509414da393747f8/src/table/src/metadata.rs#L97).
|
||||
```rust
|
||||
pub struct TableMeta {
|
||||
pub schema: SchemaRef,
|
||||
pub primary_key_indices: Vec<usize>,
|
||||
pub value_indices: Vec<usize>,
|
||||
// ...
|
||||
}
|
||||
```
|
||||
|
||||
The order of columns in `TableMeta::schema` is the same as the order specified in the `CREATE TABLE` statement used to create the table.
|
||||
|
||||
The field `primary_key_indices` stores the indices of primary key columns. The field `value_indices` records the indices of value columns (columns that are neither primary key columns nor the time index; we sometimes call them field columns).
|
||||
|
||||
Suppose we create a table with the following SQL:
|
||||
```sql
|
||||
CREATE TABLE cpu (
|
||||
ts TIMESTAMP,
|
||||
host STRING,
|
||||
usage_user DOUBLE,
|
||||
usage_system DOUBLE,
|
||||
datacenter STRING,
|
||||
TIME INDEX (ts),
|
||||
PRIMARY KEY(datacenter, host)) ENGINE=mito;
|
||||
```
|
||||
|
||||
Then the table's `TableMeta` may look like this:
|
||||
```json
|
||||
{
|
||||
"schema":{
|
||||
"column_schemas":[
|
||||
"ts",
|
||||
"host",
|
||||
"usage_user",
|
||||
"usage_system",
|
||||
"datacenter"
|
||||
],
|
||||
"time_index":0,
|
||||
"version":0
|
||||
},
|
||||
"primary_key_indices":[
|
||||
4,
|
||||
1
|
||||
],
|
||||
"value_indices":[
|
||||
2,
|
||||
3
|
||||
]
|
||||
}
|
||||
```
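
To make the relationship between these fields concrete, here is a small standalone sketch (not the actual `TableMeta` code) that derives `value_indices` from the column count, the primary key indices, and the time index position:

```rust
// Value columns are the columns that are neither part of the primary key nor the time index.
fn value_indices(num_columns: usize, primary_key_indices: &[usize], time_index: usize) -> Vec<usize> {
    (0..num_columns)
        .filter(|i| *i != time_index && !primary_key_indices.contains(i))
        .collect()
}

fn main() {
    // Columns of the `cpu` table: ts(0), host(1), usage_user(2), usage_system(3), datacenter(4).
    // PRIMARY KEY(datacenter, host) => [4, 1]; TIME INDEX (ts) => 0.
    assert_eq!(value_indices(5, &[4, 1], 0), vec![2, 3]);
}
```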
|
||||
|
||||
|
||||
# Schemas of the storage engine
|
||||
We split a table into one or more units with the same schema and store these units in the storage engine. Each unit is a region.
|
||||
|
||||
The storage engine maintains schemas of regions in more complicated ways because it
|
||||
- adds internal columns that are invisible to users to store additional metadata for each row
|
||||
- provides a data model similar to the key-value model so it organizes columns in a different order
|
||||
- maintains additional metadata like column id or column family
|
||||
|
||||
So the storage engine defines several schema structs:
|
||||
- RegionSchema
|
||||
- StoreSchema
|
||||
- ProjectedSchema
|
||||
|
||||
## RegionSchema
|
||||
A [RegionSchema](https://github.com/GreptimeTeam/greptimedb/blob/9fa871a3fad07f583dc1863a509414da393747f8/src/storage/src/schema/region.rs#L37) describes the schema of a region.
|
||||
|
||||
```rust
|
||||
pub struct RegionSchema {
|
||||
user_schema: SchemaRef,
|
||||
store_schema: StoreSchemaRef,
|
||||
columns: ColumnsMetadataRef,
|
||||
}
|
||||
```
|
||||
|
||||
Each region reserves some columns called `internal columns` for internal usage:
|
||||
- `__sequence`, sequence number of a row
|
||||
- `__op_type`, operation type of a row, such as `PUT` or `DELETE`
|
||||
- `__version`, user-specified version of a row, reserved but not used. We might remove this in the future
|
||||
|
||||
The table engine can't see the `__sequence` and `__op_type` columns, so the `RegionSchema` itself maintains two internal schemas:
|
||||
- User schema, a `Schema` struct that doesn't have internal columns
|
||||
- Store schema, a `StoreSchema` struct that has internal columns
|
||||
|
||||
The `ColumnsMetadata` struct keeps metadata about all columns, but most of the time we only need the metadata in the user schema and store schema, so we ignore it here. We may remove this struct in the future.
|
||||
|
||||
`RegionSchema` organizes columns in the following order:
|
||||
```
|
||||
key columns, timestamp, [__version,] value columns, __sequence, __op_type
|
||||
```
|
||||
|
||||
We can ignore the `__version` column because it is disabled now:
|
||||
|
||||
```
|
||||
key columns, timestamp, value columns, __sequence, __op_type
|
||||
```
|
||||
|
||||
Key columns are the columns of a table's primary key, and the timestamp is the time index column. A region sorts all rows by key columns, timestamp, sequence, and op type.
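
As a rough illustration of that sort order (the directions of the sequence and op type comparisons are an assumption here, not taken from the source), sorting boils down to comparing tuples:

```rust
#[derive(Debug)]
struct Row {
    key: (String, String), // (datacenter, host)
    ts: i64,
    sequence: u64,
    op_type: u8,
}

fn main() {
    let mut rows = vec![
        Row { key: ("dc1".into(), "host2".into()), ts: 10, sequence: 2, op_type: 1 },
        Row { key: ("dc1".into(), "host1".into()), ts: 20, sequence: 1, op_type: 1 },
        Row { key: ("dc1".into(), "host1".into()), ts: 10, sequence: 3, op_type: 0 },
    ];
    // Sort by key columns, then timestamp, then sequence, then op type.
    rows.sort_by(|a, b| {
        (&a.key, a.ts, a.sequence, a.op_type).cmp(&(&b.key, b.ts, b.sequence, b.op_type))
    });
    for row in &rows {
        println!("{row:?}");
    }
}
```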

So the `RegionSchema` of our `cpu` table above looks like this:
```json
{
    "user_schema":[
        "datacenter",
        "host",
        "ts",
        "usage_user",
        "usage_system"
    ],
    "store_schema":[
        "datacenter",
        "host",
        "ts",
        "usage_user",
        "usage_system",
        "__sequence",
        "__op_type"
    ]
}
```

## StoreSchema

As described above, a [StoreSchema](https://github.com/GreptimeTeam/greptimedb/blob/9fa871a3fad07f583dc1863a509414da393747f8/src/storage/src/schema/store.rs#L36) is a schema that knows all internal columns.
```rust
struct StoreSchema {
    columns: Vec<ColumnMetadata>,
    schema: SchemaRef,
    row_key_end: usize,
    user_column_end: usize,
}
```

The columns in the `columns` and `schema` fields have the same order. `ColumnMetadata` holds metadata such as column id, column family id, and comment. The `StoreSchema` also stores this metadata in `StoreSchema::schema`, so we can convert between a `StoreSchema` and arrow's `Schema`. We use this to persist the `StoreSchema` in SSTs, since our SST format is `Parquet`, which can take arrow's `Schema` as its schema.
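
One way such a round trip can work is to carry the extra fields in the arrow schema's key-value metadata. The sketch below only illustrates the idea; the metadata keys and column types are invented for this example and are not GreptimeDB's actual on-disk layout.

```rust
use std::collections::HashMap;
use std::sync::Arc;

use arrow_schema::{DataType, Field, Schema, TimeUnit};

/// Builds an arrow schema for the `cpu` store schema and stashes the
/// structural markers in the schema metadata. Keys are hypothetical.
fn store_schema_to_arrow(row_key_end: usize, user_column_end: usize) -> Arc<Schema> {
    let fields = vec![
        Field::new("datacenter", DataType::Utf8, true),
        Field::new("host", DataType::Utf8, true),
        Field::new("ts", DataType::Timestamp(TimeUnit::Millisecond, None), false),
        Field::new("usage_user", DataType::Float64, true),
        Field::new("usage_system", DataType::Float64, true),
        Field::new("__sequence", DataType::UInt64, false),
        Field::new("__op_type", DataType::UInt8, false),
    ];
    let metadata = HashMap::from([
        ("greptime.store_schema.row_key_end".to_string(), row_key_end.to_string()),
        ("greptime.store_schema.user_column_end".to_string(), user_column_end.to_string()),
    ]);
    Arc::new(Schema::new_with_metadata(fields, metadata))
}
```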

The `StoreSchema` of the region above is similar to this:
```json
{
    "schema":{
        "column_schemas":[
            "datacenter",
            "host",
            "ts",
            "usage_user",
            "usage_system",
            "__sequence",
            "__op_type"
        ],
        "time_index":2,
        "version":0
    },
    "row_key_end":3,
    "user_column_end":5
}
```

The key columns and the timestamp column form the row key of each row. We put them together so we can use `row_key_end` to get the indices of all row key columns. Similarly, we can use `user_column_end` to get the indices of all user columns (non-internal columns).
```rust
impl StoreSchema {
    #[inline]
    pub(crate) fn row_key_indices(&self) -> impl Iterator<Item = usize> {
        0..self.row_key_end
    }

    #[inline]
    pub(crate) fn value_indices(&self) -> impl Iterator<Item = usize> {
        self.row_key_end..self.user_column_end
    }
}
```

Another useful property of `StoreSchema` is that it always contains the key columns, the timestamp column, and the internal columns, because we need them to perform merge, deduplication, and delete. Projection on a `StoreSchema` only projects value columns.
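
For the `cpu` region above (`row_key_end = 3`, `user_column_end = 5`), these iterators resolve to the following index ranges; the snippet simply restates the arithmetic:

```rust
fn main() {
    let (row_key_end, user_column_end) = (3usize, 5usize);

    // Row key columns: datacenter (0), host (1), ts (2).
    let row_key_indices: Vec<usize> = (0..row_key_end).collect();
    // Value columns: usage_user (3), usage_system (4).
    let value_indices: Vec<usize> = (row_key_end..user_column_end).collect();

    assert_eq!(row_key_indices, vec![0, 1, 2]);
    assert_eq!(value_indices, vec![3, 4]);
}
```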

## ProjectedSchema

To support arbitrary projection, we introduce the [ProjectedSchema](https://github.com/GreptimeTeam/greptimedb/blob/9fa871a3fad07f583dc1863a509414da393747f8/src/storage/src/schema/projected.rs#L106).
```rust
pub struct ProjectedSchema {
    projection: Option<Projection>,
    schema_to_read: StoreSchemaRef,
    projected_user_schema: SchemaRef,
}
```

We need to handle many cases while doing projection:
- The column order of the table and the region is different
- The projection can be in arbitrary order, e.g. `select usage_user, host from cpu` and `select host, usage_user from cpu` have different projection orders
- We support `ALTER TABLE`, so data files may have different schemas

### Projection

Let's take an example to see how projection works. Suppose we want to select `ts` and `usage_system` from the `cpu` table.

```sql
CREATE TABLE cpu (
    ts TIMESTAMP,
    host STRING,
    usage_user DOUBLE,
    usage_system DOUBLE,
    datacenter STRING,
    TIME INDEX (ts),
    PRIMARY KEY(datacenter, host)) ENGINE=mito;

select ts, usage_system from cpu;
```

The query engine uses the projection `[0, 3]` to scan the table. However, the columns in the region have a different order, so the table engine adjusts the projection to `[2, 4]`.
```json
{
    "user_schema":[
        "datacenter",
        "host",
        "ts",
        "usage_user",
        "usage_system"
    ]
}
```

The output order is still `[ts, usage_system]`. This is the schema users see after projection, so we call it the `projected user schema`.
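
A hedged sketch of that index adjustment: map a projection expressed against the table's column order onto the region's column order by column name. The function and its shape are made up for illustration, not the table engine's real code.

```rust
/// Maps a projection over the table's column order to the region's column
/// order. Illustrative only.
fn adjust_projection(
    table_columns: &[&str],
    region_columns: &[&str],
    table_projection: &[usize],
) -> Vec<usize> {
    table_projection
        .iter()
        .map(|&i| {
            let name = table_columns[i];
            region_columns
                .iter()
                .position(|c| *c == name)
                .expect("projected column must exist in the region")
        })
        .collect()
}

fn main() {
    let table = ["ts", "host", "usage_user", "usage_system", "datacenter"];
    let region = ["datacenter", "host", "ts", "usage_user", "usage_system"];
    // `select ts, usage_system from cpu` => table projection [0, 3].
    assert_eq!(adjust_projection(&table, &region, &[0, 3]), vec![2, 4]);
}
```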

But the storage engine also needs to read the key columns, the timestamp column, and the internal columns. So the `ProjectedSchema` maintains a `StoreSchema` after projection.

The `Projection` struct is a helper that computes the projected user schema and the projected store schema.

So we can construct the following `ProjectedSchema`:
```json
{
    "schema_to_read":{
        "schema":{
            "column_schemas":[
                "datacenter",
                "host",
                "ts",
                "usage_system",
                "__sequence",
                "__op_type"
            ],
            "time_index":2,
            "version":0
        },
        "row_key_end":3,
        "user_column_end":4
    },
    "projected_user_schema":{
        "column_schemas":[
            "ts",
            "usage_system"
        ],
        "time_index":0
    }
}
```

As you can see, `schema_to_read` doesn't contain the column `usage_user`, because it is not in the projection.

### ReadAdapter

As mentioned above, we can alter a table, so the underlying files (SSTs) and memtables in the storage engine may have different schemas.

To simplify the logic of `ProjectedSchema`, we handle the difference between schemas before projection (constructing the `ProjectedSchema`). We introduce the [ReadAdapter](https://github.com/GreptimeTeam/greptimedb/blob/9fa871a3fad07f583dc1863a509414da393747f8/src/storage/src/schema/compat.rs#L90), which adapts rows with different source schemas to the same expected schema.

So we can always use the region's current `RegionSchema` to construct the `ProjectedSchema`, and then create a `ReadAdapter` for each memtable or SST.
```rust
#[derive(Debug)]
pub struct ReadAdapter {
    source_schema: StoreSchemaRef,
    dest_schema: ProjectedSchemaRef,
    indices_in_result: Vec<Option<usize>>,
    is_source_needed: Vec<bool>,
}
```

For each column required by `dest_schema`, `indices_in_result` stores the index of that column in the row read from the source memtable or SST. If the source row doesn't contain that column, the index is `None`.

The field `is_source_needed` stores whether each column in the source memtable or SST is needed.
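
Here is a sketch of how these two fields could be computed from the source and destination column lists. It mirrors the idea rather than the actual `ReadAdapter` constructor.

```rust
/// Computes, for each destination column, where it lives in the filtered
/// source row (`None` if the source doesn't have it), and, for each source
/// column, whether it needs to be read at all. Illustrative only.
fn compat_indices(
    source_columns: &[&str],
    dest_columns: &[&str],
) -> (Vec<Option<usize>>, Vec<bool>) {
    let is_source_needed: Vec<bool> = source_columns
        .iter()
        .map(|c| dest_columns.contains(c))
        .collect();

    // Positions of the needed source columns once the unneeded ones are skipped.
    let needed: Vec<&str> = source_columns
        .iter()
        .zip(&is_source_needed)
        .filter(|(_, needed)| **needed)
        .map(|(c, _)| *c)
        .collect();

    let indices_in_result = dest_columns
        .iter()
        .map(|c| needed.iter().position(|n| n == c))
        .collect();

    (indices_in_result, is_source_needed)
}

fn main() {
    let source = ["datacenter", "host", "ts", "usage_user", "usage_system", "__sequence", "__op_type"];
    let dest = ["datacenter", "host", "ts", "usage_system", "usage_idle", "__sequence", "__op_type"];
    let (indices_in_result, is_source_needed) = compat_indices(&source, &dest);
    assert_eq!(indices_in_result, vec![Some(0), Some(1), Some(2), Some(3), None, Some(4), Some(5)]);
    assert_eq!(is_source_needed, vec![true, true, true, false, true, true, true]);
}
```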

Suppose we add a new column `usage_idle` to the table `cpu`.
```sql
ALTER TABLE cpu ADD COLUMN usage_idle DOUBLE;
```

The new `StoreSchema` becomes:
```json
{
    "schema":{
        "column_schemas":[
            "datacenter",
            "host",
            "ts",
            "usage_user",
            "usage_system",
            "usage_idle",
            "__sequence",
            "__op_type"
        ],
        "time_index":2,
        "version":1
    },
    "row_key_end":3,
    "user_column_end":6
}
```

Note that we bump the version of the schema to 1.

Now suppose we want to select `ts`, `usage_system`, and `usage_idle`. While reading from an SST or memtable that still uses the old schema, the storage engine creates a `ReadAdapter` like this:
```json
{
    "source_schema":{
        "schema":{
            "column_schemas":[
                "datacenter",
                "host",
                "ts",
                "usage_user",
                "usage_system",
                "__sequence",
                "__op_type"
            ],
            "time_index":2,
            "version":0
        },
        "row_key_end":3,
        "user_column_end":5
    },
    "dest_schema":{
        "schema_to_read":{
            "schema":{
                "column_schemas":[
                    "datacenter",
                    "host",
                    "ts",
                    "usage_system",
                    "usage_idle",
                    "__sequence",
                    "__op_type"
                ],
                "time_index":2,
                "version":1
            },
            "row_key_end":3,
            "user_column_end":5
        },
        "projected_user_schema":{
            "column_schemas":[
                "ts",
                "usage_system",
                "usage_idle"
            ],
            "time_index":0
        }
    },
    "indices_in_result":[
        0,
        1,
        2,
        3,
        null,
        4,
        5
    ],
    "is_source_needed":[
        true,
        true,
        true,
        false,
        true,
        true,
        true
    ]
}
```

We don't need to read `usage_user`, so `is_source_needed[3]` is false. The old schema doesn't have the column `usage_idle`, so `indices_in_result[4]` is `null`, and the `ReadAdapter` inserts a null column into the output row so that the output schema still contains `usage_idle`.
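
To illustrate the null-filling step, here is a hypothetical sketch that applies `indices_in_result` to a source row whose unneeded columns have already been dropped; values are simplified to strings.

```rust
/// Builds the output row expected by the destination schema. A `None` index
/// means the source lacks that column, so we emit a null. Illustrative only.
fn adapt_row(
    source_row: &[Option<String>],
    indices_in_result: &[Option<usize>],
) -> Vec<Option<String>> {
    indices_in_result
        .iter()
        .copied()
        .map(|idx| idx.and_then(|i| source_row[i].clone()))
        .collect()
}

fn main() {
    // Needed source columns: datacenter, host, ts, usage_system, __sequence, __op_type.
    let source_row = vec![
        Some("dc1".to_string()),
        Some("host1".to_string()),
        Some("1000".to_string()),
        Some("0.5".to_string()),
        Some("42".to_string()),
        Some("0".to_string()),
    ];
    let indices_in_result = vec![Some(0), Some(1), Some(2), Some(3), None, Some(4), Some(5)];
    let out = adapt_row(&source_row, &indices_in_result);
    // `usage_idle` (index 4 of the destination) is filled with null.
    assert_eq!(out[4], None);
    assert_eq!(out.len(), 7);
}
```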

The figure below shows the relationship between `RegionSchema`, `StoreSchema`, `ProjectedSchema`, and `ReadAdapter`.

```text
                   ┌──────────────────────────────┐
                   │                              │
                   │    ┌────────────────────┐    │
                   │    │    store_schema    │    │
                   │    │                    │    │
                   │    │    StoreSchema     │    │
                   │    │     version 1      │    │
                   │    └────────────────────┘    │
                   │                              │
                   │    ┌────────────────────┐    │
                   │    │    user_schema     │    │
                   │    └────────────────────┘    │
                   │                              │
                   │         RegionSchema         │
                   │                              │
                   └──────────────┬───────────────┘
                                  │
                                  │
                                  │
                   ┌──────────────▼───────────────┐
                   │                              │
                   │ ┌──────────────────────────┐ │
                   │ │      schema_to_read      │ │
                   │ │                          │ │
                   │ │ StoreSchema (projected)  │ │
                   │ │        version 1         │ │
                   │ └──────────────────────────┘ │
               ┌───┤                              ├───┐
               │   │ ┌──────────────────────────┐ │   │
               │   │ │  projected_user_schema   │ │   │
               │   │ └──────────────────────────┘ │   │
               │   │                              │   │
               │   │       ProjectedSchema        │   │
dest schema    │   └──────────────────────────────┘   │  dest schema
               │                                      │
               │                                      │
        ┌──────▼───────┐                      ┌───────▼──────┐
        │              │                      │              │
        │  ReadAdapter │                      │  ReadAdapter │
        │              │                      │              │
        └──────▲───────┘                      └───────▲──────┘
               │                                      │
               │                                      │
source schema  │                                      │  source schema
               │                                      │
       ┌───────┴─────────┐                   ┌────────┴────────┐
       │                 │                   │                 │
       │ ┌─────────────┐ │                   │ ┌─────────────┐ │
       │ │             │ │                   │ │             │ │
       │ │ StoreSchema │ │                   │ │ StoreSchema │ │
       │ │             │ │                   │ │             │ │
       │ │  version 0  │ │                   │ │  version 1  │ │
       │ │             │ │                   │ │             │ │
       │ └─────────────┘ │                   │ └─────────────┘ │
       │                 │                   │                 │
       │      SST 0      │                   │      SST 1      │
       │                 │                   │                 │
       └─────────────────┘                   └─────────────────┘
```

# Conversion

The following summarizes the conversions between the schema structs:

- `TableMeta` exposes a `Schema` through its `schema` field.
- `RawSchema` converts to `Schema` via `TryFrom`, and `Schema` converts back to `RawSchema` via `From`.
- `Schema` converts to arrow's schema (`ArrowSchema`) via `arrow_schema()`, and `ArrowSchema` converts back to `Schema` via `TryFrom`.
- `RegionSchema` exposes its user `Schema` via `user_schema()`, its `ColumnsMetadata` via `columns`, and its `StoreSchema` via `store_schema()`; a `ProjectedSchema` is built from it with `ProjectedSchema::new()`.
- `ProjectedSchema` exposes its projected user `Schema` via `projected_user_schema()`.
- `StoreSchema` exposes its `Schema` via `schema()` and its `Vec<ColumnMetadata>` via `columns`; it can also be rebuilt from an `ArrowSchema` via `TryFrom`.
- `ColumnsMetadata` exposes its `Vec<ColumnMetadata>` via `columns`.
@@ -1,2 +1,2 @@
|
||||
[toolchain]
|
||||
channel = "nightly-2024-04-18"
|
||||
channel = "nightly-2024-04-20"
|
||||
|
||||
@@ -30,6 +30,7 @@ pub enum Error {
|
||||
#[snafu(display("Unknown proto column datatype: {}", datatype))]
|
||||
UnknownColumnDataType {
|
||||
datatype: i32,
|
||||
#[snafu(implicit)]
|
||||
location: Location,
|
||||
#[snafu(source)]
|
||||
error: prost::DecodeError,
|
||||
@@ -38,12 +39,14 @@ pub enum Error {
|
||||
#[snafu(display("Failed to create column datatype from {:?}", from))]
|
||||
IntoColumnDataType {
|
||||
from: ConcreteDataType,
|
||||
#[snafu(implicit)]
|
||||
location: Location,
|
||||
},
|
||||
|
||||
#[snafu(display("Failed to convert column default constraint, column: {}", column))]
|
||||
ConvertColumnDefaultConstraint {
|
||||
column: String,
|
||||
#[snafu(implicit)]
|
||||
location: Location,
|
||||
source: datatypes::error::Error,
|
||||
},
|
||||
@@ -51,6 +54,7 @@ pub enum Error {
|
||||
#[snafu(display("Invalid column default constraint, column: {}", column))]
|
||||
InvalidColumnDefaultConstraint {
|
||||
column: String,
|
||||
#[snafu(implicit)]
|
||||
location: Location,
|
||||
source: datatypes::error::Error,
|
||||
},
|
||||
|
||||
@@ -480,6 +480,8 @@ fn ddl_request_type(request: &DdlRequest) -> &'static str {
|
||||
Some(Expr::TruncateTable(_)) => "ddl.truncate_table",
|
||||
Some(Expr::CreateFlow(_)) => "ddl.create_flow",
|
||||
Some(Expr::DropFlow(_)) => "ddl.drop_flow",
|
||||
Some(Expr::CreateView(_)) => "ddl.create_view",
|
||||
Some(Expr::DropView(_)) => "ddl.drop_view",
|
||||
None => "ddl.empty",
|
||||
}
|
||||
}
|
||||
|
||||
@@ -34,11 +34,13 @@ pub enum Error {
|
||||
Io {
|
||||
#[snafu(source)]
|
||||
error: std::io::Error,
|
||||
#[snafu(implicit)]
|
||||
location: Location,
|
||||
},
|
||||
|
||||
#[snafu(display("Auth failed"))]
|
||||
AuthBackend {
|
||||
#[snafu(implicit)]
|
||||
location: Location,
|
||||
source: BoxedError,
|
||||
},
|
||||
@@ -72,7 +74,10 @@ pub enum Error {
|
||||
},
|
||||
|
||||
#[snafu(display("User is not authorized to perform this action"))]
|
||||
PermissionDenied { location: Location },
|
||||
PermissionDenied {
|
||||
#[snafu(implicit)]
|
||||
location: Location,
|
||||
},
|
||||
}
|
||||
|
||||
impl ErrorExt for Error {
|
||||
|
||||
@@ -30,6 +30,7 @@ pub enum PermissionReq<'a> {
|
||||
PromStoreWrite,
|
||||
PromStoreRead,
|
||||
Otlp,
|
||||
LogWrite,
|
||||
}
|
||||
|
||||
#[derive(Debug)]
|
||||
|
||||
@@ -52,7 +52,10 @@ fn test_permission_checker() {
|
||||
|
||||
let sql_result = checker.check_permission(
|
||||
None,
|
||||
PermissionReq::SqlStatement(&Statement::ShowDatabases(ShowDatabases::new(ShowKind::All))),
|
||||
PermissionReq::SqlStatement(&Statement::ShowDatabases(ShowDatabases::new(
|
||||
ShowKind::All,
|
||||
false,
|
||||
))),
|
||||
);
|
||||
assert_matches!(sql_result, Ok(PermissionResp::Reject));
|
||||
|
||||
|
||||
src/cache/Cargo.toml (new file, 14 lines, vendored)
@@ -0,0 +1,14 @@
|
||||
[package]
|
||||
name = "cache"
|
||||
version.workspace = true
|
||||
edition.workspace = true
|
||||
license.workspace = true
|
||||
|
||||
[dependencies]
|
||||
catalog.workspace = true
|
||||
common-error.workspace = true
|
||||
common-macro.workspace = true
|
||||
common-meta.workspace = true
|
||||
moka.workspace = true
|
||||
snafu.workspace = true
|
||||
substrait.workspace = true
|
||||
src/cache/src/error.rs (new file, 44 lines, vendored)
@@ -0,0 +1,44 @@
|
||||
// Copyright 2023 Greptime Team
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
use common_error::ext::ErrorExt;
|
||||
use common_error::status_code::StatusCode;
|
||||
use common_macro::stack_trace_debug;
|
||||
use snafu::{Location, Snafu};
|
||||
|
||||
#[derive(Snafu)]
|
||||
#[snafu(visibility(pub))]
|
||||
#[stack_trace_debug]
|
||||
pub enum Error {
|
||||
#[snafu(display("Failed to get cache from cache registry: {}", name))]
|
||||
CacheRequired {
|
||||
#[snafu(implicit)]
|
||||
location: Location,
|
||||
name: String,
|
||||
},
|
||||
}
|
||||
|
||||
pub type Result<T> = std::result::Result<T, Error>;
|
||||
|
||||
impl ErrorExt for Error {
|
||||
fn status_code(&self) -> StatusCode {
|
||||
match self {
|
||||
Error::CacheRequired { .. } => StatusCode::Internal,
|
||||
}
|
||||
}
|
||||
|
||||
fn as_any(&self) -> &dyn std::any::Any {
|
||||
self
|
||||
}
|
||||
}
|
||||
src/cache/src/lib.rs (new file, 135 lines, vendored)
@@ -0,0 +1,135 @@
|
||||
// Copyright 2023 Greptime Team
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
pub mod error;
|
||||
|
||||
use std::sync::Arc;
|
||||
use std::time::Duration;
|
||||
|
||||
use catalog::kvbackend::new_table_cache;
|
||||
use common_meta::cache::{
|
||||
new_table_flownode_set_cache, new_table_info_cache, new_table_name_cache,
|
||||
new_table_route_cache, new_view_info_cache, CacheRegistry, CacheRegistryBuilder,
|
||||
LayeredCacheRegistryBuilder,
|
||||
};
|
||||
use common_meta::kv_backend::KvBackendRef;
|
||||
use moka::future::CacheBuilder;
|
||||
use snafu::OptionExt;
|
||||
|
||||
use crate::error::Result;
|
||||
|
||||
const DEFAULT_CACHE_MAX_CAPACITY: u64 = 65536;
|
||||
const DEFAULT_CACHE_TTL: Duration = Duration::from_secs(10 * 60);
|
||||
const DEFAULT_CACHE_TTI: Duration = Duration::from_secs(5 * 60);
|
||||
|
||||
pub const TABLE_INFO_CACHE_NAME: &str = "table_info_cache";
|
||||
pub const VIEW_INFO_CACHE_NAME: &str = "view_info_cache";
|
||||
pub const TABLE_NAME_CACHE_NAME: &str = "table_name_cache";
|
||||
pub const TABLE_CACHE_NAME: &str = "table_cache";
|
||||
pub const TABLE_FLOWNODE_SET_CACHE_NAME: &str = "table_flownode_set_cache";
|
||||
pub const TABLE_ROUTE_CACHE_NAME: &str = "table_route_cache";
|
||||
|
||||
pub fn build_fundamental_cache_registry(kv_backend: KvBackendRef) -> CacheRegistry {
|
||||
// Builds table info cache
|
||||
let cache = CacheBuilder::new(DEFAULT_CACHE_MAX_CAPACITY)
|
||||
.time_to_live(DEFAULT_CACHE_TTL)
|
||||
.time_to_idle(DEFAULT_CACHE_TTI)
|
||||
.build();
|
||||
let table_info_cache = Arc::new(new_table_info_cache(
|
||||
TABLE_INFO_CACHE_NAME.to_string(),
|
||||
cache,
|
||||
kv_backend.clone(),
|
||||
));
|
||||
|
||||
// Builds table name cache
|
||||
let cache = CacheBuilder::new(DEFAULT_CACHE_MAX_CAPACITY)
|
||||
.time_to_live(DEFAULT_CACHE_TTL)
|
||||
.time_to_idle(DEFAULT_CACHE_TTI)
|
||||
.build();
|
||||
let table_name_cache = Arc::new(new_table_name_cache(
|
||||
TABLE_NAME_CACHE_NAME.to_string(),
|
||||
cache,
|
||||
kv_backend.clone(),
|
||||
));
|
||||
|
||||
// Builds table route cache
|
||||
let cache = CacheBuilder::new(DEFAULT_CACHE_MAX_CAPACITY)
|
||||
.time_to_live(DEFAULT_CACHE_TTL)
|
||||
.time_to_idle(DEFAULT_CACHE_TTI)
|
||||
.build();
|
||||
let table_route_cache = Arc::new(new_table_route_cache(
|
||||
TABLE_ROUTE_CACHE_NAME.to_string(),
|
||||
cache,
|
||||
kv_backend.clone(),
|
||||
));
|
||||
|
||||
// Builds table flownode set cache
|
||||
let cache = CacheBuilder::new(DEFAULT_CACHE_MAX_CAPACITY)
|
||||
.time_to_live(DEFAULT_CACHE_TTL)
|
||||
.time_to_idle(DEFAULT_CACHE_TTI)
|
||||
.build();
|
||||
let table_flownode_set_cache = Arc::new(new_table_flownode_set_cache(
|
||||
TABLE_FLOWNODE_SET_CACHE_NAME.to_string(),
|
||||
cache,
|
||||
kv_backend.clone(),
|
||||
));
|
||||
// Builds the view info cache
|
||||
let cache = CacheBuilder::new(DEFAULT_CACHE_MAX_CAPACITY)
|
||||
.time_to_live(DEFAULT_CACHE_TTL)
|
||||
.time_to_idle(DEFAULT_CACHE_TTI)
|
||||
.build();
|
||||
let view_info_cache = Arc::new(new_view_info_cache(
|
||||
VIEW_INFO_CACHE_NAME.to_string(),
|
||||
cache,
|
||||
kv_backend.clone(),
|
||||
));
|
||||
|
||||
CacheRegistryBuilder::default()
|
||||
.add_cache(table_info_cache)
|
||||
.add_cache(table_name_cache)
|
||||
.add_cache(table_route_cache)
|
||||
.add_cache(view_info_cache)
|
||||
.add_cache(table_flownode_set_cache)
|
||||
.build()
|
||||
}
|
||||
|
||||
// TODO(weny): Make the cache configurable.
|
||||
pub fn with_default_composite_cache_registry(
|
||||
builder: LayeredCacheRegistryBuilder,
|
||||
) -> Result<LayeredCacheRegistryBuilder> {
|
||||
let table_info_cache = builder.get().context(error::CacheRequiredSnafu {
|
||||
name: TABLE_INFO_CACHE_NAME,
|
||||
})?;
|
||||
let table_name_cache = builder.get().context(error::CacheRequiredSnafu {
|
||||
name: TABLE_NAME_CACHE_NAME,
|
||||
})?;
|
||||
|
||||
// Builds table cache
|
||||
let cache = CacheBuilder::new(DEFAULT_CACHE_MAX_CAPACITY)
|
||||
.time_to_live(DEFAULT_CACHE_TTL)
|
||||
.time_to_idle(DEFAULT_CACHE_TTI)
|
||||
.build();
|
||||
let table_cache = Arc::new(new_table_cache(
|
||||
TABLE_CACHE_NAME.to_string(),
|
||||
cache,
|
||||
table_info_cache,
|
||||
table_name_cache,
|
||||
));
|
||||
|
||||
let registry = CacheRegistryBuilder::default()
|
||||
.add_cache(table_cache)
|
||||
.build();
|
||||
|
||||
Ok(builder.add_cache_registry(registry))
|
||||
}
|
||||
@@ -16,6 +16,7 @@ arrow.workspace = true
|
||||
arrow-schema.workspace = true
|
||||
async-stream.workspace = true
|
||||
async-trait = "0.1"
|
||||
bytes.workspace = true
|
||||
common-catalog.workspace = true
|
||||
common-config.workspace = true
|
||||
common-error.workspace = true
|
||||
@@ -48,8 +49,11 @@ table.workspace = true
|
||||
tokio.workspace = true
|
||||
|
||||
[dev-dependencies]
|
||||
cache.workspace = true
|
||||
catalog = { workspace = true, features = ["testing"] }
|
||||
chrono.workspace = true
|
||||
common-meta = { workspace = true, features = ["testing"] }
|
||||
common-query = { workspace = true, features = ["testing"] }
|
||||
common-test-util.workspace = true
|
||||
log-store.workspace = true
|
||||
object-store.workspace = true
|
||||
|
||||
@@ -19,10 +19,7 @@ use common_error::ext::{BoxedError, ErrorExt};
|
||||
use common_error::status_code::StatusCode;
|
||||
use common_macro::stack_trace_debug;
|
||||
use datafusion::error::DataFusionError;
|
||||
use datatypes::prelude::ConcreteDataType;
|
||||
use snafu::{Location, Snafu};
|
||||
use table::metadata::TableId;
|
||||
use tokio::task::JoinError;
|
||||
|
||||
#[derive(Snafu)]
|
||||
#[snafu(visibility(pub))]
|
||||
@@ -30,12 +27,14 @@ use tokio::task::JoinError;
|
||||
pub enum Error {
|
||||
#[snafu(display("Failed to list catalogs"))]
|
||||
ListCatalogs {
|
||||
#[snafu(implicit)]
|
||||
location: Location,
|
||||
source: BoxedError,
|
||||
},
|
||||
|
||||
#[snafu(display("Failed to list {}'s schemas", catalog))]
|
||||
ListSchemas {
|
||||
#[snafu(implicit)]
|
||||
location: Location,
|
||||
catalog: String,
|
||||
source: BoxedError,
|
||||
@@ -43,6 +42,7 @@ pub enum Error {
|
||||
|
||||
#[snafu(display("Failed to list {}.{}'s tables", catalog, schema))]
|
||||
ListTables {
|
||||
#[snafu(implicit)]
|
||||
location: Location,
|
||||
catalog: String,
|
||||
schema: String,
|
||||
@@ -51,78 +51,37 @@ pub enum Error {
|
||||
|
||||
#[snafu(display("Failed to list nodes in cluster: {source}"))]
|
||||
ListNodes {
|
||||
#[snafu(implicit)]
|
||||
location: Location,
|
||||
source: BoxedError,
|
||||
},
|
||||
|
||||
#[snafu(display("Failed to re-compile script due to internal error"))]
|
||||
CompileScriptInternal {
|
||||
#[snafu(implicit)]
|
||||
location: Location,
|
||||
source: BoxedError,
|
||||
},
|
||||
#[snafu(display("Failed to open system catalog table"))]
|
||||
OpenSystemCatalog {
|
||||
location: Location,
|
||||
source: table::error::Error,
|
||||
},
|
||||
|
||||
#[snafu(display("Failed to create system catalog table"))]
|
||||
CreateSystemCatalog {
|
||||
location: Location,
|
||||
source: table::error::Error,
|
||||
},
|
||||
|
||||
#[snafu(display("Failed to create table, table info: {}", table_info))]
|
||||
CreateTable {
|
||||
table_info: String,
|
||||
#[snafu(implicit)]
|
||||
location: Location,
|
||||
source: table::error::Error,
|
||||
},
|
||||
|
||||
#[snafu(display("System catalog is not valid: {}", msg))]
|
||||
SystemCatalog { msg: String, location: Location },
|
||||
|
||||
#[snafu(display(
|
||||
"System catalog table type mismatch, expected: binary, found: {:?}",
|
||||
data_type,
|
||||
))]
|
||||
SystemCatalogTypeMismatch {
|
||||
data_type: ConcreteDataType,
|
||||
SystemCatalog {
|
||||
msg: String,
|
||||
#[snafu(implicit)]
|
||||
location: Location,
|
||||
},
|
||||
|
||||
#[snafu(display("Invalid system catalog entry type: {:?}", entry_type))]
|
||||
InvalidEntryType {
|
||||
entry_type: Option<u8>,
|
||||
location: Location,
|
||||
},
|
||||
|
||||
#[snafu(display("Invalid system catalog key: {:?}", key))]
|
||||
InvalidKey {
|
||||
key: Option<String>,
|
||||
location: Location,
|
||||
},
|
||||
|
||||
#[snafu(display("Catalog value is not present"))]
|
||||
EmptyValue { location: Location },
|
||||
|
||||
#[snafu(display("Failed to deserialize value"))]
|
||||
ValueDeserialize {
|
||||
#[snafu(source)]
|
||||
error: serde_json::error::Error,
|
||||
location: Location,
|
||||
},
|
||||
|
||||
#[snafu(display("Table engine not found: {}", engine_name))]
|
||||
TableEngineNotFound {
|
||||
engine_name: String,
|
||||
location: Location,
|
||||
source: table::error::Error,
|
||||
},
|
||||
|
||||
#[snafu(display("Cannot find catalog by name: {}", catalog_name))]
|
||||
CatalogNotFound {
|
||||
catalog_name: String,
|
||||
#[snafu(implicit)]
|
||||
location: Location,
|
||||
},
|
||||
|
||||
@@ -130,43 +89,28 @@ pub enum Error {
|
||||
SchemaNotFound {
|
||||
catalog: String,
|
||||
schema: String,
|
||||
#[snafu(implicit)]
|
||||
location: Location,
|
||||
},
|
||||
|
||||
#[snafu(display("Table `{}` already exists", table))]
|
||||
TableExists { table: String, location: Location },
|
||||
TableExists {
|
||||
table: String,
|
||||
#[snafu(implicit)]
|
||||
location: Location,
|
||||
},
|
||||
|
||||
#[snafu(display("Table not found: {}", table))]
|
||||
TableNotExist { table: String, location: Location },
|
||||
|
||||
#[snafu(display("Schema {} already exists", schema))]
|
||||
SchemaExists { schema: String, location: Location },
|
||||
|
||||
#[snafu(display("Operation {} not implemented yet", operation))]
|
||||
Unimplemented {
|
||||
operation: String,
|
||||
TableNotExist {
|
||||
table: String,
|
||||
#[snafu(implicit)]
|
||||
location: Location,
|
||||
},
|
||||
|
||||
#[snafu(display("Operation {} not supported", op))]
|
||||
NotSupported { op: String, location: Location },
|
||||
|
||||
#[snafu(display("Failed to open table {table_id}"))]
|
||||
OpenTable {
|
||||
table_id: TableId,
|
||||
location: Location,
|
||||
source: table::error::Error,
|
||||
},
|
||||
|
||||
#[snafu(display("Failed to open table in parallel"))]
|
||||
ParallelOpenTable {
|
||||
#[snafu(source)]
|
||||
error: JoinError,
|
||||
},
|
||||
|
||||
#[snafu(display("Table not found while opening table, table info: {}", table_info))]
|
||||
TableNotFound {
|
||||
table_info: String,
|
||||
#[snafu(display("View info not found: {}", name))]
|
||||
ViewInfoNotFound {
|
||||
name: String,
|
||||
#[snafu(implicit)]
|
||||
location: Location,
|
||||
},
|
||||
|
||||
@@ -176,59 +120,44 @@ pub enum Error {
|
||||
#[snafu(display("Failed to find region routes"))]
|
||||
FindRegionRoutes { source: partition::error::Error },
|
||||
|
||||
#[snafu(display("Failed to read system catalog table records"))]
|
||||
ReadSystemCatalog {
|
||||
location: Location,
|
||||
source: common_recordbatch::error::Error,
|
||||
},
|
||||
|
||||
#[snafu(display("Failed to create recordbatch"))]
|
||||
CreateRecordBatch {
|
||||
#[snafu(implicit)]
|
||||
location: Location,
|
||||
source: common_recordbatch::error::Error,
|
||||
},
|
||||
|
||||
#[snafu(display("Failed to insert table creation record to system catalog"))]
|
||||
InsertCatalogRecord {
|
||||
location: Location,
|
||||
source: table::error::Error,
|
||||
},
|
||||
|
||||
#[snafu(display("Failed to scan system catalog table"))]
|
||||
SystemCatalogTableScan {
|
||||
location: Location,
|
||||
source: table::error::Error,
|
||||
},
|
||||
|
||||
#[snafu(display("Internal error"))]
|
||||
Internal {
|
||||
#[snafu(implicit)]
|
||||
location: Location,
|
||||
source: BoxedError,
|
||||
},
|
||||
|
||||
#[snafu(display("Failed to upgrade weak catalog manager reference"))]
|
||||
UpgradeWeakCatalogManagerRef { location: Location },
|
||||
UpgradeWeakCatalogManagerRef {
|
||||
#[snafu(implicit)]
|
||||
location: Location,
|
||||
},
|
||||
|
||||
#[snafu(display("Failed to execute system catalog table scan"))]
|
||||
SystemCatalogTableScanExec {
|
||||
#[snafu(display("Failed to decode logical plan for view: {}", name))]
|
||||
DecodePlan {
|
||||
name: String,
|
||||
#[snafu(implicit)]
|
||||
location: Location,
|
||||
source: common_query::error::Error,
|
||||
},
|
||||
|
||||
#[snafu(display("Cannot parse catalog value"))]
|
||||
InvalidCatalogValue {
|
||||
location: Location,
|
||||
source: common_catalog::error::Error,
|
||||
},
|
||||
|
||||
#[snafu(display("Failed to perform metasrv operation"))]
|
||||
Metasrv {
|
||||
#[snafu(implicit)]
|
||||
location: Location,
|
||||
source: meta_client::error::Error,
|
||||
},
|
||||
|
||||
#[snafu(display("Invalid table info in catalog"))]
|
||||
InvalidTableInfoInCatalog {
|
||||
#[snafu(implicit)]
|
||||
location: Location,
|
||||
source: datatypes::error::Error,
|
||||
},
|
||||
@@ -240,29 +169,43 @@ pub enum Error {
|
||||
Datafusion {
|
||||
#[snafu(source)]
|
||||
error: DataFusionError,
|
||||
#[snafu(implicit)]
|
||||
location: Location,
|
||||
},
|
||||
|
||||
#[snafu(display("Table schema mismatch"))]
|
||||
TableSchemaMismatch {
|
||||
location: Location,
|
||||
source: table::error::Error,
|
||||
},
|
||||
|
||||
#[snafu(display("A generic error has occurred, msg: {}", msg))]
|
||||
Generic { msg: String, location: Location },
|
||||
|
||||
#[snafu(display("Table metadata manager error"))]
|
||||
TableMetadataManager {
|
||||
source: common_meta::error::Error,
|
||||
#[snafu(implicit)]
|
||||
location: Location,
|
||||
},
|
||||
|
||||
#[snafu(display("Get null from table cache, key: {}", key))]
|
||||
TableCacheNotGet { key: String, location: Location },
|
||||
#[snafu(display("Failed to get table cache"))]
|
||||
GetTableCache {
|
||||
source: common_meta::error::Error,
|
||||
#[snafu(implicit)]
|
||||
location: Location,
|
||||
},
|
||||
|
||||
#[snafu(display("Failed to get table cache, err: {}", err_msg))]
|
||||
GetTableCache { err_msg: String },
|
||||
#[snafu(display("Failed to get view info from cache"))]
|
||||
GetViewCache {
|
||||
source: common_meta::error::Error,
|
||||
#[snafu(implicit)]
|
||||
location: Location,
|
||||
},
|
||||
|
||||
#[snafu(display("Cache not found: {name}"))]
|
||||
CacheNotFound {
|
||||
name: String,
|
||||
#[snafu(implicit)]
|
||||
location: Location,
|
||||
},
|
||||
|
||||
#[snafu(display("Failed to cast the catalog manager"))]
|
||||
CastManager {
|
||||
#[snafu(implicit)]
|
||||
location: Location,
|
||||
},
|
||||
}
|
||||
|
||||
pub type Result<T> = std::result::Result<T, Error>;
|
||||
@@ -270,61 +213,43 @@ pub type Result<T> = std::result::Result<T, Error>;
|
||||
impl ErrorExt for Error {
|
||||
fn status_code(&self) -> StatusCode {
|
||||
match self {
|
||||
Error::InvalidKey { .. }
|
||||
| Error::SchemaNotFound { .. }
|
||||
Error::SchemaNotFound { .. }
|
||||
| Error::CatalogNotFound { .. }
|
||||
| Error::FindPartitions { .. }
|
||||
| Error::FindRegionRoutes { .. }
|
||||
| Error::InvalidEntryType { .. }
|
||||
| Error::ParallelOpenTable { .. } => StatusCode::Unexpected,
|
||||
| Error::CacheNotFound { .. }
|
||||
| Error::CastManager { .. } => StatusCode::Unexpected,
|
||||
|
||||
Error::TableNotFound { .. } => StatusCode::TableNotFound,
|
||||
Error::ViewInfoNotFound { .. } => StatusCode::TableNotFound,
|
||||
|
||||
Error::SystemCatalog { .. }
|
||||
| Error::EmptyValue { .. }
|
||||
| Error::ValueDeserialize { .. } => StatusCode::StorageUnavailable,
|
||||
Error::SystemCatalog { .. } => StatusCode::StorageUnavailable,
|
||||
|
||||
Error::Generic { .. }
|
||||
| Error::SystemCatalogTypeMismatch { .. }
|
||||
| Error::UpgradeWeakCatalogManagerRef { .. } => StatusCode::Internal,
|
||||
|
||||
Error::ReadSystemCatalog { source, .. } | Error::CreateRecordBatch { source, .. } => {
|
||||
source.status_code()
|
||||
}
|
||||
Error::InvalidCatalogValue { source, .. } => source.status_code(),
|
||||
Error::UpgradeWeakCatalogManagerRef { .. } => StatusCode::Internal,
|
||||
|
||||
Error::CreateRecordBatch { source, .. } => source.status_code(),
|
||||
Error::TableExists { .. } => StatusCode::TableAlreadyExists,
|
||||
Error::TableNotExist { .. } => StatusCode::TableNotFound,
|
||||
Error::SchemaExists { .. } | Error::TableEngineNotFound { .. } => {
|
||||
StatusCode::InvalidArguments
|
||||
}
|
||||
|
||||
Error::ListCatalogs { source, .. }
|
||||
| Error::ListNodes { source, .. }
|
||||
| Error::ListSchemas { source, .. }
|
||||
| Error::ListTables { source, .. } => source.status_code(),
|
||||
|
||||
Error::OpenSystemCatalog { source, .. }
|
||||
| Error::CreateSystemCatalog { source, .. }
|
||||
| Error::InsertCatalogRecord { source, .. }
|
||||
| Error::OpenTable { source, .. }
|
||||
| Error::CreateTable { source, .. }
|
||||
| Error::TableSchemaMismatch { source, .. } => source.status_code(),
|
||||
Error::CreateTable { source, .. } => source.status_code(),
|
||||
|
||||
Error::Metasrv { source, .. } => source.status_code(),
|
||||
Error::SystemCatalogTableScan { source, .. } => source.status_code(),
|
||||
Error::SystemCatalogTableScanExec { source, .. } => source.status_code(),
|
||||
Error::DecodePlan { source, .. } => source.status_code(),
|
||||
Error::InvalidTableInfoInCatalog { source, .. } => source.status_code(),
|
||||
|
||||
Error::CompileScriptInternal { source, .. } | Error::Internal { source, .. } => {
|
||||
source.status_code()
|
||||
}
|
||||
|
||||
Error::Unimplemented { .. } | Error::NotSupported { .. } => StatusCode::Unsupported,
|
||||
Error::QueryAccessDenied { .. } => StatusCode::AccessDenied,
|
||||
Error::Datafusion { .. } => StatusCode::EngineExecuteQuery,
|
||||
Error::TableMetadataManager { source, .. } => source.status_code(),
|
||||
Error::TableCacheNotGet { .. } | Error::GetTableCache { .. } => StatusCode::Internal,
|
||||
Error::GetViewCache { source, .. } | Error::GetTableCache { source, .. } => {
|
||||
source.status_code()
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@@ -356,11 +281,6 @@ mod tests {
|
||||
.status_code()
|
||||
);
|
||||
|
||||
assert_eq!(
|
||||
StatusCode::Unexpected,
|
||||
InvalidKeySnafu { key: None }.build().status_code()
|
||||
);
|
||||
|
||||
assert_eq!(
|
||||
StatusCode::StorageUnavailable,
|
||||
Error::SystemCatalog {
|
||||
@@ -369,19 +289,6 @@ mod tests {
|
||||
}
|
||||
.status_code()
|
||||
);
|
||||
|
||||
assert_eq!(
|
||||
StatusCode::Internal,
|
||||
Error::SystemCatalogTypeMismatch {
|
||||
data_type: ConcreteDataType::binary_datatype(),
|
||||
location: Location::generate(),
|
||||
}
|
||||
.status_code()
|
||||
);
|
||||
assert_eq!(
|
||||
StatusCode::StorageUnavailable,
|
||||
EmptyValueSnafu {}.build().status_code()
|
||||
);
|
||||
}
|
||||
|
||||
#[test]
|
||||
|
||||
@@ -21,11 +21,11 @@ use common_config::Mode;
|
||||
use common_error::ext::BoxedError;
|
||||
use common_meta::cluster::{ClusterInfo, NodeInfo, NodeStatus};
|
||||
use common_meta::peer::Peer;
|
||||
use common_query::physical_plan::TaskContext;
|
||||
use common_recordbatch::adapter::RecordBatchStreamAdapter;
|
||||
use common_recordbatch::{RecordBatch, SendableRecordBatchStream};
|
||||
use common_telemetry::warn;
|
||||
use common_time::timestamp::Timestamp;
|
||||
use datafusion::execution::TaskContext;
|
||||
use datafusion::physical_plan::stream::RecordBatchStreamAdapter as DfRecordBatchStreamAdapter;
|
||||
use datafusion::physical_plan::streaming::PartitionStream as DfPartitionStream;
|
||||
use datafusion::physical_plan::SendableRecordBatchStream as DfSendableRecordBatchStream;
|
||||
|
||||
@@ -20,9 +20,9 @@ use common_catalog::consts::{
|
||||
SEMANTIC_TYPE_TIME_INDEX,
|
||||
};
|
||||
use common_error::ext::BoxedError;
|
||||
use common_query::physical_plan::TaskContext;
|
||||
use common_recordbatch::adapter::RecordBatchStreamAdapter;
|
||||
use common_recordbatch::{RecordBatch, SendableRecordBatchStream};
|
||||
use datafusion::execution::TaskContext;
|
||||
use datafusion::physical_plan::stream::RecordBatchStreamAdapter as DfRecordBatchStreamAdapter;
|
||||
use datafusion::physical_plan::streaming::PartitionStream as DfPartitionStream;
|
||||
use datafusion::physical_plan::SendableRecordBatchStream as DfSendableRecordBatchStream;
|
||||
|
||||
@@ -17,9 +17,9 @@ use std::sync::{Arc, Weak};
|
||||
use arrow_schema::SchemaRef as ArrowSchemaRef;
|
||||
use common_catalog::consts::INFORMATION_SCHEMA_KEY_COLUMN_USAGE_TABLE_ID;
|
||||
use common_error::ext::BoxedError;
|
||||
use common_query::physical_plan::TaskContext;
|
||||
use common_recordbatch::adapter::RecordBatchStreamAdapter;
|
||||
use common_recordbatch::{RecordBatch, SendableRecordBatchStream};
|
||||
use datafusion::execution::TaskContext;
|
||||
use datafusion::physical_plan::stream::RecordBatchStreamAdapter as DfRecordBatchStreamAdapter;
|
||||
use datafusion::physical_plan::streaming::PartitionStream as DfPartitionStream;
|
||||
use datafusion::physical_plan::SendableRecordBatchStream as DfSendableRecordBatchStream;
|
||||
|
||||
@@ -17,9 +17,9 @@ use std::sync::Arc;
|
||||
|
||||
use arrow_schema::SchemaRef as ArrowSchemaRef;
|
||||
use common_error::ext::BoxedError;
|
||||
use common_query::physical_plan::TaskContext;
|
||||
use common_recordbatch::adapter::RecordBatchStreamAdapter;
|
||||
use common_recordbatch::{RecordBatch, SendableRecordBatchStream};
|
||||
use datafusion::execution::TaskContext;
|
||||
use datafusion::physical_plan::stream::RecordBatchStreamAdapter as DfRecordBatchStreamAdapter;
|
||||
use datafusion::physical_plan::streaming::PartitionStream as DfPartitionStream;
|
||||
use datafusion::physical_plan::SendableRecordBatchStream as DfSendableRecordBatchStream;
|
||||
|
||||
@@ -18,10 +18,10 @@ use std::sync::{Arc, Weak};
|
||||
use arrow_schema::SchemaRef as ArrowSchemaRef;
|
||||
use common_catalog::consts::INFORMATION_SCHEMA_PARTITIONS_TABLE_ID;
|
||||
use common_error::ext::BoxedError;
|
||||
use common_query::physical_plan::TaskContext;
|
||||
use common_recordbatch::adapter::RecordBatchStreamAdapter;
|
||||
use common_recordbatch::{RecordBatch, SendableRecordBatchStream};
|
||||
use common_time::datetime::DateTime;
|
||||
use datafusion::execution::TaskContext;
|
||||
use datafusion::physical_plan::stream::RecordBatchStreamAdapter as DfRecordBatchStreamAdapter;
|
||||
use datafusion::physical_plan::streaming::PartitionStream as DfPartitionStream;
|
||||
use datafusion::physical_plan::SendableRecordBatchStream as DfSendableRecordBatchStream;
|
||||
|
||||
@@ -14,10 +14,9 @@
|
||||
|
||||
use arrow::array::StringArray;
|
||||
use arrow::compute::kernels::comparison;
|
||||
use common_query::logical_plan::DfExpr;
|
||||
use datafusion::common::ScalarValue;
|
||||
use datafusion::logical_expr::expr::Like;
|
||||
use datafusion::logical_expr::Operator;
|
||||
use datafusion::logical_expr::{Expr, Operator};
|
||||
use datatypes::value::Value;
|
||||
use store_api::storage::ScanRequest;
|
||||
|
||||
@@ -118,12 +117,12 @@ impl Predicate {
|
||||
}
|
||||
|
||||
/// Try to create a predicate from datafusion [`Expr`], return None if fails.
|
||||
fn from_expr(expr: DfExpr) -> Option<Predicate> {
|
||||
fn from_expr(expr: Expr) -> Option<Predicate> {
|
||||
match expr {
|
||||
// NOT expr
|
||||
DfExpr::Not(expr) => Some(Predicate::Not(Box::new(Self::from_expr(*expr)?))),
|
||||
Expr::Not(expr) => Some(Predicate::Not(Box::new(Self::from_expr(*expr)?))),
|
||||
// expr LIKE pattern
|
||||
DfExpr::Like(Like {
|
||||
Expr::Like(Like {
|
||||
negated,
|
||||
expr,
|
||||
pattern,
|
||||
@@ -131,10 +130,10 @@ impl Predicate {
|
||||
..
|
||||
}) if is_column(&expr) && is_string_literal(&pattern) => {
|
||||
// Safety: ensured by guard
|
||||
let DfExpr::Column(c) = *expr else {
|
||||
let Expr::Column(c) = *expr else {
|
||||
unreachable!();
|
||||
};
|
||||
let DfExpr::Literal(ScalarValue::Utf8(Some(pattern))) = *pattern else {
|
||||
let Expr::Literal(ScalarValue::Utf8(Some(pattern))) = *pattern else {
|
||||
unreachable!();
|
||||
};
|
||||
|
||||
@@ -147,10 +146,10 @@ impl Predicate {
|
||||
}
|
||||
}
|
||||
// left OP right
|
||||
DfExpr::BinaryExpr(bin) => match (*bin.left, bin.op, *bin.right) {
|
||||
Expr::BinaryExpr(bin) => match (*bin.left, bin.op, *bin.right) {
|
||||
// left == right
|
||||
(DfExpr::Literal(scalar), Operator::Eq, DfExpr::Column(c))
|
||||
| (DfExpr::Column(c), Operator::Eq, DfExpr::Literal(scalar)) => {
|
||||
(Expr::Literal(scalar), Operator::Eq, Expr::Column(c))
|
||||
| (Expr::Column(c), Operator::Eq, Expr::Literal(scalar)) => {
|
||||
let Ok(v) = Value::try_from(scalar) else {
|
||||
return None;
|
||||
};
|
||||
@@ -158,8 +157,8 @@ impl Predicate {
|
||||
Some(Predicate::Eq(c.name, v))
|
||||
}
|
||||
// left != right
|
||||
(DfExpr::Literal(scalar), Operator::NotEq, DfExpr::Column(c))
|
||||
| (DfExpr::Column(c), Operator::NotEq, DfExpr::Literal(scalar)) => {
|
||||
(Expr::Literal(scalar), Operator::NotEq, Expr::Column(c))
|
||||
| (Expr::Column(c), Operator::NotEq, Expr::Literal(scalar)) => {
|
||||
let Ok(v) = Value::try_from(scalar) else {
|
||||
return None;
|
||||
};
|
||||
@@ -183,14 +182,14 @@ impl Predicate {
|
||||
_ => None,
|
||||
},
|
||||
// [NOT] IN (LIST)
|
||||
DfExpr::InList(list) => {
|
||||
Expr::InList(list) => {
|
||||
match (*list.expr, list.list, list.negated) {
|
||||
// column [NOT] IN (v1, v2, v3, ...)
|
||||
(DfExpr::Column(c), list, negated) if is_all_scalars(&list) => {
|
||||
(Expr::Column(c), list, negated) if is_all_scalars(&list) => {
|
||||
let mut values = Vec::with_capacity(list.len());
|
||||
for scalar in list {
|
||||
// Safety: checked by `is_all_scalars`
|
||||
let DfExpr::Literal(scalar) = scalar else {
|
||||
let Expr::Literal(scalar) = scalar else {
|
||||
unreachable!();
|
||||
};
|
||||
|
||||
@@ -237,12 +236,12 @@ fn like_utf8(s: &str, pattern: &str, case_insensitive: &bool) -> Option<bool> {
|
||||
Some(booleans.value(0))
|
||||
}
|
||||
|
||||
fn is_string_literal(expr: &DfExpr) -> bool {
|
||||
matches!(expr, DfExpr::Literal(ScalarValue::Utf8(Some(_))))
|
||||
fn is_string_literal(expr: &Expr) -> bool {
|
||||
matches!(expr, Expr::Literal(ScalarValue::Utf8(Some(_))))
|
||||
}
|
||||
|
||||
fn is_column(expr: &DfExpr) -> bool {
|
||||
matches!(expr, DfExpr::Column(_))
|
||||
fn is_column(expr: &Expr) -> bool {
|
||||
matches!(expr, Expr::Column(_))
|
||||
}
|
||||
|
||||
/// A list of predicate
|
||||
@@ -257,7 +256,7 @@ impl Predicates {
|
||||
let mut predicates = Vec::with_capacity(request.filters.len());
|
||||
|
||||
for filter in &request.filters {
|
||||
if let Some(predicate) = Predicate::from_expr(filter.df_expr().clone()) {
|
||||
if let Some(predicate) = Predicate::from_expr(filter.clone()) {
|
||||
predicates.push(predicate);
|
||||
}
|
||||
}
|
||||
@@ -286,8 +285,8 @@ impl Predicates {
|
||||
}
|
||||
|
||||
/// Returns true when the values are all [`DfExpr::Literal`].
|
||||
fn is_all_scalars(list: &[DfExpr]) -> bool {
|
||||
list.iter().all(|v| matches!(v, DfExpr::Literal(_)))
|
||||
fn is_all_scalars(list: &[Expr]) -> bool {
|
||||
list.iter().all(|v| matches!(v, Expr::Literal(_)))
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
@@ -376,7 +375,7 @@ mod tests {
|
||||
#[test]
|
||||
fn test_predicate_like() {
|
||||
// case insensitive
|
||||
let expr = DfExpr::Like(Like {
|
||||
let expr = Expr::Like(Like {
|
||||
negated: false,
|
||||
expr: Box::new(column("a")),
|
||||
pattern: Box::new(string_literal("%abc")),
|
||||
@@ -403,7 +402,7 @@ mod tests {
|
||||
assert!(p.eval(&[]).is_none());
|
||||
|
||||
// case sensitive
|
||||
let expr = DfExpr::Like(Like {
|
||||
let expr = Expr::Like(Like {
|
||||
negated: false,
|
||||
expr: Box::new(column("a")),
|
||||
pattern: Box::new(string_literal("%abc")),
|
||||
@@ -423,7 +422,7 @@ mod tests {
|
||||
assert!(p.eval(&[]).is_none());
|
||||
|
||||
// not like
|
||||
let expr = DfExpr::Like(Like {
|
||||
let expr = Expr::Like(Like {
|
||||
negated: true,
|
||||
expr: Box::new(column("a")),
|
||||
pattern: Box::new(string_literal("%abc")),
|
||||
@@ -437,15 +436,15 @@ mod tests {
|
||||
assert!(p.eval(&[]).is_none());
|
||||
}
|
||||
|
||||
fn column(name: &str) -> DfExpr {
|
||||
DfExpr::Column(Column {
|
||||
fn column(name: &str) -> Expr {
|
||||
Expr::Column(Column {
|
||||
relation: None,
|
||||
name: name.to_string(),
|
||||
})
|
||||
}
|
||||
|
||||
fn string_literal(v: &str) -> DfExpr {
|
||||
DfExpr::Literal(ScalarValue::Utf8(Some(v.to_string())))
|
||||
fn string_literal(v: &str) -> Expr {
|
||||
Expr::Literal(ScalarValue::Utf8(Some(v.to_string())))
|
||||
}
|
||||
|
||||
fn match_string_value(v: &Value, expected: &str) -> bool {
|
||||
@@ -463,14 +462,14 @@ mod tests {
|
||||
result
|
||||
}
|
||||
|
||||
fn mock_exprs() -> (DfExpr, DfExpr) {
|
||||
let expr1 = DfExpr::BinaryExpr(BinaryExpr {
|
||||
fn mock_exprs() -> (Expr, Expr) {
|
||||
let expr1 = Expr::BinaryExpr(BinaryExpr {
|
||||
left: Box::new(column("a")),
|
||||
op: Operator::Eq,
|
||||
right: Box::new(string_literal("a_value")),
|
||||
});
|
||||
|
||||
let expr2 = DfExpr::BinaryExpr(BinaryExpr {
|
||||
let expr2 = Expr::BinaryExpr(BinaryExpr {
|
||||
left: Box::new(column("b")),
|
||||
op: Operator::NotEq,
|
||||
right: Box::new(string_literal("b_value")),
|
||||
@@ -491,17 +490,17 @@ mod tests {
|
||||
assert!(matches!(&p2, Predicate::NotEq(column, v) if column == "b"
|
||||
&& match_string_value(v, "b_value")));
|
||||
|
||||
let and_expr = DfExpr::BinaryExpr(BinaryExpr {
|
||||
let and_expr = Expr::BinaryExpr(BinaryExpr {
|
||||
left: Box::new(expr1.clone()),
|
||||
op: Operator::And,
|
||||
right: Box::new(expr2.clone()),
|
||||
});
|
||||
let or_expr = DfExpr::BinaryExpr(BinaryExpr {
|
||||
let or_expr = Expr::BinaryExpr(BinaryExpr {
|
||||
left: Box::new(expr1.clone()),
|
||||
op: Operator::Or,
|
||||
right: Box::new(expr2.clone()),
|
||||
});
|
||||
let not_expr = DfExpr::Not(Box::new(expr1.clone()));
|
||||
let not_expr = Expr::Not(Box::new(expr1.clone()));
|
||||
|
||||
let and_p = Predicate::from_expr(and_expr).unwrap();
|
||||
assert!(matches!(and_p, Predicate::And(left, right) if *left == p1 && *right == p2));
|
||||
@@ -510,7 +509,7 @@ mod tests {
|
||||
let not_p = Predicate::from_expr(not_expr).unwrap();
|
||||
assert!(matches!(not_p, Predicate::Not(p) if *p == p1));
|
||||
|
||||
let inlist_expr = DfExpr::InList(InList {
|
||||
let inlist_expr = Expr::InList(InList {
|
||||
expr: Box::new(column("a")),
|
||||
list: vec![string_literal("a1"), string_literal("a2")],
|
||||
negated: false,
|
||||
@@ -520,7 +519,7 @@ mod tests {
|
||||
assert!(matches!(&inlist_p, Predicate::InList(c, values) if c == "a"
|
||||
&& match_string_values(values, &["a1", "a2"])));
|
||||
|
||||
let inlist_expr = DfExpr::InList(InList {
|
||||
let inlist_expr = Expr::InList(InList {
|
||||
expr: Box::new(column("a")),
|
||||
list: vec![string_literal("a1"), string_literal("a2")],
|
||||
negated: true,
|
||||
@@ -540,7 +539,7 @@ mod tests {
|
||||
let (expr1, expr2) = mock_exprs();
|
||||
|
||||
let request = ScanRequest {
|
||||
filters: vec![expr1.into(), expr2.into()],
|
||||
filters: vec![expr1, expr2],
|
||||
..Default::default()
|
||||
};
|
||||
let predicates = Predicates::from_scan_request(&Some(request));
|
||||
@@ -578,7 +577,7 @@ mod tests {
|
||||
|
||||
let (expr1, expr2) = mock_exprs();
|
||||
let request = ScanRequest {
|
||||
filters: vec![expr1.into(), expr2.into()],
|
||||
filters: vec![expr1, expr2],
|
||||
..Default::default()
|
||||
};
|
||||
let predicates = Predicates::from_scan_request(&Some(request));
|
||||
|
||||
@@ -19,9 +19,9 @@ use arrow_schema::SchemaRef as ArrowSchemaRef;
|
||||
use common_catalog::consts::INFORMATION_SCHEMA_REGION_PEERS_TABLE_ID;
|
||||
use common_error::ext::BoxedError;
|
||||
use common_meta::rpc::router::RegionRoute;
|
||||
use common_query::physical_plan::TaskContext;
|
||||
use common_recordbatch::adapter::RecordBatchStreamAdapter;
|
||||
use common_recordbatch::{RecordBatch, SendableRecordBatchStream};
|
||||
use datafusion::execution::TaskContext;
|
||||
use datafusion::physical_plan::stream::RecordBatchStreamAdapter as DfRecordBatchStreamAdapter;
|
||||
use datafusion::physical_plan::streaming::PartitionStream as DfPartitionStream;
|
||||
use datafusion::physical_plan::SendableRecordBatchStream as DfSendableRecordBatchStream;
|
||||
@@ -31,7 +31,7 @@ use datatypes::value::Value;
|
||||
use datatypes::vectors::{Int64VectorBuilder, StringVectorBuilder, UInt64VectorBuilder};
|
||||
use futures::{StreamExt, TryStreamExt};
|
||||
use snafu::{OptionExt, ResultExt};
|
||||
use store_api::storage::{ScanRequest, TableId};
|
||||
use store_api::storage::{RegionId, ScanRequest, TableId};
|
||||
use table::metadata::TableType;
|
||||
|
||||
use super::REGION_PEERS;
|
||||
@@ -205,8 +205,8 @@ impl InformationSchemaRegionPeersBuilder {
|
||||
table_ids.into_iter().map(|id| (id, vec![])).collect()
|
||||
};
|
||||
|
||||
for routes in table_routes.values() {
|
||||
self.add_region_peers(&predicates, routes);
|
||||
for (table_id, routes) in table_routes {
|
||||
self.add_region_peers(&predicates, table_id, &routes);
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -214,9 +214,14 @@ impl InformationSchemaRegionPeersBuilder {
|
||||
self.finish()
|
||||
}
|
||||
|
||||
fn add_region_peers(&mut self, predicates: &Predicates, routes: &[RegionRoute]) {
|
||||
fn add_region_peers(
|
||||
&mut self,
|
||||
predicates: &Predicates,
|
||||
table_id: TableId,
|
||||
routes: &[RegionRoute],
|
||||
) {
|
||||
for route in routes {
|
||||
let region_id = route.region.id.as_u64();
|
||||
let region_id = RegionId::new(table_id, route.region.id.region_number()).as_u64();
|
||||
let peer_id = route.leader_peer.clone().map(|p| p.id);
|
||||
let peer_addr = route.leader_peer.clone().map(|p| p.addr);
|
||||
let status = if let Some(status) = route.leader_status {
|
||||
|
||||
@@ -17,10 +17,10 @@ use std::sync::Arc;
|
||||
use arrow_schema::SchemaRef as ArrowSchemaRef;
|
||||
use common_catalog::consts::INFORMATION_SCHEMA_RUNTIME_METRICS_TABLE_ID;
|
||||
use common_error::ext::BoxedError;
|
||||
use common_query::physical_plan::TaskContext;
|
||||
use common_recordbatch::adapter::RecordBatchStreamAdapter;
|
||||
use common_recordbatch::{RecordBatch, SendableRecordBatchStream};
|
||||
use common_time::util::current_time_millis;
|
||||
use datafusion::execution::TaskContext;
|
||||
use datafusion::physical_plan::stream::RecordBatchStreamAdapter as DfRecordBatchStreamAdapter;
|
||||
use datafusion::physical_plan::streaming::PartitionStream as DfPartitionStream;
|
||||
use datafusion::physical_plan::SendableRecordBatchStream as DfSendableRecordBatchStream;
|
||||
|
||||
@@ -17,9 +17,10 @@ use std::sync::{Arc, Weak};
|
||||
use arrow_schema::SchemaRef as ArrowSchemaRef;
|
||||
use common_catalog::consts::INFORMATION_SCHEMA_SCHEMATA_TABLE_ID;
|
||||
use common_error::ext::BoxedError;
|
||||
use common_query::physical_plan::TaskContext;
|
||||
use common_meta::key::schema_name::SchemaNameKey;
|
||||
use common_recordbatch::adapter::RecordBatchStreamAdapter;
|
||||
use common_recordbatch::{RecordBatch, SendableRecordBatchStream};
|
||||
use datafusion::execution::TaskContext;
|
||||
use datafusion::physical_plan::stream::RecordBatchStreamAdapter as DfRecordBatchStreamAdapter;
|
||||
use datafusion::physical_plan::streaming::PartitionStream as DfPartitionStream;
|
||||
use datafusion::physical_plan::SendableRecordBatchStream as DfSendableRecordBatchStream;
|
||||
@@ -32,15 +33,18 @@ use store_api::storage::{ScanRequest, TableId};
|
||||
|
||||
use super::SCHEMATA;
|
||||
use crate::error::{
|
||||
CreateRecordBatchSnafu, InternalSnafu, Result, UpgradeWeakCatalogManagerRefSnafu,
|
||||
CreateRecordBatchSnafu, InternalSnafu, Result, SchemaNotFoundSnafu, TableMetadataManagerSnafu,
|
||||
UpgradeWeakCatalogManagerRefSnafu,
|
||||
};
|
||||
use crate::information_schema::{InformationTable, Predicates};
|
||||
use crate::information_schema::{utils, InformationTable, Predicates};
|
||||
use crate::CatalogManager;
|
||||
|
||||
pub const CATALOG_NAME: &str = "catalog_name";
|
||||
pub const SCHEMA_NAME: &str = "schema_name";
|
||||
const DEFAULT_CHARACTER_SET_NAME: &str = "default_character_set_name";
|
||||
const DEFAULT_COLLATION_NAME: &str = "default_collation_name";
|
||||
/// The database options
|
||||
pub const SCHEMA_OPTS: &str = "options";
|
||||
const INIT_CAPACITY: usize = 42;
|
||||
|
||||
/// The `information_schema.schemata` table implementation.
|
||||
@@ -74,6 +78,7 @@ impl InformationSchemaSchemata {
|
||||
false,
|
||||
),
|
||||
ColumnSchema::new("sql_path", ConcreteDataType::string_datatype(), true),
|
||||
ColumnSchema::new(SCHEMA_OPTS, ConcreteDataType::string_datatype(), true),
|
||||
]))
|
||||
}
|
||||
|
||||
@@ -133,6 +138,7 @@ struct InformationSchemaSchemataBuilder {
|
||||
charset_names: StringVectorBuilder,
|
||||
collation_names: StringVectorBuilder,
|
||||
sql_paths: StringVectorBuilder,
|
||||
schema_options: StringVectorBuilder,
|
||||
}
|
||||
|
||||
impl InformationSchemaSchemataBuilder {
|
||||
@@ -150,6 +156,7 @@ impl InformationSchemaSchemataBuilder {
|
||||
charset_names: StringVectorBuilder::with_capacity(INIT_CAPACITY),
|
||||
collation_names: StringVectorBuilder::with_capacity(INIT_CAPACITY),
|
||||
sql_paths: StringVectorBuilder::with_capacity(INIT_CAPACITY),
|
||||
schema_options: StringVectorBuilder::with_capacity(INIT_CAPACITY),
|
||||
}
|
||||
}
|
||||
|
||||
@@ -160,21 +167,50 @@ impl InformationSchemaSchemataBuilder {
|
||||
.catalog_manager
|
||||
.upgrade()
|
||||
.context(UpgradeWeakCatalogManagerRefSnafu)?;
|
||||
let table_metadata_manager = utils::table_meta_manager(&self.catalog_manager)?;
|
||||
let predicates = Predicates::from_scan_request(&request);
|
||||
|
||||
for schema_name in catalog_manager.schema_names(&catalog_name).await? {
|
||||
self.add_schema(&predicates, &catalog_name, &schema_name);
|
||||
let opts = if let Some(table_metadata_manager) = &table_metadata_manager {
|
||||
let schema_opts = table_metadata_manager
|
||||
.schema_manager()
|
||||
.get(SchemaNameKey::new(&catalog_name, &schema_name))
|
||||
.await
|
||||
.context(TableMetadataManagerSnafu)?
|
||||
.context(SchemaNotFoundSnafu {
|
||||
catalog: &catalog_name,
|
||||
schema: &schema_name,
|
||||
})?;
|
||||
|
||||
Some(format!("{schema_opts}"))
|
||||
} else {
|
||||
None
|
||||
};
|
||||
|
||||
self.add_schema(
|
||||
&predicates,
|
||||
&catalog_name,
|
||||
&schema_name,
|
||||
opts.as_deref().unwrap_or(""),
|
||||
);
|
||||
}
|
||||
|
||||
self.finish()
|
||||
}
|
||||
|
||||
fn add_schema(&mut self, predicates: &Predicates, catalog_name: &str, schema_name: &str) {
|
||||
fn add_schema(
|
||||
&mut self,
|
||||
predicates: &Predicates,
|
||||
catalog_name: &str,
|
||||
schema_name: &str,
|
||||
schema_options: &str,
|
||||
) {
|
||||
let row = [
|
||||
(CATALOG_NAME, &Value::from(catalog_name)),
|
||||
(SCHEMA_NAME, &Value::from(schema_name)),
|
||||
(DEFAULT_CHARACTER_SET_NAME, &Value::from("utf8")),
|
||||
(DEFAULT_COLLATION_NAME, &Value::from("utf8_bin")),
|
||||
(SCHEMA_OPTS, &Value::from(schema_options)),
|
||||
];
|
||||
|
||||
if !predicates.eval(&row) {
|
||||
@@ -186,6 +222,7 @@ impl InformationSchemaSchemataBuilder {
|
||||
self.charset_names.push(Some("utf8"));
|
||||
self.collation_names.push(Some("utf8_bin"));
|
||||
self.sql_paths.push(None);
|
||||
self.schema_options.push(Some(schema_options));
|
||||
}
|
||||
|
||||
fn finish(&mut self) -> Result<RecordBatch> {
|
||||
@@ -195,6 +232,7 @@ impl InformationSchemaSchemataBuilder {
|
||||
Arc::new(self.charset_names.finish()),
|
||||
Arc::new(self.collation_names.finish()),
|
||||
Arc::new(self.sql_paths.finish()),
|
||||
Arc::new(self.schema_options.finish()),
|
||||
];
|
||||
RecordBatch::new(self.schema.clone(), columns).context(CreateRecordBatchSnafu)
|
||||
}
|
||||
|
||||
@@ -17,9 +17,9 @@ use std::sync::{Arc, Weak};
|
||||
use arrow_schema::SchemaRef as ArrowSchemaRef;
|
||||
use common_catalog::consts::INFORMATION_SCHEMA_TABLE_CONSTRAINTS_TABLE_ID;
|
||||
use common_error::ext::BoxedError;
|
||||
use common_query::physical_plan::TaskContext;
|
||||
use common_recordbatch::adapter::RecordBatchStreamAdapter;
|
||||
use common_recordbatch::{RecordBatch, SendableRecordBatchStream};
|
||||
use datafusion::execution::TaskContext;
|
||||
use datafusion::physical_plan::stream::RecordBatchStreamAdapter as DfRecordBatchStreamAdapter;
|
||||
use datafusion::physical_plan::streaming::PartitionStream as DfPartitionStream;
|
||||
use datafusion::physical_plan::SendableRecordBatchStream as DfSendableRecordBatchStream;
|
||||
|
||||
@@ -17,9 +17,9 @@ use std::sync::{Arc, Weak};
|
||||
use arrow_schema::SchemaRef as ArrowSchemaRef;
|
||||
use common_catalog::consts::INFORMATION_SCHEMA_TABLES_TABLE_ID;
|
||||
use common_error::ext::BoxedError;
|
||||
use common_query::physical_plan::TaskContext;
|
||||
use common_recordbatch::adapter::RecordBatchStreamAdapter;
|
||||
use common_recordbatch::{RecordBatch, SendableRecordBatchStream};
|
||||
use datafusion::execution::TaskContext;
|
||||
use datafusion::physical_plan::stream::RecordBatchStreamAdapter as DfRecordBatchStreamAdapter;
|
||||
use datafusion::physical_plan::streaming::PartitionStream as DfPartitionStream;
|
||||
use datafusion::physical_plan::SendableRecordBatchStream as DfSendableRecordBatchStream;
|
||||
@@ -105,7 +105,9 @@ impl InformationTable for InformationSchemaTables {
|
||||
.make_tables(Some(request))
|
||||
.await
|
||||
.map(|x| x.into_df_record_batch())
|
||||
.map_err(Into::into)
|
||||
.map_err(|err| {
|
||||
datafusion::error::DataFusionError::External(format!("{err:?}").into())
|
||||
})
|
||||
}),
|
||||
));
|
||||
Ok(Box::pin(
|
||||
|
||||
@@ -15,6 +15,7 @@
|
||||
use std::sync::{Arc, Weak};
|
||||
|
||||
use common_config::Mode;
|
||||
use common_meta::key::TableMetadataManagerRef;
|
||||
use meta_client::client::MetaClient;
|
||||
use snafu::OptionExt;
|
||||
|
||||
@@ -51,3 +52,17 @@ pub fn meta_client(catalog_manager: &Weak<dyn CatalogManager>) -> Result<Option<
|
||||
|
||||
Ok(meta_client)
|
||||
}
|
||||
|
||||
/// Try to get the `[TableMetadataManagerRef]` from `[CatalogManager]` weak reference.
|
||||
pub fn table_meta_manager(
|
||||
catalog_manager: &Weak<dyn CatalogManager>,
|
||||
) -> Result<Option<TableMetadataManagerRef>> {
|
||||
let catalog_manager = catalog_manager
|
||||
.upgrade()
|
||||
.context(UpgradeWeakCatalogManagerRefSnafu)?;
|
||||
|
||||
Ok(catalog_manager
|
||||
.as_any()
|
||||
.downcast_ref::<KvBackendCatalogManager>()
|
||||
.map(|manager| manager.table_metadata_manager_ref().clone()))
|
||||
}
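For orientation, a minimal sketch of how this helper is meant to be consumed, mirroring the `information_schema.schemata` change above; the catalog and schema names here are placeholders and error handling is abbreviated:

// Sketch only: resolve the TableMetadataManager (None for non-kv-backend
// catalog managers) and read one schema's options.
let table_metadata_manager = utils::table_meta_manager(&self.catalog_manager)?;
if let Some(manager) = &table_metadata_manager {
    let schema_opts = manager
        .schema_manager()
        .get(SchemaNameKey::new("greptime", "public"))
        .await
        .context(TableMetadataManagerSnafu)?;
    // Whatever options exist are rendered via Display into the `options` column.
    let _rendered = schema_opts.map(|opts| format!("{opts}"));
}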
|
||||
|
||||
@@ -16,5 +16,7 @@ pub use client::{CachedMetaKvBackend, CachedMetaKvBackendBuilder, MetaKvBackend}
|
||||
|
||||
mod client;
|
||||
mod manager;
|
||||
mod table_cache;
|
||||
|
||||
pub use manager::KvBackendCatalogManager;
|
||||
pub use table_cache::{new_table_cache, TableCache, TableCacheRef};
|
||||
|
||||
@@ -350,6 +350,13 @@ pub struct MetaKvBackend {
|
||||
pub client: Arc<MetaClient>,
|
||||
}
|
||||
|
||||
impl MetaKvBackend {
|
||||
/// Constructs a [MetaKvBackend].
|
||||
pub fn new(client: Arc<MetaClient>) -> MetaKvBackend {
|
||||
MetaKvBackend { client }
|
||||
}
|
||||
}
|
||||
|
||||
impl TxnService for MetaKvBackend {
|
||||
type Error = Error;
|
||||
}
|
||||
@@ -450,9 +457,8 @@ mod tests {
|
||||
use common_meta::kv_backend::{KvBackend, TxnService};
|
||||
use common_meta::rpc::store::{
|
||||
BatchDeleteRequest, BatchDeleteResponse, BatchGetRequest, BatchGetResponse,
|
||||
BatchPutRequest, BatchPutResponse, CompareAndPutRequest, CompareAndPutResponse,
|
||||
DeleteRangeRequest, DeleteRangeResponse, PutRequest, PutResponse, RangeRequest,
|
||||
RangeResponse,
|
||||
BatchPutRequest, BatchPutResponse, DeleteRangeRequest, DeleteRangeResponse, PutRequest,
|
||||
PutResponse, RangeRequest, RangeResponse,
|
||||
};
|
||||
use common_meta::rpc::KeyValue;
|
||||
use dashmap::DashMap;
|
||||
@@ -512,13 +518,6 @@ mod tests {
|
||||
unimplemented!()
|
||||
}
|
||||
|
||||
async fn compare_and_put(
|
||||
&self,
|
||||
_req: CompareAndPutRequest,
|
||||
) -> Result<CompareAndPutResponse, Self::Error> {
|
||||
unimplemented!()
|
||||
}
|
||||
|
||||
async fn delete_range(
|
||||
&self,
|
||||
_req: DeleteRangeRequest,
|
||||
|
||||
@@ -15,17 +15,14 @@
|
||||
use std::any::Any;
|
||||
use std::collections::BTreeSet;
|
||||
use std::sync::{Arc, Weak};
|
||||
use std::time::Duration;
|
||||
|
||||
use async_stream::try_stream;
|
||||
use common_catalog::consts::{
|
||||
DEFAULT_CATALOG_NAME, DEFAULT_SCHEMA_NAME, INFORMATION_SCHEMA_NAME, NUMBERS_TABLE_ID,
|
||||
};
|
||||
use common_catalog::format_full_table_name;
|
||||
use common_config::Mode;
|
||||
use common_error::ext::BoxedError;
|
||||
use common_meta::cache_invalidator::{CacheInvalidator, Context, MultiCacheInvalidator};
|
||||
use common_meta::instruction::CacheIdent;
|
||||
use common_meta::cache::{LayeredCacheRegistryRef, ViewInfoCacheRef};
|
||||
use common_meta::key::catalog_name::CatalogNameKey;
|
||||
use common_meta::key::schema_name::SchemaNameKey;
|
||||
use common_meta::key::table_info::TableInfoValue;
|
||||
@@ -35,20 +32,20 @@ use common_meta::kv_backend::KvBackendRef;
|
||||
use futures_util::stream::BoxStream;
|
||||
use futures_util::{StreamExt, TryStreamExt};
|
||||
use meta_client::client::MetaClient;
|
||||
use moka::future::{Cache as AsyncCache, CacheBuilder};
|
||||
use moka::sync::Cache;
|
||||
use partition::manager::{PartitionRuleManager, PartitionRuleManagerRef};
|
||||
use snafu::prelude::*;
|
||||
use table::dist_table::DistTable;
|
||||
use table::table::numbers::{NumbersTable, NUMBERS_TABLE_NAME};
|
||||
use table::table_name::TableName;
|
||||
use table::TableRef;
|
||||
|
||||
use crate::error::Error::{GetTableCache, TableCacheNotGet};
|
||||
use crate::error::{
|
||||
InvalidTableInfoInCatalogSnafu, ListCatalogsSnafu, ListSchemasSnafu, ListTablesSnafu, Result,
|
||||
TableCacheNotGetSnafu, TableMetadataManagerSnafu,
|
||||
CacheNotFoundSnafu, GetTableCacheSnafu, InvalidTableInfoInCatalogSnafu, ListCatalogsSnafu,
|
||||
ListSchemasSnafu, ListTablesSnafu, Result, TableMetadataManagerSnafu,
|
||||
};
|
||||
use crate::information_schema::InformationSchemaProvider;
|
||||
use crate::kvbackend::TableCacheRef;
|
||||
use crate::CatalogManager;
|
||||
|
||||
/// Access all existing catalog, schema and tables.
|
||||
@@ -64,64 +61,27 @@ pub struct KvBackendCatalogManager {
|
||||
table_metadata_manager: TableMetadataManagerRef,
|
||||
/// A sub-CatalogManager that handles system tables
|
||||
system_catalog: SystemCatalog,
|
||||
table_cache: AsyncCache<String, TableRef>,
|
||||
}
|
||||
|
||||
struct TableCacheInvalidator {
|
||||
table_cache: AsyncCache<String, TableRef>,
|
||||
}
|
||||
|
||||
impl TableCacheInvalidator {
|
||||
pub fn new(table_cache: AsyncCache<String, TableRef>) -> Self {
|
||||
Self { table_cache }
|
||||
}
|
||||
}
|
||||
|
||||
#[async_trait::async_trait]
|
||||
impl CacheInvalidator for TableCacheInvalidator {
|
||||
async fn invalidate(
|
||||
&self,
|
||||
_ctx: &Context,
|
||||
caches: Vec<CacheIdent>,
|
||||
) -> common_meta::error::Result<()> {
|
||||
for cache in caches {
|
||||
if let CacheIdent::TableName(table_name) = cache {
|
||||
let table_cache_key = format_full_table_name(
|
||||
&table_name.catalog_name,
|
||||
&table_name.schema_name,
|
||||
&table_name.table_name,
|
||||
);
|
||||
self.table_cache.invalidate(&table_cache_key).await;
|
||||
}
|
||||
}
|
||||
Ok(())
|
||||
}
|
||||
cache_registry: LayeredCacheRegistryRef,
|
||||
}
|
||||
|
||||
const CATALOG_CACHE_MAX_CAPACITY: u64 = 128;
|
||||
const TABLE_CACHE_MAX_CAPACITY: u64 = 65536;
|
||||
const TABLE_CACHE_TTL: Duration = Duration::from_secs(10 * 60);
|
||||
const TABLE_CACHE_TTI: Duration = Duration::from_secs(5 * 60);
|
||||
|
||||
impl KvBackendCatalogManager {
|
||||
pub async fn new(
|
||||
pub fn new(
|
||||
mode: Mode,
|
||||
meta_client: Option<Arc<MetaClient>>,
|
||||
backend: KvBackendRef,
|
||||
multi_cache_invalidator: Arc<MultiCacheInvalidator>,
|
||||
cache_registry: LayeredCacheRegistryRef,
|
||||
) -> Arc<Self> {
|
||||
let table_cache: AsyncCache<String, TableRef> = CacheBuilder::new(TABLE_CACHE_MAX_CAPACITY)
|
||||
.time_to_live(TABLE_CACHE_TTL)
|
||||
.time_to_idle(TABLE_CACHE_TTI)
|
||||
.build();
|
||||
multi_cache_invalidator
|
||||
.add_invalidator(Arc::new(TableCacheInvalidator::new(table_cache.clone())))
|
||||
.await;
|
||||
|
||||
Arc::new_cyclic(|me| Self {
|
||||
mode,
|
||||
meta_client,
|
||||
partition_manager: Arc::new(PartitionRuleManager::new(backend.clone())),
|
||||
partition_manager: Arc::new(PartitionRuleManager::new(
|
||||
backend.clone(),
|
||||
cache_registry
|
||||
.get()
|
||||
.expect("Failed to get table_route_cache"),
|
||||
)),
|
||||
table_metadata_manager: Arc::new(TableMetadataManager::new(backend)),
|
||||
system_catalog: SystemCatalog {
|
||||
catalog_manager: me.clone(),
|
||||
@@ -131,7 +91,7 @@ impl KvBackendCatalogManager {
|
||||
me.clone(),
|
||||
)),
|
||||
},
|
||||
table_cache,
|
||||
cache_registry,
|
||||
})
|
||||
}
|
||||
|
||||
@@ -140,6 +100,12 @@ impl KvBackendCatalogManager {
|
||||
&self.mode
|
||||
}
|
||||
|
||||
pub fn view_info_cache(&self) -> Result<ViewInfoCacheRef> {
|
||||
self.cache_registry.get().context(CacheNotFoundSnafu {
|
||||
name: "view_info_cache",
|
||||
})
|
||||
}
|
||||
|
||||
/// Returns the `[MetaClient]`.
|
||||
pub fn meta_client(&self) -> Option<Arc<MetaClient>> {
|
||||
self.meta_client.clone()
|
||||
@@ -218,7 +184,7 @@ impl CatalogManager for KvBackendCatalogManager {
|
||||
}
|
||||
|
||||
async fn schema_exists(&self, catalog: &str, schema: &str) -> Result<bool> {
|
||||
if self.system_catalog.schema_exist(schema) {
|
||||
if self.system_catalog.schema_exists(schema) {
|
||||
return Ok(true);
|
||||
}
|
||||
|
||||
@@ -230,7 +196,7 @@ impl CatalogManager for KvBackendCatalogManager {
|
||||
}
|
||||
|
||||
async fn table_exists(&self, catalog: &str, schema: &str, table: &str) -> Result<bool> {
|
||||
if self.system_catalog.table_exist(schema, table) {
|
||||
if self.system_catalog.table_exists(schema, table) {
|
||||
return Ok(true);
|
||||
}
|
||||
|
||||
@@ -245,60 +211,29 @@ impl CatalogManager for KvBackendCatalogManager {
|
||||
|
||||
async fn table(
|
||||
&self,
|
||||
catalog: &str,
|
||||
schema: &str,
|
||||
catalog_name: &str,
|
||||
schema_name: &str,
|
||||
table_name: &str,
|
||||
) -> Result<Option<TableRef>> {
|
||||
if let Some(table) = self.system_catalog.table(catalog, schema, table_name) {
|
||||
if let Some(table) = self
|
||||
.system_catalog
|
||||
.table(catalog_name, schema_name, table_name)
|
||||
{
|
||||
return Ok(Some(table));
|
||||
}
|
||||
|
||||
let init = async {
|
||||
let table_name_key = TableNameKey::new(catalog, schema, table_name);
|
||||
let Some(table_name_value) = self
|
||||
.table_metadata_manager
|
||||
.table_name_manager()
|
||||
.get(table_name_key)
|
||||
.await
|
||||
.context(TableMetadataManagerSnafu)?
|
||||
else {
|
||||
return TableCacheNotGetSnafu {
|
||||
key: table_name_key.to_string(),
|
||||
}
|
||||
.fail();
|
||||
};
|
||||
let table_id = table_name_value.table_id();
|
||||
let table_cache: TableCacheRef = self.cache_registry.get().context(CacheNotFoundSnafu {
|
||||
name: "table_cache",
|
||||
})?;
|
||||
|
||||
let Some(table_info_value) = self
|
||||
.table_metadata_manager
|
||||
.table_info_manager()
|
||||
.get(table_id)
|
||||
.await
|
||||
.context(TableMetadataManagerSnafu)?
|
||||
.map(|v| v.into_inner())
|
||||
else {
|
||||
return TableCacheNotGetSnafu {
|
||||
key: table_name_key.to_string(),
|
||||
}
|
||||
.fail();
|
||||
};
|
||||
build_table(table_info_value)
|
||||
};
|
||||
|
||||
match self
|
||||
.table_cache
|
||||
.try_get_with_by_ref(&format_full_table_name(catalog, schema, table_name), init)
|
||||
table_cache
|
||||
.get_by_ref(&TableName {
|
||||
catalog_name: catalog_name.to_string(),
|
||||
schema_name: schema_name.to_string(),
|
||||
table_name: table_name.to_string(),
|
||||
})
|
||||
.await
|
||||
{
|
||||
Ok(table) => Ok(Some(table)),
|
||||
Err(err) => match err.as_ref() {
|
||||
TableCacheNotGet { .. } => Ok(None),
|
||||
_ => Err(err),
|
||||
},
|
||||
}
|
||||
.map_err(|err| GetTableCache {
|
||||
err_msg: err.to_string(),
|
||||
})
|
||||
.context(GetTableCacheSnafu)
|
||||
}
|
||||
|
||||
fn tables<'a>(&'a self, catalog: &'a str, schema: &'a str) -> BoxStream<'a, Result<TableRef>> {
|
||||
@@ -382,11 +317,11 @@ impl SystemCatalog {
|
||||
}
|
||||
}
|
||||
|
||||
fn schema_exist(&self, schema: &str) -> bool {
|
||||
fn schema_exists(&self, schema: &str) -> bool {
|
||||
schema == INFORMATION_SCHEMA_NAME
|
||||
}
|
||||
|
||||
fn table_exist(&self, schema: &str, table: &str) -> bool {
|
||||
fn table_exists(&self, schema: &str, table: &str) -> bool {
|
||||
if schema == INFORMATION_SCHEMA_NAME {
|
||||
self.information_schema_provider.table(table).is_some()
|
||||
} else if schema == DEFAULT_SCHEMA_NAME {
|
||||
|
||||
src/catalog/src/kvbackend/table_cache.rs (new file, 80 lines)
@@ -0,0 +1,80 @@
|
||||
// Copyright 2023 Greptime Team
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
use std::sync::Arc;
|
||||
|
||||
use common_meta::cache::{CacheContainer, Initializer, TableInfoCacheRef, TableNameCacheRef};
|
||||
use common_meta::error::{Result as MetaResult, ValueNotExistSnafu};
|
||||
use common_meta::instruction::CacheIdent;
|
||||
use futures::future::BoxFuture;
|
||||
use moka::future::Cache;
|
||||
use snafu::OptionExt;
|
||||
use table::dist_table::DistTable;
|
||||
use table::table_name::TableName;
|
||||
use table::TableRef;
|
||||
|
||||
pub type TableCacheRef = Arc<TableCache>;
|
||||
|
||||
/// [TableCache] caches the [TableName] to [TableRef] mapping.
|
||||
pub type TableCache = CacheContainer<TableName, TableRef, CacheIdent>;
|
||||
|
||||
/// Constructs a [TableCache].
|
||||
pub fn new_table_cache(
|
||||
name: String,
|
||||
cache: Cache<TableName, TableRef>,
|
||||
table_info_cache: TableInfoCacheRef,
|
||||
table_name_cache: TableNameCacheRef,
|
||||
) -> TableCache {
|
||||
let init = init_factory(table_info_cache, table_name_cache);
|
||||
|
||||
CacheContainer::new(name, cache, Box::new(invalidator), init, Box::new(filter))
|
||||
}
|
||||
|
||||
fn init_factory(
|
||||
table_info_cache: TableInfoCacheRef,
|
||||
table_name_cache: TableNameCacheRef,
|
||||
) -> Initializer<TableName, TableRef> {
|
||||
Arc::new(move |table_name| {
|
||||
let table_info_cache = table_info_cache.clone();
|
||||
let table_name_cache = table_name_cache.clone();
|
||||
Box::pin(async move {
|
||||
let table_id = table_name_cache
|
||||
.get_by_ref(table_name)
|
||||
.await?
|
||||
.context(ValueNotExistSnafu)?;
|
||||
let table_info = table_info_cache
|
||||
.get_by_ref(&table_id)
|
||||
.await?
|
||||
.context(ValueNotExistSnafu)?;
|
||||
|
||||
Ok(Some(DistTable::table(table_info)))
|
||||
})
|
||||
})
|
||||
}
|
||||
|
||||
fn invalidator<'a>(
|
||||
cache: &'a Cache<TableName, TableRef>,
|
||||
ident: &'a CacheIdent,
|
||||
) -> BoxFuture<'a, MetaResult<()>> {
|
||||
Box::pin(async move {
|
||||
if let CacheIdent::TableName(table_name) = ident {
|
||||
cache.invalidate(table_name).await
|
||||
}
|
||||
Ok(())
|
||||
})
|
||||
}
|
||||
|
||||
fn filter(ident: &CacheIdent) -> bool {
|
||||
matches!(ident, CacheIdent::TableName(_))
|
||||
}
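A hedged sketch of wiring the new cache together and resolving a table by name, as `KvBackendCatalogManager::table` now does; the capacity is a placeholder, and the `table_info_cache`/`table_name_cache` handles are assumed to come from the fundamental cache registry:

// Sketch: build a TableCache and look a table up by its TableName.
let cache = moka::future::CacheBuilder::new(65536).build();
let table_cache: TableCacheRef = Arc::new(new_table_cache(
    "table_cache".to_string(),
    cache,
    table_info_cache, // TableInfoCacheRef, assumed to be available
    table_name_cache, // TableNameCacheRef, assumed to be available
));

let table = table_cache
    .get_by_ref(&TableName {
        catalog_name: "greptime".to_string(),
        schema_name: "public".to_string(),
        table_name: "monitor".to_string(),
    })
    .await?;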
|
||||
@@ -15,15 +15,25 @@
|
||||
use std::collections::HashMap;
|
||||
use std::sync::Arc;
|
||||
|
||||
use bytes::Bytes;
|
||||
use common_catalog::format_full_table_name;
|
||||
use common_query::logical_plan::SubstraitPlanDecoderRef;
|
||||
use datafusion::common::{ResolvedTableReference, TableReference};
|
||||
use datafusion::datasource::provider_as_source;
|
||||
use datafusion::datasource::view::ViewTable;
|
||||
use datafusion::datasource::{provider_as_source, TableProvider};
|
||||
use datafusion::logical_expr::TableSource;
|
||||
use session::context::QueryContext;
|
||||
use snafu::{ensure, OptionExt};
|
||||
use snafu::{ensure, OptionExt, ResultExt};
|
||||
use table::metadata::TableType;
|
||||
use table::table::adapter::DfTableProviderAdapter;
|
||||
mod dummy_catalog;
|
||||
use dummy_catalog::DummyCatalogList;
|
||||
|
||||
use crate::error::{QueryAccessDeniedSnafu, Result, TableNotExistSnafu};
|
||||
use crate::error::{
|
||||
CastManagerSnafu, DatafusionSnafu, DecodePlanSnafu, GetViewCacheSnafu, QueryAccessDeniedSnafu,
|
||||
Result, TableNotExistSnafu, ViewInfoNotFoundSnafu,
|
||||
};
|
||||
use crate::kvbackend::KvBackendCatalogManager;
|
||||
use crate::CatalogManagerRef;
|
||||
|
||||
pub struct DfTableSourceProvider {
|
||||
@@ -32,6 +42,7 @@ pub struct DfTableSourceProvider {
|
||||
disallow_cross_catalog_query: bool,
|
||||
default_catalog: String,
|
||||
default_schema: String,
|
||||
plan_decoder: SubstraitPlanDecoderRef,
|
||||
}
|
||||
|
||||
impl DfTableSourceProvider {
|
||||
@@ -39,6 +50,7 @@ impl DfTableSourceProvider {
|
||||
catalog_manager: CatalogManagerRef,
|
||||
disallow_cross_catalog_query: bool,
|
||||
query_ctx: &QueryContext,
|
||||
plan_decoder: SubstraitPlanDecoderRef,
|
||||
) -> Self {
|
||||
Self {
|
||||
catalog_manager,
|
||||
@@ -46,6 +58,7 @@ impl DfTableSourceProvider {
|
||||
resolved_tables: HashMap::new(),
|
||||
default_catalog: query_ctx.current_catalog().to_owned(),
|
||||
default_schema: query_ctx.current_schema().to_owned(),
|
||||
plan_decoder,
|
||||
}
|
||||
}
|
||||
|
||||
@@ -94,8 +107,39 @@ impl DfTableSourceProvider {
|
||||
table: format_full_table_name(catalog_name, schema_name, table_name),
|
||||
})?;
|
||||
|
||||
let provider = DfTableProviderAdapter::new(table);
|
||||
let source = provider_as_source(Arc::new(provider));
|
||||
let provider: Arc<dyn TableProvider> = if table.table_info().table_type == TableType::View {
|
||||
let catalog_manager = self
|
||||
.catalog_manager
|
||||
.as_any()
|
||||
.downcast_ref::<KvBackendCatalogManager>()
|
||||
.context(CastManagerSnafu)?;
|
||||
|
||||
let view_info = catalog_manager
|
||||
.view_info_cache()?
|
||||
.get(table.table_info().ident.table_id)
|
||||
.await
|
||||
.context(GetViewCacheSnafu)?
|
||||
.context(ViewInfoNotFoundSnafu {
|
||||
name: &table.table_info().name,
|
||||
})?;
|
||||
|
||||
// Build the catalog list provider for deserialization.
|
||||
let catalog_list = Arc::new(DummyCatalogList::new(self.catalog_manager.clone()));
|
||||
let logical_plan = self
|
||||
.plan_decoder
|
||||
.decode(Bytes::from(view_info.view_info.clone()), catalog_list, true)
|
||||
.await
|
||||
.context(DecodePlanSnafu {
|
||||
name: &table.table_info().name,
|
||||
})?;
|
||||
|
||||
Arc::new(ViewTable::try_new(logical_plan, None).context(DatafusionSnafu)?)
|
||||
} else {
|
||||
Arc::new(DfTableProviderAdapter::new(table))
|
||||
};
|
||||
|
||||
let source = provider_as_source(provider);
|
||||
|
||||
let _ = self.resolved_tables.insert(resolved_name, source.clone());
|
||||
Ok(source)
|
||||
}
|
||||
@@ -103,6 +147,7 @@ impl DfTableSourceProvider {
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use common_query::test_util::DummyDecoder;
|
||||
use session::context::QueryContext;
|
||||
|
||||
use super::*;
|
||||
@@ -112,8 +157,12 @@ mod tests {
|
||||
fn test_validate_table_ref() {
|
||||
let query_ctx = &QueryContext::with("greptime", "public");
|
||||
|
||||
let table_provider =
|
||||
DfTableSourceProvider::new(MemoryCatalogManager::with_default_setup(), true, query_ctx);
|
||||
let table_provider = DfTableSourceProvider::new(
|
||||
MemoryCatalogManager::with_default_setup(),
|
||||
true,
|
||||
query_ctx,
|
||||
DummyDecoder::arc(),
|
||||
);
|
||||
|
||||
let table_ref = TableReference::bare("table_name");
|
||||
let result = table_provider.resolve_table_ref(table_ref);
|
||||
@@ -148,4 +197,99 @@ mod tests {
|
||||
let table_ref = TableReference::full("greptime", "greptime_private", "columns");
|
||||
assert!(table_provider.resolve_table_ref(table_ref).is_ok());
|
||||
}
|
||||
|
||||
use std::collections::HashSet;
|
||||
|
||||
use arrow::datatypes::{DataType, Field, Schema, SchemaRef};
|
||||
use cache::{build_fundamental_cache_registry, with_default_composite_cache_registry};
|
||||
use common_config::Mode;
|
||||
use common_meta::cache::{CacheRegistryBuilder, LayeredCacheRegistryBuilder};
|
||||
use common_meta::key::TableMetadataManager;
|
||||
use common_meta::kv_backend::memory::MemoryKvBackend;
|
||||
use common_query::error::Result as QueryResult;
|
||||
use common_query::logical_plan::SubstraitPlanDecoder;
|
||||
use datafusion::catalog::CatalogProviderList;
|
||||
use datafusion::logical_expr::builder::LogicalTableSource;
|
||||
use datafusion::logical_expr::{col, lit, LogicalPlan, LogicalPlanBuilder};
|
||||
|
||||
struct MockDecoder;
|
||||
impl MockDecoder {
|
||||
pub fn arc() -> Arc<Self> {
|
||||
Arc::new(MockDecoder)
|
||||
}
|
||||
}
|
||||
|
||||
#[async_trait::async_trait]
|
||||
impl SubstraitPlanDecoder for MockDecoder {
|
||||
async fn decode(
|
||||
&self,
|
||||
_message: bytes::Bytes,
|
||||
_catalog_list: Arc<dyn CatalogProviderList>,
|
||||
_optimize: bool,
|
||||
) -> QueryResult<LogicalPlan> {
|
||||
Ok(mock_plan())
|
||||
}
|
||||
}
|
||||
|
||||
fn mock_plan() -> LogicalPlan {
|
||||
let schema = Schema::new(vec![
|
||||
Field::new("id", DataType::Int32, true),
|
||||
Field::new("name", DataType::Utf8, true),
|
||||
]);
|
||||
let table_source = LogicalTableSource::new(SchemaRef::new(schema));
|
||||
|
||||
let projection = None;
|
||||
|
||||
let builder =
|
||||
LogicalPlanBuilder::scan("person", Arc::new(table_source), projection).unwrap();
|
||||
|
||||
builder
|
||||
.filter(col("id").gt(lit(500)))
|
||||
.unwrap()
|
||||
.build()
|
||||
.unwrap()
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
async fn test_resolve_view() {
|
||||
let query_ctx = &QueryContext::with("greptime", "public");
|
||||
let backend = Arc::new(MemoryKvBackend::default());
|
||||
let layered_cache_builder = LayeredCacheRegistryBuilder::default()
|
||||
.add_cache_registry(CacheRegistryBuilder::default().build());
|
||||
let fundamental_cache_registry = build_fundamental_cache_registry(backend.clone());
|
||||
let layered_cache_registry = Arc::new(
|
||||
with_default_composite_cache_registry(
|
||||
layered_cache_builder.add_cache_registry(fundamental_cache_registry),
|
||||
)
|
||||
.unwrap()
|
||||
.build(),
|
||||
);
|
||||
|
||||
let catalog_manager = KvBackendCatalogManager::new(
|
||||
Mode::Standalone,
|
||||
None,
|
||||
backend.clone(),
|
||||
layered_cache_registry,
|
||||
);
|
||||
let table_metadata_manager = TableMetadataManager::new(backend);
|
||||
let mut view_info = common_meta::key::test_utils::new_test_table_info(1024, vec![]);
|
||||
view_info.table_type = TableType::View;
|
||||
let logical_plan = vec![1, 2, 3];
|
||||
// Create view metadata
|
||||
table_metadata_manager
|
||||
.create_view_metadata(view_info.clone().into(), logical_plan, HashSet::new())
|
||||
.await
|
||||
.unwrap();
|
||||
|
||||
let mut table_provider =
|
||||
DfTableSourceProvider::new(catalog_manager, true, query_ctx, MockDecoder::arc());
|
||||
|
||||
// View not found
|
||||
let table_ref = TableReference::bare("not_exists_view");
|
||||
assert!(table_provider.resolve_table(table_ref).await.is_err());
|
||||
|
||||
let table_ref = TableReference::bare(view_info.name);
|
||||
let source = table_provider.resolve_table(table_ref).await.unwrap();
|
||||
assert_eq!(*source.get_logical_plan().unwrap(), mock_plan());
|
||||
}
|
||||
}
|
||||
|
||||
src/catalog/src/table_source/dummy_catalog.rs (new file, 129 lines)
@@ -0,0 +1,129 @@
|
||||
// Copyright 2023 Greptime Team
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
//! Dummy catalog for region server.
|
||||
|
||||
use std::any::Any;
|
||||
use std::sync::Arc;
|
||||
|
||||
use async_trait::async_trait;
|
||||
use common_catalog::format_full_table_name;
|
||||
use datafusion::catalog::schema::SchemaProvider;
|
||||
use datafusion::catalog::{CatalogProvider, CatalogProviderList};
|
||||
use datafusion::datasource::TableProvider;
|
||||
use snafu::OptionExt;
|
||||
use table::table::adapter::DfTableProviderAdapter;
|
||||
|
||||
use crate::error::TableNotExistSnafu;
|
||||
use crate::CatalogManagerRef;
|
||||
|
||||
/// Delegate the resolving requests to the `[CatalogManager]` unconditionally.
|
||||
#[derive(Clone)]
|
||||
pub struct DummyCatalogList {
|
||||
catalog_manager: CatalogManagerRef,
|
||||
}
|
||||
|
||||
impl DummyCatalogList {
|
||||
/// Creates a new catalog list with the given catalog manager.
|
||||
pub fn new(catalog_manager: CatalogManagerRef) -> Self {
|
||||
Self { catalog_manager }
|
||||
}
|
||||
}
|
||||
|
||||
impl CatalogProviderList for DummyCatalogList {
|
||||
fn as_any(&self) -> &dyn Any {
|
||||
self
|
||||
}
|
||||
|
||||
fn register_catalog(
|
||||
&self,
|
||||
_name: String,
|
||||
_catalog: Arc<dyn CatalogProvider>,
|
||||
) -> Option<Arc<dyn CatalogProvider>> {
|
||||
None
|
||||
}
|
||||
|
||||
fn catalog_names(&self) -> Vec<String> {
|
||||
vec![]
|
||||
}
|
||||
|
||||
fn catalog(&self, catalog_name: &str) -> Option<Arc<dyn CatalogProvider>> {
|
||||
Some(Arc::new(DummyCatalogProvider {
|
||||
catalog_name: catalog_name.to_string(),
|
||||
catalog_manager: self.catalog_manager.clone(),
|
||||
}))
|
||||
}
|
||||
}
|
||||
|
||||
/// A dummy catalog provider for [DummyCatalogList].
|
||||
#[derive(Clone)]
|
||||
struct DummyCatalogProvider {
|
||||
catalog_name: String,
|
||||
catalog_manager: CatalogManagerRef,
|
||||
}
|
||||
|
||||
impl CatalogProvider for DummyCatalogProvider {
|
||||
fn as_any(&self) -> &dyn Any {
|
||||
self
|
||||
}
|
||||
|
||||
fn schema_names(&self) -> Vec<String> {
|
||||
vec![]
|
||||
}
|
||||
|
||||
fn schema(&self, schema_name: &str) -> Option<Arc<dyn SchemaProvider>> {
|
||||
Some(Arc::new(DummySchemaProvider {
|
||||
catalog_name: self.catalog_name.clone(),
|
||||
schema_name: schema_name.to_string(),
|
||||
catalog_manager: self.catalog_manager.clone(),
|
||||
}))
|
||||
}
|
||||
}
|
||||
|
||||
/// A dummy schema provider for [DummyCatalogList].
|
||||
#[derive(Clone)]
|
||||
struct DummySchemaProvider {
|
||||
catalog_name: String,
|
||||
schema_name: String,
|
||||
catalog_manager: CatalogManagerRef,
|
||||
}
|
||||
|
||||
#[async_trait]
|
||||
impl SchemaProvider for DummySchemaProvider {
|
||||
fn as_any(&self) -> &dyn Any {
|
||||
self
|
||||
}
|
||||
|
||||
fn table_names(&self) -> Vec<String> {
|
||||
vec![]
|
||||
}
|
||||
|
||||
async fn table(&self, name: &str) -> datafusion::error::Result<Option<Arc<dyn TableProvider>>> {
|
||||
let table = self
|
||||
.catalog_manager
|
||||
.table(&self.catalog_name, &self.schema_name, name)
|
||||
.await?
|
||||
.with_context(|| TableNotExistSnafu {
|
||||
table: format_full_table_name(&self.catalog_name, &self.schema_name, name),
|
||||
})?;
|
||||
|
||||
let table_provider: Arc<dyn TableProvider> = Arc::new(DfTableProviderAdapter::new(table));
|
||||
|
||||
Ok(Some(table_provider))
|
||||
}
|
||||
|
||||
fn table_exist(&self, _name: &str) -> bool {
|
||||
true
|
||||
}
|
||||
}
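As the `table_source` changes above show, this dummy catalog exists only so a serialized view plan can be decoded while table lookups are delegated to the real `CatalogManager`; a rough sketch, where `plan_decoder` and `view_bytes` are assumptions standing in for the decoder and the stored view definition:

// Sketch: decode a view's logical plan with DummyCatalogList handling
// table resolution during deserialization.
let catalog_list = Arc::new(DummyCatalogList::new(catalog_manager.clone()));
let logical_plan = plan_decoder
    .decode(Bytes::from(view_bytes), catalog_list, true)
    .await?;
let provider = ViewTable::try_new(logical_plan, None)?;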
|
||||
@@ -31,9 +31,11 @@ moka = { workspace = true, features = ["future"] }
|
||||
parking_lot = "0.12"
|
||||
prometheus.workspace = true
|
||||
prost.workspace = true
|
||||
query.workspace = true
|
||||
rand.workspace = true
|
||||
serde_json.workspace = true
|
||||
snafu.workspace = true
|
||||
substrait.workspace = true
|
||||
tokio.workspace = true
|
||||
tokio-stream = { workspace = true, features = ["net"] }
|
||||
tonic.workspace = true
|
||||
@@ -42,7 +44,6 @@ tonic.workspace = true
|
||||
common-grpc-expr.workspace = true
|
||||
datanode.workspace = true
|
||||
derive-new = "0.5"
|
||||
substrait.workspace = true
|
||||
tracing = "0.1"
|
||||
tracing-subscriber = { version = "0.3", features = ["env-filter"] }
|
||||
|
||||
|
||||
@@ -14,14 +14,16 @@
|
||||
|
||||
use std::sync::Arc;
|
||||
|
||||
use api::v1::flow::flow_client::FlowClient as PbFlowClient;
|
||||
use api::v1::health_check_client::HealthCheckClient;
|
||||
use api::v1::prometheus_gateway_client::PrometheusGatewayClient;
|
||||
use api::v1::region::region_client::RegionClient as PbRegionClient;
|
||||
use api::v1::HealthCheckRequest;
|
||||
use arrow_flight::flight_service_client::FlightServiceClient;
|
||||
use common_grpc::channel_manager::ChannelManager;
|
||||
use common_grpc::channel_manager::{ChannelConfig, ChannelManager, ClientTlsOption};
|
||||
use parking_lot::RwLock;
|
||||
use snafu::{OptionExt, ResultExt};
|
||||
use tonic::codec::CompressionEncoding;
|
||||
use tonic::transport::Channel;
|
||||
|
||||
use crate::load_balance::{LoadBalance, Loadbalancer};
|
||||
@@ -86,6 +88,17 @@ impl Client {
|
||||
Self::with_manager_and_urls(ChannelManager::new(), urls)
|
||||
}
|
||||
|
||||
pub fn with_tls_and_urls<U, A>(urls: A, client_tls: ClientTlsOption) -> Result<Self>
|
||||
where
|
||||
U: AsRef<str>,
|
||||
A: AsRef<[U]>,
|
||||
{
|
||||
let channel_config = ChannelConfig::default().client_tls_config(client_tls);
|
||||
let channel_manager = ChannelManager::with_tls_config(channel_config)
|
||||
.context(error::CreateTlsChannelSnafu)?;
|
||||
Ok(Self::with_manager_and_urls(channel_manager, urls))
|
||||
}
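Usage is symmetric with `Client::with_urls`; a sketch, with the `ClientTlsOption` construction elided since its fields are not part of this diff:

// Sketch: build a TLS-enabled client. Populating `client_tls`
// (certificates, keys, server name) is outside this hunk.
let client = Client::with_tls_and_urls(["127.0.0.1:4001"], client_tls)?;
let mut flight = client.make_flight_client()?;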
|
||||
|
||||
pub fn with_manager_and_urls<U, A>(channel_manager: ChannelManager, urls: A) -> Self
|
||||
where
|
||||
U: AsRef<str>,
|
||||
@@ -151,24 +164,44 @@ impl Client {
|
||||
|
||||
pub fn make_flight_client(&self) -> Result<FlightClient> {
|
||||
let (addr, channel) = self.find_channel()?;
|
||||
Ok(FlightClient {
|
||||
addr,
|
||||
client: FlightServiceClient::new(channel)
|
||||
.max_decoding_message_size(self.max_grpc_recv_message_size())
|
||||
.max_encoding_message_size(self.max_grpc_send_message_size()),
|
||||
})
|
||||
|
||||
let client = FlightServiceClient::new(channel)
|
||||
.max_decoding_message_size(self.max_grpc_recv_message_size())
|
||||
.max_encoding_message_size(self.max_grpc_send_message_size())
|
||||
.accept_compressed(CompressionEncoding::Zstd)
|
||||
.send_compressed(CompressionEncoding::Zstd);
|
||||
|
||||
Ok(FlightClient { addr, client })
|
||||
}
|
||||
|
||||
pub(crate) fn raw_region_client(&self) -> Result<PbRegionClient<Channel>> {
|
||||
let (_, channel) = self.find_channel()?;
|
||||
Ok(PbRegionClient::new(channel)
|
||||
pub(crate) fn raw_region_client(&self) -> Result<(String, PbRegionClient<Channel>)> {
|
||||
let (addr, channel) = self.find_channel()?;
|
||||
let client = PbRegionClient::new(channel)
|
||||
.max_decoding_message_size(self.max_grpc_recv_message_size())
|
||||
.max_encoding_message_size(self.max_grpc_send_message_size()))
|
||||
.max_encoding_message_size(self.max_grpc_send_message_size())
|
||||
.accept_compressed(CompressionEncoding::Zstd)
|
||||
.send_compressed(CompressionEncoding::Zstd);
|
||||
Ok((addr, client))
|
||||
}
|
||||
|
||||
pub(crate) fn raw_flow_client(&self) -> Result<(String, PbFlowClient<Channel>)> {
|
||||
let (addr, channel) = self.find_channel()?;
|
||||
let client = PbFlowClient::new(channel)
|
||||
.max_decoding_message_size(self.max_grpc_recv_message_size())
|
||||
.max_encoding_message_size(self.max_grpc_send_message_size())
|
||||
.accept_compressed(CompressionEncoding::Zstd)
|
||||
.send_compressed(CompressionEncoding::Zstd);
|
||||
Ok((addr, client))
|
||||
}
|
||||
|
||||
pub fn make_prometheus_gateway_client(&self) -> Result<PrometheusGatewayClient<Channel>> {
|
||||
let (_, channel) = self.find_channel()?;
|
||||
Ok(PrometheusGatewayClient::new(channel))
|
||||
let client = PrometheusGatewayClient::new(channel)
|
||||
.accept_compressed(CompressionEncoding::Gzip)
|
||||
.accept_compressed(CompressionEncoding::Zstd)
|
||||
.send_compressed(CompressionEncoding::Gzip)
|
||||
.send_compressed(CompressionEncoding::Zstd);
|
||||
Ok(client)
|
||||
}
|
||||
|
||||
pub async fn health_check(&self) -> Result<()> {
|
||||
|
||||
@@ -21,43 +21,45 @@ use common_meta::node_manager::{DatanodeRef, FlownodeRef, NodeManager};
|
||||
use common_meta::peer::Peer;
|
||||
use moka::future::{Cache, CacheBuilder};
|
||||
|
||||
use crate::flow::FlowRequester;
|
||||
use crate::region::RegionRequester;
|
||||
use crate::Client;
|
||||
|
||||
pub struct DatanodeClients {
|
||||
pub struct NodeClients {
|
||||
channel_manager: ChannelManager,
|
||||
clients: Cache<Peer, Client>,
|
||||
}
|
||||
|
||||
impl Default for DatanodeClients {
|
||||
impl Default for NodeClients {
|
||||
fn default() -> Self {
|
||||
Self::new(ChannelConfig::new())
|
||||
}
|
||||
}
|
||||
|
||||
impl Debug for DatanodeClients {
|
||||
impl Debug for NodeClients {
|
||||
fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result {
|
||||
f.debug_struct("DatanodeClients")
|
||||
f.debug_struct("NodeClients")
|
||||
.field("channel_manager", &self.channel_manager)
|
||||
.finish()
|
||||
}
|
||||
}
|
||||
|
||||
#[async_trait::async_trait]
|
||||
impl NodeManager for DatanodeClients {
|
||||
impl NodeManager for NodeClients {
|
||||
async fn datanode(&self, datanode: &Peer) -> DatanodeRef {
|
||||
let client = self.get_client(datanode).await;
|
||||
|
||||
Arc::new(RegionRequester::new(client))
|
||||
}
|
||||
|
||||
async fn flownode(&self, _node: &Peer) -> FlownodeRef {
|
||||
// TODO(weny): Support it.
|
||||
unimplemented!()
|
||||
async fn flownode(&self, flownode: &Peer) -> FlownodeRef {
|
||||
let client = self.get_client(flownode).await;
|
||||
|
||||
Arc::new(FlowRequester::new(client))
|
||||
}
|
||||
}
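With this rename the same client pool serves both roles through the `NodeManager` trait; a brief sketch, assuming the usual `Peer::new(id, addr)` helper and placeholder addresses:

// Sketch: one NodeClients hands out both datanode and flownode handles.
let node_clients = Arc::new(NodeClients::default());
let datanode = node_clients.datanode(&Peer::new(1, "127.0.0.1:3001")).await;
let flownode = node_clients.flownode(&Peer::new(2, "127.0.0.1:6800")).await;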
|
||||
|
||||
impl DatanodeClients {
|
||||
impl NodeClients {
|
||||
pub fn new(config: ChannelConfig) -> Self {
|
||||
Self {
|
||||
channel_manager: ChannelManager::with_config(config),
|
||||
|
||||
@@ -23,8 +23,6 @@ use api::v1::{
|
||||
};
|
||||
use arrow_flight::Ticket;
|
||||
use async_stream::stream;
|
||||
use client::error::{ConvertFlightDataSnafu, Error, IllegalFlightMessagesSnafu, ServerSnafu};
|
||||
use client::{from_grpc_response, Client, Result};
|
||||
use common_error::ext::{BoxedError, ErrorExt};
|
||||
use common_grpc::flight::{FlightDecoder, FlightMessage};
|
||||
use common_query::Output;
|
||||
@@ -37,7 +35,8 @@ use prost::Message;
|
||||
use snafu::{ensure, ResultExt};
|
||||
use tonic::transport::Channel;
|
||||
|
||||
pub const DEFAULT_LOOKBACK_STRING: &str = "5m";
|
||||
use crate::error::{ConvertFlightDataSnafu, Error, IllegalFlightMessagesSnafu, ServerSnafu};
|
||||
use crate::{from_grpc_response, Client, Result};
|
||||
|
||||
#[derive(Clone, Debug, Default)]
|
||||
pub struct Database {
|
||||
@@ -105,10 +104,18 @@ impl Database {
|
||||
self.catalog = catalog.into();
|
||||
}
|
||||
|
||||
pub fn catalog(&self) -> &String {
|
||||
&self.catalog
|
||||
}
|
||||
|
||||
pub fn set_schema(&mut self, schema: impl Into<String>) {
|
||||
self.schema = schema.into();
|
||||
}
|
||||
|
||||
pub fn schema(&self) -> &String {
|
||||
&self.schema
|
||||
}
|
||||
|
||||
pub fn set_timezone(&mut self, timezone: impl Into<String>) {
|
||||
self.timezone = timezone.into();
|
||||
}
|
||||
@@ -156,6 +163,13 @@ impl Database {
|
||||
.await
|
||||
}
|
||||
|
||||
pub async fn logical_plan(&self, logical_plan: Vec<u8>) -> Result<Output> {
|
||||
self.do_get(Request::Query(QueryRequest {
|
||||
query: Some(Query::LogicalPlan(logical_plan)),
|
||||
}))
|
||||
.await
|
||||
}
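A sketch of the new entry point; `encoded_plan` is assumed to be a logical plan already serialized (e.g. via Substrait), which this method ships as a `Query::LogicalPlan` request:

// Sketch: submit a pre-encoded logical plan instead of a SQL string.
let output = database.logical_plan(encoded_plan).await?;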
|
||||
|
||||
pub async fn create(&self, expr: CreateTableExpr) -> Result<Output> {
|
||||
self.do_get(Request::Ddl(DdlRequest {
|
||||
expr: Some(DdlExpr::CreateTable(expr)),
|
||||
@@ -269,16 +283,12 @@ struct FlightContext {
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use std::assert_matches::assert_matches;
|
||||
|
||||
use api::v1::auth_header::AuthScheme;
|
||||
use api::v1::{AuthHeader, Basic};
|
||||
use clap::Parser;
|
||||
use client::Client;
|
||||
use cmd::error::Result as CmdResult;
|
||||
use cmd::options::{GlobalOptions, Options};
|
||||
use cmd::{cli, standalone, App};
|
||||
use common_catalog::consts::{DEFAULT_CATALOG_NAME, DEFAULT_SCHEMA_NAME};
|
||||
|
||||
use super::{Database, FlightContext};
|
||||
use super::*;
|
||||
|
||||
#[test]
|
||||
fn test_flight_ctx() {
|
||||
@@ -294,79 +304,11 @@ mod tests {
|
||||
auth_scheme: Some(basic),
|
||||
});
|
||||
|
||||
assert!(matches!(
|
||||
assert_matches!(
|
||||
ctx.auth_header,
|
||||
Some(AuthHeader {
|
||||
auth_scheme: Some(AuthScheme::Basic(_)),
|
||||
})
|
||||
))
|
||||
}
|
||||
|
||||
#[tokio::test(flavor = "multi_thread")]
|
||||
async fn test_export_create_table_with_quoted_names() -> CmdResult<()> {
|
||||
let output_dir = tempfile::tempdir().unwrap();
|
||||
|
||||
let standalone = standalone::Command::parse_from([
|
||||
"standalone",
|
||||
"start",
|
||||
"--data-home",
|
||||
&*output_dir.path().to_string_lossy(),
|
||||
]);
|
||||
let Options::Standalone(standalone_opts) =
|
||||
standalone.load_options(&GlobalOptions::default())?
|
||||
else {
|
||||
unreachable!()
|
||||
};
|
||||
let mut instance = standalone.build(*standalone_opts).await?;
|
||||
instance.start().await?;
|
||||
|
||||
let client = Client::with_urls(["127.0.0.1:4001"]);
|
||||
let database = Database::new(DEFAULT_CATALOG_NAME, DEFAULT_SCHEMA_NAME, client);
|
||||
database
|
||||
.sql(r#"CREATE DATABASE "cli.export.create_table";"#)
|
||||
.await
|
||||
.unwrap();
|
||||
database
|
||||
.sql(
|
||||
r#"CREATE TABLE "cli.export.create_table"."a.b.c"(
|
||||
ts TIMESTAMP,
|
||||
TIME INDEX (ts)
|
||||
) engine=mito;
|
||||
"#,
|
||||
)
|
||||
.await
|
||||
.unwrap();
|
||||
|
||||
let output_dir = tempfile::tempdir().unwrap();
|
||||
let cli = cli::Command::parse_from([
|
||||
"cli",
|
||||
"export",
|
||||
"--addr",
|
||||
"127.0.0.1:4000",
|
||||
"--output-dir",
|
||||
&*output_dir.path().to_string_lossy(),
|
||||
"--target",
|
||||
"create-table",
|
||||
]);
|
||||
let mut cli_app = cli.build().await?;
|
||||
cli_app.start().await?;
|
||||
|
||||
instance.stop().await?;
|
||||
|
||||
let output_file = output_dir
|
||||
.path()
|
||||
.join("greptime-cli.export.create_table.sql");
|
||||
let res = std::fs::read_to_string(output_file).unwrap();
|
||||
let expect = r#"CREATE TABLE IF NOT EXISTS "a.b.c" (
|
||||
"ts" TIMESTAMP(3) NOT NULL,
|
||||
TIME INDEX ("ts")
|
||||
)
|
||||
|
||||
ENGINE=mito
|
||||
;
|
||||
"#;
|
||||
assert_eq!(res.trim(), expect.trim());
|
||||
|
||||
Ok(())
|
||||
)
|
||||
}
|
||||
}
|
||||
@@ -18,7 +18,7 @@ use common_error::ext::{BoxedError, ErrorExt};
|
||||
use common_error::status_code::StatusCode;
|
||||
use common_error::{GREPTIME_DB_HEADER_ERROR_CODE, GREPTIME_DB_HEADER_ERROR_MSG};
|
||||
use common_macro::stack_trace_debug;
|
||||
use snafu::{Location, Snafu};
|
||||
use snafu::{location, Location, Snafu};
|
||||
use tonic::{Code, Status};
|
||||
|
||||
#[derive(Snafu)]
|
||||
@@ -26,7 +26,11 @@ use tonic::{Code, Status};
|
||||
#[stack_trace_debug]
|
||||
pub enum Error {
|
||||
#[snafu(display("Illegal Flight messages, reason: {}", reason))]
|
||||
IllegalFlightMessages { reason: String, location: Location },
|
||||
IllegalFlightMessages {
|
||||
reason: String,
|
||||
#[snafu(implicit)]
|
||||
location: Location,
|
||||
},
|
||||
|
||||
#[snafu(display("Failed to do Flight get, code: {}", tonic_code))]
|
||||
FlightGet {
|
||||
@@ -37,47 +41,94 @@ pub enum Error {
|
||||
|
||||
#[snafu(display("Failure occurs during handling request"))]
|
||||
HandleRequest {
|
||||
#[snafu(implicit)]
|
||||
location: Location,
|
||||
source: BoxedError,
|
||||
},
|
||||
|
||||
#[snafu(display("Failed to convert FlightData"))]
|
||||
ConvertFlightData {
|
||||
#[snafu(implicit)]
|
||||
location: Location,
|
||||
source: common_grpc::Error,
|
||||
},
|
||||
|
||||
#[snafu(display("Column datatype error"))]
|
||||
ColumnDataType {
|
||||
#[snafu(implicit)]
|
||||
location: Location,
|
||||
source: api::error::Error,
|
||||
},
|
||||
|
||||
#[snafu(display("Illegal GRPC client state: {}", err_msg))]
|
||||
IllegalGrpcClientState { err_msg: String, location: Location },
|
||||
IllegalGrpcClientState {
|
||||
err_msg: String,
|
||||
#[snafu(implicit)]
|
||||
location: Location,
|
||||
},
|
||||
|
||||
#[snafu(display("Missing required field in protobuf, field: {}", field))]
|
||||
MissingField { field: String, location: Location },
|
||||
MissingField {
|
||||
field: String,
|
||||
#[snafu(implicit)]
|
||||
location: Location,
|
||||
},
|
||||
|
||||
#[snafu(display("Failed to create gRPC channel, peer address: {}", addr))]
|
||||
CreateChannel {
|
||||
addr: String,
|
||||
#[snafu(implicit)]
|
||||
location: Location,
|
||||
source: common_grpc::error::Error,
|
||||
},
|
||||
|
||||
#[snafu(display("Failed to request RegionServer, code: {}", code))]
|
||||
RegionServer { code: Code, source: BoxedError },
|
||||
#[snafu(display("Failed to create Tls channel manager"))]
|
||||
CreateTlsChannel {
|
||||
#[snafu(implicit)]
|
||||
location: Location,
|
||||
source: common_grpc::error::Error,
|
||||
},
|
||||
|
||||
#[snafu(display("Failed to request RegionServer {}, code: {}", addr, code))]
|
||||
RegionServer {
|
||||
addr: String,
|
||||
code: Code,
|
||||
source: BoxedError,
|
||||
#[snafu(implicit)]
|
||||
location: Location,
|
||||
},
|
||||
|
||||
#[snafu(display("Failed to request FlowServer {}, code: {}", addr, code))]
|
||||
FlowServer {
|
||||
addr: String,
|
||||
code: Code,
|
||||
source: BoxedError,
|
||||
#[snafu(implicit)]
|
||||
location: Location,
|
||||
},
|
||||
|
||||
// Server error carried in Tonic Status's metadata.
|
||||
#[snafu(display("{}", msg))]
|
||||
Server { code: StatusCode, msg: String },
|
||||
Server {
|
||||
code: StatusCode,
|
||||
msg: String,
|
||||
#[snafu(implicit)]
|
||||
location: Location,
|
||||
},
|
||||
|
||||
#[snafu(display("Illegal Database response: {err_msg}"))]
|
||||
IllegalDatabaseResponse { err_msg: String },
|
||||
IllegalDatabaseResponse {
|
||||
err_msg: String,
|
||||
#[snafu(implicit)]
|
||||
location: Location,
|
||||
},
|
||||
|
||||
#[snafu(display("Failed to send request with streaming: {}", err_msg))]
|
||||
ClientStreaming { err_msg: String, location: Location },
|
||||
ClientStreaming {
|
||||
err_msg: String,
|
||||
#[snafu(implicit)]
|
||||
location: Location,
|
||||
},
|
||||
}
|
||||
|
||||
pub type Result<T> = std::result::Result<T, Error>;
|
||||
@@ -94,10 +145,11 @@ impl ErrorExt for Error {
|
||||
Error::Server { code, .. } => *code,
|
||||
Error::FlightGet { source, .. }
|
||||
| Error::HandleRequest { source, .. }
|
||||
| Error::RegionServer { source, .. } => source.status_code(),
|
||||
Error::CreateChannel { source, .. } | Error::ConvertFlightData { source, .. } => {
|
||||
source.status_code()
|
||||
}
|
||||
| Error::RegionServer { source, .. }
|
||||
| Error::FlowServer { source, .. } => source.status_code(),
|
||||
Error::CreateChannel { source, .. }
|
||||
| Error::ConvertFlightData { source, .. }
|
||||
| Error::CreateTlsChannel { source, .. } => source.status_code(),
|
||||
Error::IllegalGrpcClientState { .. } => StatusCode::Unexpected,
|
||||
}
|
||||
}
|
||||
@@ -128,7 +180,11 @@ impl From<Status> for Error {
|
||||
let msg = get_metadata_value(&e, GREPTIME_DB_HEADER_ERROR_MSG)
|
||||
.unwrap_or_else(|| e.message().to_string());
|
||||
|
||||
Self::Server { code, msg }
|
||||
Self::Server {
|
||||
code,
|
||||
msg,
|
||||
location: location!(),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@@ -146,6 +202,9 @@ impl Error {
|
||||
} | Self::RegionServer {
|
||||
code: Code::Unavailable,
|
||||
..
|
||||
} | Self::RegionServer {
|
||||
code: Code::Unknown,
|
||||
..
|
||||
}
|
||||
)
|
||||
}
|
||||
|
||||
src/client/src/flow.rs (new file, 104 lines)
@@ -0,0 +1,104 @@
|
||||
// Copyright 2023 Greptime Team
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
use api::v1::flow::{FlowRequest, FlowResponse};
|
||||
use api::v1::region::InsertRequests;
|
||||
use common_error::ext::BoxedError;
|
||||
use common_meta::node_manager::Flownode;
|
||||
use snafu::{location, Location, ResultExt};
|
||||
|
||||
use crate::error::Result;
|
||||
use crate::Client;
|
||||
|
||||
#[derive(Debug)]
|
||||
pub struct FlowRequester {
|
||||
client: Client,
|
||||
}
|
||||
|
||||
#[async_trait::async_trait]
|
||||
impl Flownode for FlowRequester {
|
||||
async fn handle(&self, request: FlowRequest) -> common_meta::error::Result<FlowResponse> {
|
||||
self.handle_inner(request)
|
||||
.await
|
||||
.map_err(BoxedError::new)
|
||||
.context(common_meta::error::ExternalSnafu)
|
||||
}
|
||||
|
||||
async fn handle_inserts(
|
||||
&self,
|
||||
request: InsertRequests,
|
||||
) -> common_meta::error::Result<FlowResponse> {
|
||||
self.handle_inserts_inner(request)
|
||||
.await
|
||||
.map_err(BoxedError::new)
|
||||
.context(common_meta::error::ExternalSnafu)
|
||||
}
|
||||
}
|
||||
|
||||
impl FlowRequester {
|
||||
pub fn new(client: Client) -> Self {
|
||||
Self { client }
|
||||
}
|
||||
|
||||
async fn handle_inner(&self, request: FlowRequest) -> Result<FlowResponse> {
|
||||
let (addr, mut client) = self.client.raw_flow_client()?;
|
||||
|
||||
let response = client
|
||||
.handle_create_remove(request)
|
||||
.await
|
||||
.map_err(|e| {
|
||||
let code = e.code();
|
||||
let err: crate::error::Error = e.into();
|
||||
crate::error::Error::FlowServer {
|
||||
addr,
|
||||
code,
|
||||
source: BoxedError::new(err),
|
||||
location: location!(),
|
||||
}
|
||||
})?
|
||||
.into_inner();
|
||||
Ok(response)
|
||||
}
|
||||
|
||||
async fn handle_inserts_inner(&self, request: InsertRequests) -> Result<FlowResponse> {
|
||||
let (addr, mut client) = self.client.raw_flow_client()?;
|
||||
|
||||
let requests = api::v1::flow::InsertRequests {
|
||||
requests: request
|
||||
.requests
|
||||
.into_iter()
|
||||
.map(|insert| api::v1::flow::InsertRequest {
|
||||
region_id: insert.region_id,
|
||||
rows: insert.rows,
|
||||
})
|
||||
.collect(),
|
||||
};
|
||||
|
||||
let response = client
|
||||
.handle_mirror_request(requests)
|
||||
.await
|
||||
.map_err(|e| {
|
||||
let code = e.code();
|
||||
let err: crate::error::Error = e.into();
|
||||
crate::error::Error::FlowServer {
|
||||
addr,
|
||||
code,
|
||||
source: BoxedError::new(err),
|
||||
location: location!(),
|
||||
}
|
||||
})?
|
||||
.into_inner();
|
||||
Ok(response)
|
||||
}
|
||||
}
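Besides being handed out by `NodeClients::flownode`, the requester can be wired up directly; a sketch, with the request body left as a prost `Default` placeholder and the address made up:

// Sketch: talk to a flownode directly. `handle` comes from the Flownode
// trait, so it must be in scope; a real caller fills in the FlowRequest.
let client = Client::with_urls(["127.0.0.1:6800"]);
let flownode = FlowRequester::new(client);
let _resp = flownode.handle(FlowRequest::default()).await?;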
|
||||
@@ -12,9 +12,14 @@
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
#![feature(assert_matches)]
|
||||
|
||||
mod client;
|
||||
pub mod client_manager;
|
||||
#[cfg(feature = "testing")]
|
||||
mod database;
|
||||
pub mod error;
|
||||
pub mod flow;
|
||||
pub mod load_balance;
|
||||
mod metrics;
|
||||
pub mod region;
|
||||
@@ -29,6 +34,8 @@ pub use common_recordbatch::{RecordBatches, SendableRecordBatchStream};
|
||||
use snafu::OptionExt;
|
||||
|
||||
pub use self::client::Client;
|
||||
#[cfg(feature = "testing")]
|
||||
pub use self::database::Database;
|
||||
pub use self::error::{Error, Result};
|
||||
use crate::error::{IllegalDatabaseResponseSnafu, ServerSnafu};
|
||||
|
||||
|
||||
@@ -15,7 +15,7 @@
|
||||
use std::sync::Arc;
|
||||
|
||||
use api::region::RegionResponse;
|
||||
use api::v1::region::{QueryRequest, RegionRequest};
|
||||
use api::v1::region::RegionRequest;
|
||||
use api::v1::ResponseHeader;
|
||||
use arc_swap::ArcSwapOption;
|
||||
use arrow_flight::Ticket;
|
||||
@@ -26,12 +26,15 @@ use common_error::status_code::StatusCode;
|
||||
use common_grpc::flight::{FlightDecoder, FlightMessage};
|
||||
use common_meta::error::{self as meta_error, Result as MetaResult};
|
||||
use common_meta::node_manager::Datanode;
|
||||
use common_query::request::QueryRequest;
|
||||
use common_recordbatch::error::ExternalSnafu;
|
||||
use common_recordbatch::{RecordBatchStreamWrapper, SendableRecordBatchStream};
|
||||
use common_telemetry::error;
|
||||
use common_telemetry::tracing_context::TracingContext;
|
||||
use prost::Message;
|
||||
use query::query_engine::DefaultSerializer;
|
||||
use snafu::{location, Location, OptionExt, ResultExt};
|
||||
use substrait::{DFLogicalSubstraitConvertor, SubstraitPlan};
|
||||
use tokio_stream::StreamExt;
|
||||
|
||||
use crate::error::{
|
||||
@@ -63,6 +66,17 @@ impl Datanode for RegionRequester {
|
||||
}
|
||||
|
||||
async fn handle_query(&self, request: QueryRequest) -> MetaResult<SendableRecordBatchStream> {
|
||||
let plan = DFLogicalSubstraitConvertor
|
||||
.encode(&request.plan, DefaultSerializer)
|
||||
.map_err(BoxedError::new)
|
||||
.context(meta_error::ExternalSnafu)?
|
||||
.to_vec();
|
||||
let request = api::v1::region::QueryRequest {
|
||||
header: request.header,
|
||||
region_id: request.region_id.as_u64(),
|
||||
plan,
|
||||
};
|
||||
|
||||
let ticket = Ticket {
|
||||
ticket: request.encode_to_vec().into(),
|
||||
};
|
||||
@@ -177,7 +191,7 @@ impl RegionRequester {
|
||||
.with_label_values(&[request_type.as_str()])
|
||||
.start_timer();
|
||||
|
||||
let mut client = self.client.raw_region_client()?;
|
||||
let (addr, mut client) = self.client.raw_region_client()?;
|
||||
|
||||
let response = client
|
||||
.handle(request)
|
||||
@@ -187,8 +201,10 @@ impl RegionRequester {
|
||||
let err: error::Error = e.into();
|
||||
// Uses `Error::RegionServer` instead of `Error::Server`
|
||||
error::Error::RegionServer {
|
||||
addr,
|
||||
code,
|
||||
source: BoxedError::new(err),
|
||||
location: location!(),
|
||||
}
|
||||
})?
|
||||
.into_inner();
|
||||
@@ -272,7 +288,7 @@ mod test {
|
||||
err_msg: "blabla".to_string(),
|
||||
}),
|
||||
}));
|
||||
let Server { code, msg } = result.unwrap_err() else {
|
||||
let Server { code, msg, .. } = result.unwrap_err() else {
|
||||
unreachable!()
|
||||
};
|
||||
assert_eq!(code, StatusCode::Internal);
|
||||
|
||||