Mirror of https://github.com/GreptimeTeam/greptimedb.git (synced 2026-01-07 05:42:57 +00:00)

Compare commits: poc_datafl ... avoid-quer (135 commits)
Commits in this comparison (SHA1):

1bfba48755  457998f0fe  b02c256157  45fee948e9  ea49f8a5c4  43afea1a9d  fcfcf86385  26b112ab57
24612f62dd  85a231850d  f024054ed3  05751084e7  8b6596faa0  eab309ff7e  7de336f087  6e9a9dc333
848bd7e553  f0effd2680  aafb468547  4aa756c896  d3860671a8  9dd6e033a7  097f62f459  048368fd87
f9db5ff0d6  20ce7d428d  75bddc0bf5  c78043d526  297105266b  1de17aec74  389ded93d1  af486ec0d0
25d64255a3  3790020d78  5df3d4e5da  af670df515  a58256d4d3  466f7c6448  0101657649  a3a2c8d063
dfc1acbb2a  0d055b6ee6  614643ef7b  b90b7adf6f  418090b464  090b59e8d6  9e1af79637  9800807fe5
b86d79b906  e070ba3c32  43bf7bffd0  56aed6e6ff  47785756e5  0aa523cd8c  7a8222dd97  40c585890a
da925e956e  d7ade3c854  179c8c716c  19543f9819  533ada70ca  c50ff23194  d7f1150098  82c3eca25e
df13832a59  7da92eb9eb  c71298d3d5  de594833ac  6a9a92931d  11ad5b3ed1  b8354bbb55  258675b75e
11a08cb272  e9b178b8b9  3477fde0e5  9baa431656  e2a1cb5840  f696f41a02  0168d43d60  e372e25e30
ca409a732f  5c0a530ad1  4b030456f6  f93b5b19f0  669a6d84e9  a45017ad71  0d9e71b653  93f178f3ad
9f4a6c6fe2  c915916b62  dff7ba7598  fe34ebf770  a1c51a5885  63a8d293a1  6c621b7fcf  529e344450
2a169f9364  97eb196699  cfae276d37  09129a911e  15d7b9755e  72897a20e3  c04d02460f  4ca7ac7632
a260ba3ee7  efd3f04b7c  f16ce3ca27  6214180ecd  00e21e2021  494ce65729  e15294db41  be1eb4efb7
9d12496aaf  5d8084a32f  60eb5de3f1  a0be7198f9  6ab3aeb142  590aedd466  27e376e892  36c41d129c
89da42dbc1  04852aa27e  d0820bb26d  fa6c371380  9aa2182cb2  bca2e393bf  b1ef327bac  115c74791d
aec5cca2c7  06e1c43743  9d36c31209  c91132bd14  25e9076f5b  08945f128b  5a0629eaa0

@@ -1,7 +1,7 @@
 ---
 name: Bug report
 description: Is something not working? Help us fix it!
-labels: [ "bug" ]
+labels: [ "C-bug" ]
 body:
   - type: markdown
     attributes:

.github/ISSUE_TEMPLATE/config.yml | 2 (vendored)
@@ -4,5 +4,5 @@ contact_links:
     url: https://greptime.com/slack
     about: Get free help from the Greptime community
   - name: Greptime Community Discussion
-    url: https://github.com/greptimeTeam/greptimedb/discussions
+    url: https://github.com/greptimeTeam/discussions
     about: Get free help from the Greptime community

.github/ISSUE_TEMPLATE/enhancement.yml | 2 (vendored)
@@ -1,7 +1,7 @@
 ---
 name: Enhancement
 description: Suggest an enhancement to existing functionality
-labels: [ "enhancement" ]
+labels: [ "C-enhancement" ]
 body:
   - type: dropdown
     id: type

@@ -1,7 +1,7 @@
 ---
-name: Feature request
+name: New Feature
 description: Suggest a new feature for GreptimeDB
-labels: [ "feature request" ]
+labels: [ "C-feature" ]
 body:
   - type: markdown
     id: info

.github/actions/build-and-push-ci-image/action.yml | 18 (new file, vendored)
@@ -0,0 +1,18 @@
+name: Build and push CI Docker image
+description: Build and push CI Docker image to local registry
+inputs:
+  binary_path:
+    default: "./bin"
+    description: "Binary path"
+runs:
+  using: composite
+  steps:
+    - name: Build and push to local registry
+      uses: docker/build-push-action@v5
+      with:
+        context: .
+        file: ./docker/ci/ubuntu/Dockerfile.fuzztests
+        push: true
+        tags: localhost:5001/greptime/greptimedb:latest
+        build-args: |
+          BINARY_PATH=${{ inputs.binary_path }}
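
For orientation, a minimal sketch of how a workflow job consumes this action; the step layout is abridged from the develop.yml fuzz job further down, and `binary_path` is left at its "./bin" default:

```yaml
    steps:
      - uses: actions/checkout@v4
      # Fetch the binary produced by an earlier build job (artifact name "bin").
      - uses: actions/download-artifact@v4
        with:
          name: bin
          path: .
      - name: Unzip binary
        run: tar -xvf ./bin.tar.gz
      # Build the fuzz-test image and push it to the kind-local registry at localhost:5001.
      - name: Build and push GreptimeDB image
        uses: ./.github/actions/build-and-push-ci-image
```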

@@ -59,6 +59,9 @@ runs:
       if: ${{ inputs.disable-run-tests == 'false' }}
       shell: pwsh
       run: make test sqlness-test
+      env:
+        RUSTUP_WINDOWS_PATH_ADD_BIN: 1 # Workaround for https://github.com/nextest-rs/nextest/issues/1493
+        RUST_BACKTRACE: 1

     - name: Upload sqlness logs
       if: ${{ failure() }} # Only upload logs when the integration tests failed.

.github/actions/setup-cyborg/action.yml | 16 (new file, vendored)
@@ -0,0 +1,16 @@
+name: Setup cyborg environment
+description: Setup cyborg environment
+runs:
+  using: composite
+  steps:
+    - uses: actions/setup-node@v4
+      with:
+        node-version: 22
+    - uses: pnpm/action-setup@v3
+      with:
+        package_json_file: 'cyborg/package.json'
+        run_install: true
+    - name: Describe the Environment
+      working-directory: cyborg
+      shell: bash
+      run: pnpm tsx -v
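
For reference, a minimal sketch of the pattern the workflows below use with this action: run it after checkout, then invoke a cyborg script. The script name here follows docbot.yml; other workflows swap in their own bin/*.ts:

```yaml
    steps:
      - uses: actions/checkout@v4
      # Installs Node 22, pnpm, and the cyborg package dependencies.
      - uses: ./.github/actions/setup-cyborg
      - name: Maybe Follow Up Docs Issue
        working-directory: cyborg
        run: pnpm tsx bin/follow-up-docs-issue.ts
        env:
          GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
```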

.github/actions/setup-etcd-cluster/action.yml | 25 (new file, vendored)
@@ -0,0 +1,25 @@
+name: Setup Etcd cluster
+description: Deploy Etcd cluster on Kubernetes
+inputs:
+  etcd-replicas:
+    default: 3
+    description: "Etcd replicas"
+  namespace:
+    default: "etcd-cluster"
+
+runs:
+  using: composite
+  steps:
+    - name: Install Etcd cluster
+      shell: bash
+      run: |
+        helm upgrade \
+          --install etcd oci://registry-1.docker.io/bitnamicharts/etcd \
+          --set replicaCount=${{ inputs.etcd-replicas }} \
+          --set resources.requests.cpu=50m \
+          --set resources.requests.memory=128Mi \
+          --set auth.rbac.create=false \
+          --set auth.rbac.token.enabled=false \
+          --set persistence.size=2Gi \
+          --create-namespace \
+          -n ${{ inputs.namespace }}
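
A minimal sketch of the intended call site, assuming a kind cluster already exists; the wait step mirrors the one added to develop.yml further down and uses the action's default namespace:

```yaml
    steps:
      - uses: ./.github/actions/setup-kind
      # Defaults: 3 replicas in the etcd-cluster namespace.
      - uses: ./.github/actions/setup-etcd-cluster
      - name: Wait for etcd
        run: |
          kubectl wait \
            --for=condition=Ready \
            pod -l app.kubernetes.io/instance=etcd \
            --timeout=120s \
            -n etcd-cluster
```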

.github/actions/setup-greptimedb-cluster/action.yml | 85 (new file, vendored)
@@ -0,0 +1,85 @@
+name: Setup GreptimeDB cluster
+description: Deploy GreptimeDB cluster on Kubernetes
+inputs:
+  frontend-replicas:
+    default: 2
+    description: "Number of Frontend replicas"
+  datanode-replicas:
+    default: 2
+    description: "Number of Datanode replicas"
+  meta-replicas:
+    default: 3
+    description: "Number of Metasrv replicas"
+  image-registry:
+    default: "docker.io"
+    description: "Image registry"
+  image-repository:
+    default: "greptime/greptimedb"
+    description: "Image repository"
+  image-tag:
+    default: "latest"
+    description: 'Image tag'
+  etcd-endpoints:
+    default: "etcd.etcd-cluster.svc.cluster.local:2379"
+    description: "Etcd endpoints"
+
+runs:
+  using: composite
+  steps:
+    - name: Install GreptimeDB operator
+      shell: bash
+      run: |
+        helm repo add greptime https://greptimeteam.github.io/helm-charts/
+        helm repo update
+        helm upgrade \
+          --install \
+          --create-namespace \
+          greptimedb-operator greptime/greptimedb-operator \
+          -n greptimedb-admin \
+          --wait \
+          --wait-for-jobs
+    - name: Install GreptimeDB cluster
+      shell: bash
+      run: |
+        helm upgrade \
+          --install my-greptimedb \
+          --set meta.etcdEndpoints=${{ inputs.etcd-endpoints }} \
+          --set image.registry=${{ inputs.image-registry }} \
+          --set image.repository=${{ inputs.image-repository }} \
+          --set image.tag=${{ inputs.image-tag }} \
+          --set base.podTemplate.main.resources.requests.cpu=50m \
+          --set base.podTemplate.main.resources.requests.memory=256Mi \
+          --set base.podTemplate.main.resources.limits.cpu=1000m \
+          --set base.podTemplate.main.resources.limits.memory=2Gi \
+          --set frontend.replicas=${{ inputs.frontend-replicas }} \
+          --set datanode.replicas=${{ inputs.datanode-replicas }} \
+          --set meta.replicas=${{ inputs.meta-replicas }} \
+          greptime/greptimedb-cluster \
+          --create-namespace \
+          -n my-greptimedb \
+          --wait \
+          --wait-for-jobs
+    - name: Wait for GreptimeDB
+      shell: bash
+      run: |
+        while true; do
+          PHASE=$(kubectl -n my-greptimedb get gtc my-greptimedb -o jsonpath='{.status.clusterPhase}')
+          if [ "$PHASE" == "Running" ]; then
+            echo "Cluster is ready"
+            break
+          else
+            echo "Cluster is not ready yet: Current phase: $PHASE"
+            kubectl get pods -n my-greptimedb
+            sleep 5 # wait for 5 seconds before check again.
+          fi
+        done
+    - name: Print GreptimeDB info
+      if: always()
+      shell: bash
+      run: |
+        kubectl get all --show-labels -n my-greptimedb
+    - name: Describe Nodes
+      if: always()
+      shell: bash
+      run: |
+        kubectl describe nodes
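
A minimal sketch of how the distributed fuzz job below wires this up: point the cluster at the kind-local registry and expose the frontend's MySQL port to the runner (values taken from that job, not a general recommendation):

```yaml
    steps:
      - name: Setup GreptimeDB cluster
        uses: ./.github/actions/setup-greptimedb-cluster
        with:
          # Pull the image pushed by build-and-push-ci-image into the kind-local registry.
          image-registry: localhost:5001
      - name: Port forward (mysql)
        run: |
          kubectl port-forward service/my-greptimedb-frontend 4002:4002 -n my-greptimedb &
```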

.github/actions/setup-kind/action.yml | 10 (new file, vendored)
@@ -0,0 +1,10 @@
+name: Setup Kind
+description: Deploy Kind
+runs:
+  using: composite
+  steps:
+    - uses: actions/checkout@v4
+    - name: Create kind cluster
+      shell: bash
+      run: |
+        ./.github/scripts/kind-with-registry.sh

.github/actions/sqlness-test/action.yml | 11 (vendored)
@@ -57,3 +57,14 @@ runs:
         AWS_SECRET_ACCESS_KEY: ${{ inputs.aws-secret-access-key }}
       run: |
         aws s3 rm s3://${{ inputs.aws-ci-test-bucket }}/${{ inputs.data-root }} --recursive
+    - name: Export kind logs
+      if: failure()
+      shell: bash
+      run: kind export logs -n greptimedb-operator-e2e /tmp/kind
+    - name: Upload logs
+      if: failure()
+      uses: actions/upload-artifact@v4
+      with:
+        name: kind-logs
+        path: /tmp/kind
+        retention-days: 3

.github/doc-label-config.yml | 4 (vendored, deleted)
@@ -1,4 +0,0 @@
-Doc not needed:
-  - '- \[x\] This PR does not require documentation updates.'
-Doc update required:
-  - '- \[ \] This PR does not require documentation updates.'

.github/pull_request_template.md | 6 (vendored)
@@ -15,6 +15,6 @@ Please explain IN DETAIL what the changes are in this PR and why they are needed
 
 ## Checklist
 
 - [ ] I have written the necessary rustdoc comments.
 - [ ] I have added the necessary unit tests and integration tests.
-- [x] This PR does not require documentation updates.
+- [ ] This PR requires documentation updates.

.github/scripts/kind-with-registry.sh | 66 (new executable file, vendored)
@@ -0,0 +1,66 @@
+#!/usr/bin/env bash
+
+set -e
+set -o pipefail
+
+# 1. Create registry container unless it already exists
+reg_name='kind-registry'
+reg_port='5001'
+if [ "$(docker inspect -f '{{.State.Running}}' "${reg_name}" 2>/dev/null || true)" != 'true' ]; then
+  docker run \
+    -d --restart=always -p "127.0.0.1:${reg_port}:5000" --network bridge --name "${reg_name}" \
+    registry:2
+fi
+
+# 2. Create kind cluster with containerd registry config dir enabled
+# TODO: kind will eventually enable this by default and this patch will
+# be unnecessary.
+#
+# See:
+# https://github.com/kubernetes-sigs/kind/issues/2875
+# https://github.com/containerd/containerd/blob/main/docs/cri/config.md#registry-configuration
+# See: https://github.com/containerd/containerd/blob/main/docs/hosts.md
+cat <<EOF | kind create cluster --wait 2m --config=-
+kind: Cluster
+apiVersion: kind.x-k8s.io/v1alpha4
+containerdConfigPatches:
+- |-
+  [plugins."io.containerd.grpc.v1.cri".registry]
+    config_path = "/etc/containerd/certs.d"
+EOF
+
+# 3. Add the registry config to the nodes
+#
+# This is necessary because localhost resolves to loopback addresses that are
+# network-namespace local.
+# In other words: localhost in the container is not localhost on the host.
+#
+# We want a consistent name that works from both ends, so we tell containerd to
+# alias localhost:${reg_port} to the registry container when pulling images
+REGISTRY_DIR="/etc/containerd/certs.d/localhost:${reg_port}"
+for node in $(kind get nodes); do
+  docker exec "${node}" mkdir -p "${REGISTRY_DIR}"
+  cat <<EOF | docker exec -i "${node}" cp /dev/stdin "${REGISTRY_DIR}/hosts.toml"
+[host."http://${reg_name}:5000"]
+EOF
+done
+
+# 4. Connect the registry to the cluster network if not already connected
+# This allows kind to bootstrap the network but ensures they're on the same network
+if [ "$(docker inspect -f='{{json .NetworkSettings.Networks.kind}}' "${reg_name}")" = 'null' ]; then
+  docker network connect "kind" "${reg_name}"
+fi
+
+# 5. Document the local registry
+# https://github.com/kubernetes/enhancements/tree/master/keps/sig-cluster-lifecycle/generic/1755-communicating-a-local-registry
+cat <<EOF | kubectl apply -f -
+apiVersion: v1
+kind: ConfigMap
+metadata:
+  name: local-registry-hosting
+  namespace: kube-public
+data:
+  localRegistryHosting.v1: |
+    host: "localhost:${reg_port}"
+    help: "https://kind.sigs.k8s.io/docs/user/local-registry/"
+EOF

.github/workflows/apidoc.yml | 2 (vendored)
@@ -13,7 +13,7 @@ on:
 name: Build API docs
 
 env:
-  RUST_TOOLCHAIN: nightly-2024-04-18
+  RUST_TOOLCHAIN: nightly-2024-04-20
 
 jobs:
   apidoc:

.github/workflows/dev-build.yml | 20 (vendored)
@@ -82,6 +82,9 @@ env:
   # The source code will check out in the following path: '${WORKING_DIR}/dev/greptime'.
   CHECKOUT_GREPTIMEDB_PATH: dev/greptimedb
 
+permissions:
+  issues: write
+
 jobs:
   allocate-runners:
     name: Allocate runners
@@ -321,7 +324,7 @@
           github-token: ${{ secrets.GH_PERSONAL_ACCESS_TOKEN }}
 
   notification:
-    if: ${{ always() }} # Not requiring successful dependent jobs, always run.
+    if: ${{ github.repository == 'GreptimeTeam/greptimedb' && always() }} # Not requiring successful dependent jobs, always run.
     name: Send notification to Greptime team
     needs: [
       release-images-to-dockerhub
@@ -330,16 +333,25 @@
     env:
       SLACK_WEBHOOK_URL: ${{ secrets.SLACK_WEBHOOK_URL_DEVELOP_CHANNEL }}
     steps:
-      - name: Notifiy dev build successful result
+      - uses: actions/checkout@v4
+      - uses: ./.github/actions/setup-cyborg
+      - name: Report CI status
+        id: report-ci-status
+        working-directory: cyborg
+        run: pnpm tsx bin/report-ci-failure.ts
+        env:
+          GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
+          CI_REPORT_STATUS: ${{ needs.release-images-to-dockerhub.outputs.build-result == 'success' }}
+      - name: Notify dev build successful result
         uses: slackapi/slack-github-action@v1.23.0
         if: ${{ needs.release-images-to-dockerhub.outputs.build-result == 'success' }}
         with:
           payload: |
             {"text": "GreptimeDB's ${{ env.NEXT_RELEASE_VERSION }} build has completed successfully."}
 
-      - name: Notifiy dev build failed result
+      - name: Notify dev build failed result
        uses: slackapi/slack-github-action@v1.23.0
        if: ${{ needs.release-images-to-dockerhub.outputs.build-result != 'success' }}
        with:
          payload: |
-            {"text": "GreptimeDB's ${{ env.NEXT_RELEASE_VERSION }} build has failed, please check 'https://github.com/GreptimeTeam/greptimedb/actions/workflows/${{ env.NEXT_RELEASE_VERSION }}-build.yml'."}
+            {"text": "GreptimeDB's ${{ env.NEXT_RELEASE_VERSION }} build has failed, please check ${{ steps.report-ci-status.outputs.html_url }}."}

.github/workflows/develop.yml | 141 (vendored)
@@ -30,7 +30,7 @@ concurrency:
   cancel-in-progress: true
 
 env:
-  RUST_TOOLCHAIN: nightly-2024-04-18
+  RUST_TOOLCHAIN: nightly-2024-04-20
 
 jobs:
   check-typos-and-docs:
@@ -57,7 +57,7 @@ jobs:
     runs-on: ${{ matrix.os }}
     strategy:
       matrix:
-        os: [ windows-latest, ubuntu-20.04 ]
+        os: [ windows-2022, ubuntu-20.04 ]
     timeout-minutes: 60
     steps:
       - uses: actions/checkout@v4
@@ -212,7 +212,14 @@ jobs:
           path: .
       - name: Unzip binaries
         run: tar -xvf ./bins.tar.gz
-      - name: Fuzz Test
+      - name: Build Fuzz Test
+        shell: bash
+        run: |
+          cd tests-fuzz &
+          cargo install cargo-gc-bin &
+          cargo gc &
+          cd ..
+      - name: Run Fuzz Test
         uses: ./.github/actions/fuzz-test
         env:
           CUSTOM_LIBFUZZER_PATH: /usr/lib/llvm-14/lib/libFuzzer.a
@@ -231,6 +238,130 @@ jobs:
           path: /tmp/unstable-greptime/
           retention-days: 3
 
+  build-greptime-ci:
+    name: Build GreptimeDB binary (profile-CI)
+    runs-on: ${{ matrix.os }}
+    strategy:
+      matrix:
+        os: [ ubuntu-20.04 ]
+    timeout-minutes: 60
+    steps:
+      - uses: actions/checkout@v4
+      - uses: arduino/setup-protoc@v3
+        with:
+          repo-token: ${{ secrets.GITHUB_TOKEN }}
+      - uses: dtolnay/rust-toolchain@master
+        with:
+          toolchain: ${{ env.RUST_TOOLCHAIN }}
+      - uses: Swatinem/rust-cache@v2
+        with:
+          # Shares across multiple jobs
+          shared-key: "build-greptime-ci"
+      - name: Install cargo-gc-bin
+        shell: bash
+        run: cargo install cargo-gc-bin
+      - name: Build greptime bianry
+        shell: bash
+        # `cargo gc` will invoke `cargo build` with specified args
+        run: cargo build --bin greptime --profile ci
+      - name: Pack greptime binary
+        shell: bash
+        run: |
+          mkdir bin && \
+          mv ./target/ci/greptime bin
+      - name: Print greptime binaries info
+        run: ls -lh bin
+      - name: Upload artifacts
+        uses: ./.github/actions/upload-artifacts
+        with:
+          artifacts-dir: bin
+          version: current
+
+  distributed-fuzztest:
+    name: Fuzz Test (Distributed, Disk)
+    runs-on: ubuntu-latest
+    needs: build-greptime-ci
+    strategy:
+      matrix:
+        target: [ "fuzz_create_table", "fuzz_alter_table", "fuzz_create_database", "fuzz_create_logical_table", "fuzz_alter_logical_table", "fuzz_insert", "fuzz_insert_logical_table" ]
+    steps:
+      - uses: actions/checkout@v4
+      - name: Setup Kind
+        uses: ./.github/actions/setup-kind
+      - name: Setup Etcd cluser
+        uses: ./.github/actions/setup-etcd-cluster
+      # Prepares for fuzz tests
+      - uses: arduino/setup-protoc@v3
+        with:
+          repo-token: ${{ secrets.GITHUB_TOKEN }}
+      - uses: dtolnay/rust-toolchain@master
+        with:
+          toolchain: ${{ env.RUST_TOOLCHAIN }}
+      - name: Rust Cache
+        uses: Swatinem/rust-cache@v2
+        with:
+          # Shares across multiple jobs
+          shared-key: "fuzz-test-targets"
+      - name: Set Rust Fuzz
+        shell: bash
+        run: |
+          sudo apt-get install -y libfuzzer-14-dev
+          rustup install nightly
+          cargo +nightly install cargo-fuzz
+      # Downloads ci image
+      - name: Download pre-built binariy
+        uses: actions/download-artifact@v4
+        with:
+          name: bin
+          path: .
+      - name: Unzip binary
+        run: tar -xvf ./bin.tar.gz
+      - name: Build and push GreptimeDB image
+        uses: ./.github/actions/build-and-push-ci-image
+      - name: Wait for etcd
+        run: |
+          kubectl wait \
+            --for=condition=Ready \
+            pod -l app.kubernetes.io/instance=etcd \
+            --timeout=120s \
+            -n etcd-cluster
+      - name: Print etcd info
+        shell: bash
+        run: kubectl get all --show-labels -n etcd-cluster
+      # Setup cluster for test
+      - name: Setup GreptimeDB cluster
+        uses: ./.github/actions/setup-greptimedb-cluster
+        with:
+          image-registry: localhost:5001
+      - name: Port forward (mysql)
+        run: |
+          kubectl port-forward service/my-greptimedb-frontend 4002:4002 -n my-greptimedb&
+      - name: Fuzz Test
+        uses: ./.github/actions/fuzz-test
+        env:
+          CUSTOM_LIBFUZZER_PATH: /usr/lib/llvm-14/lib/libFuzzer.a
+          GT_MYSQL_ADDR: 127.0.0.1:4002
+        with:
+          target: ${{ matrix.target }}
+          max-total-time: 120
+      - name: Describe Nodes
+        if: failure()
+        shell: bash
+        run: |
+          kubectl describe nodes
+      - name: Export kind logs
+        if: failure()
+        shell: bash
+        run: |
+          kind export logs /tmp/kind
+      - name: Upload logs
+        if: failure()
+        uses: actions/upload-artifact@v4
+        with:
+          name: fuzz-tests-kind-logs-${{ matrix.target }}
+          path: /tmp/kind
+          retention-days: 3
+
 
   sqlness:
     name: Sqlness Test
@@ -256,7 +387,7 @@ jobs:
         uses: actions/upload-artifact@v4
         with:
           name: sqlness-logs
-          path: /tmp/sqlness-*
+          path: /tmp/sqlness*
         retention-days: 3
 
   sqlness-kafka-wal:
@@ -286,7 +417,7 @@ jobs:
         uses: actions/upload-artifact@v4
         with:
           name: sqlness-logs-with-kafka-wal
-          path: /tmp/sqlness-*
+          path: /tmp/sqlness*
         retention-days: 3
 
   fmt:

.github/workflows/doc-issue.yml | 39 (vendored, deleted)
@@ -1,39 +0,0 @@
-name: Create Issue in downstream repos
-
-on:
-  issues:
-    types:
-      - labeled
-  pull_request_target:
-    types:
-      - labeled
-
-jobs:
-  doc_issue:
-    if: github.event.label.name == 'doc update required'
-    runs-on: ubuntu-20.04
-    steps:
-      - name: create an issue in doc repo
-        uses: dacbd/create-issue-action@v1.2.1
-        with:
-          owner: GreptimeTeam
-          repo: docs
-          token: ${{ secrets.DOCS_REPO_TOKEN }}
-          title: Update docs for ${{ github.event.issue.title || github.event.pull_request.title }}
-          body: |
-            A document change request is generated from
-            ${{ github.event.issue.html_url || github.event.pull_request.html_url }}
-  cloud_issue:
-    if: github.event.label.name == 'cloud followup required'
-    runs-on: ubuntu-20.04
-    steps:
-      - name: create an issue in cloud repo
-        uses: dacbd/create-issue-action@v1.2.1
-        with:
-          owner: GreptimeTeam
-          repo: greptimedb-cloud
-          token: ${{ secrets.DOCS_REPO_TOKEN }}
-          title: Followup changes in ${{ github.event.issue.title || github.event.pull_request.title }}
-          body: |
-            A followup request is generated from
-            ${{ github.event.issue.html_url || github.event.pull_request.html_url }}

.github/workflows/doc-label.yml | 36 (vendored, deleted)
@@ -1,36 +0,0 @@
-name: "PR Doc Labeler"
-on:
-  pull_request_target:
-    types: [opened, edited, synchronize, ready_for_review, auto_merge_enabled, labeled, unlabeled]
-
-permissions:
-  pull-requests: write
-  contents: read
-
-jobs:
-  triage:
-    if: ${{ github.repository == 'GreptimeTeam/greptimedb' }}
-    runs-on: ubuntu-latest
-    steps:
-      - uses: github/issue-labeler@v3.4
-        with:
-          configuration-path: .github/doc-label-config.yml
-          enable-versioned-regex: false
-          repo-token: ${{ secrets.GITHUB_TOKEN }}
-          sync-labels: 1
-      - name: create an issue in doc repo
-        uses: dacbd/create-issue-action@v1.2.1
-        if: ${{ github.event.action == 'opened' && contains(github.event.pull_request.body, '- [ ] This PR does not require documentation updates.') }}
-        with:
-          owner: GreptimeTeam
-          repo: docs
-          token: ${{ secrets.DOCS_REPO_TOKEN }}
-          title: Update docs for ${{ github.event.issue.title || github.event.pull_request.title }}
-          body: |
-            A document change request is generated from
-            ${{ github.event.issue.html_url || github.event.pull_request.html_url }}
-      - name: Check doc labels
-        uses: docker://agilepathway/pull-request-label-checker:latest
-        with:
-          one_of: Doc update required,Doc not needed
-          repo_token: ${{ secrets.GITHUB_TOKEN }}

.github/workflows/docbot.yml | 22 (new file, vendored)
@@ -0,0 +1,22 @@
+name: Follow Up Docs
+on:
+  pull_request_target:
+    types: [opened, edited]
+
+permissions:
+  pull-requests: write
+  contents: read
+
+jobs:
+  docbot:
+    runs-on: ubuntu-20.04
+    timeout-minutes: 10
+    steps:
+      - uses: actions/checkout@v4
+      - uses: ./.github/actions/setup-cyborg
+      - name: Maybe Follow Up Docs Issue
+        working-directory: cyborg
+        run: pnpm tsx bin/follow-up-docs-issue.ts
+        env:
+          DOCS_REPO_TOKEN: ${{ secrets.DOCS_REPO_TOKEN }}
+          GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}

.github/workflows/nightly-build.yml | 27 (vendored)
@@ -66,6 +66,13 @@ env:
 
   NIGHTLY_RELEASE_PREFIX: nightly
 
+  # Use the different image name to avoid conflict with the release images.
+  # The DockerHub image will be greptime/greptimedb-nightly.
+  IMAGE_NAME: greptimedb-nightly
+
+permissions:
+  issues: write
+
 jobs:
   allocate-runners:
     name: Allocate runners
@@ -188,6 +195,7 @@ jobs:
         with:
           image-registry: docker.io
           image-namespace: ${{ vars.IMAGE_NAMESPACE }}
+          image-name: ${{ env.IMAGE_NAME }}
           image-registry-username: ${{ secrets.DOCKERHUB_USERNAME }}
           image-registry-password: ${{ secrets.DOCKERHUB_TOKEN }}
           version: ${{ needs.allocate-runners.outputs.version }}
@@ -220,7 +228,7 @@ jobs:
         with:
           src-image-registry: docker.io
           src-image-namespace: ${{ vars.IMAGE_NAMESPACE }}
-          src-image-name: greptimedb
+          src-image-name: ${{ env.IMAGE_NAME }}
           dst-image-registry-username: ${{ secrets.ALICLOUD_USERNAME }}
           dst-image-registry-password: ${{ secrets.ALICLOUD_PASSWORD }}
           dst-image-registry: ${{ vars.ACR_IMAGE_REGISTRY }}
@@ -285,7 +293,7 @@ jobs:
           github-token: ${{ secrets.GH_PERSONAL_ACCESS_TOKEN }}
 
   notification:
-    if: ${{ always() }} # Not requiring successful dependent jobs, always run.
+    if: ${{ github.repository == 'GreptimeTeam/greptimedb' && always() }} # Not requiring successful dependent jobs, always run.
     name: Send notification to Greptime team
     needs: [
       release-images-to-dockerhub
@@ -294,16 +302,25 @@ jobs:
     env:
       SLACK_WEBHOOK_URL: ${{ secrets.SLACK_WEBHOOK_URL_DEVELOP_CHANNEL }}
     steps:
-      - name: Notifiy nightly build successful result
+      - uses: actions/checkout@v4
+      - uses: ./.github/actions/setup-cyborg
+      - name: Report CI status
+        id: report-ci-status
+        working-directory: cyborg
+        run: pnpm tsx bin/report-ci-failure.ts
+        env:
+          GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
+          CI_REPORT_STATUS: ${{ needs.release-images-to-dockerhub.outputs.nightly-build-result == 'success' }}
+      - name: Notify nightly build successful result
         uses: slackapi/slack-github-action@v1.23.0
         if: ${{ needs.release-images-to-dockerhub.outputs.nightly-build-result == 'success' }}
         with:
           payload: |
             {"text": "GreptimeDB's ${{ env.NEXT_RELEASE_VERSION }} build has completed successfully."}
 
-      - name: Notifiy nightly build failed result
+      - name: Notify nightly build failed result
         uses: slackapi/slack-github-action@v1.23.0
         if: ${{ needs.release-images-to-dockerhub.outputs.nightly-build-result != 'success' }}
         with:
           payload: |
-            {"text": "GreptimeDB's ${{ env.NEXT_RELEASE_VERSION }} build has failed, please check 'https://github.com/GreptimeTeam/greptimedb/actions/workflows/${{ env.NEXT_RELEASE_VERSION }}-build.yml'."}
+            {"text": "GreptimeDB's ${{ env.NEXT_RELEASE_VERSION }} build has failed, please check ${{ steps.report-ci-status.outputs.html_url }}."}

.github/workflows/nightly-ci.yml | 81 (vendored)
@@ -1,6 +1,6 @@
 on:
   schedule:
-    - cron: '0 23 * * 1-5'
+    - cron: "0 23 * * 1-5"
   workflow_dispatch:
 
 name: Nightly CI
@@ -10,7 +10,10 @@ concurrency:
   cancel-in-progress: true
 
 env:
-  RUST_TOOLCHAIN: nightly-2024-04-18
+  RUST_TOOLCHAIN: nightly-2024-04-20
+
+permissions:
+  issues: write
 
 jobs:
   sqlness-test:
@@ -22,7 +25,6 @@ jobs:
         uses: actions/checkout@v4
         with:
           fetch-depth: 0
-
       - name: Run sqlness test
         uses: ./.github/actions/sqlness-test
         with:
@@ -35,10 +37,11 @@ jobs:
   sqlness-windows:
     name: Sqlness tests on Windows
     if: ${{ github.repository == 'GreptimeTeam/greptimedb' }}
-    runs-on: windows-latest-8-cores
+    runs-on: windows-2022-8-cores
     timeout-minutes: 60
     steps:
       - uses: actions/checkout@v4
+      - uses: ./.github/actions/setup-cyborg
       - uses: arduino/setup-protoc@v3
         with:
           repo-token: ${{ secrets.GITHUB_TOKEN }}
@@ -49,14 +52,6 @@ jobs:
         uses: Swatinem/rust-cache@v2
       - name: Run sqlness
        run: cargo sqlness
-      - name: Notify slack if failed
-        if: failure()
-        uses: slackapi/slack-github-action@v1.23.0
-        env:
-          SLACK_WEBHOOK_URL: ${{ secrets.SLACK_WEBHOOK_URL_DEVELOP_CHANNEL }}
-        with:
-          payload: |
-            {"text": "Nightly CI failed for sqlness tests"}
       - name: Upload sqlness logs
         if: always()
         uses: actions/upload-artifact@v4
@@ -68,14 +63,18 @@ jobs:
   test-on-windows:
     name: Run tests on Windows
     if: ${{ github.repository == 'GreptimeTeam/greptimedb' }}
-    runs-on: windows-latest-8-cores
+    runs-on: windows-2022-8-cores
     timeout-minutes: 60
     steps:
      - run: git config --global core.autocrlf false
      - uses: actions/checkout@v4
+     - uses: ./.github/actions/setup-cyborg
      - uses: arduino/setup-protoc@v3
        with:
          repo-token: ${{ secrets.GITHUB_TOKEN }}
+     - uses: KyleMayes/install-llvm-action@v1
+       with:
+         version: "14.0"
      - name: Install Rust toolchain
        uses: dtolnay/rust-toolchain@master
        with:
@@ -88,7 +87,7 @@ jobs:
      - name: Install Python
        uses: actions/setup-python@v5
        with:
-         python-version: '3.10'
+         python-version: "3.10"
      - name: Install PyArrow Package
        run: pip install pyarrow
      - name: Install WSL distribution
@@ -98,18 +97,62 @@ jobs:
      - name: Running tests
        run: cargo nextest run -F pyo3_backend,dashboard
        env:
+         CARGO_BUILD_RUSTFLAGS: "-C linker=lld-link"
         RUST_BACKTRACE: 1
         CARGO_INCREMENTAL: 0
+         RUSTUP_WINDOWS_PATH_ADD_BIN: 1 # Workaround for https://github.com/nextest-rs/nextest/issues/1493
         GT_S3_BUCKET: ${{ vars.AWS_CI_TEST_BUCKET }}
         GT_S3_ACCESS_KEY_ID: ${{ secrets.AWS_CI_TEST_ACCESS_KEY_ID }}
         GT_S3_ACCESS_KEY: ${{ secrets.AWS_CI_TEST_SECRET_ACCESS_KEY }}
         GT_S3_REGION: ${{ vars.AWS_CI_TEST_BUCKET_REGION }}
         UNITTEST_LOG_DIR: "__unittest_logs"
-     - name: Notify slack if failed
-       if: failure()
-       uses: slackapi/slack-github-action@v1.23.0
-       env:
-         SLACK_WEBHOOK_URL: ${{ secrets.SLACK_WEBHOOK_URL_DEVELOP_CHANNEL }}
-       with:
-         payload: |
-           {"text": "Nightly CI failed for cargo test"}
+
+  check-status:
+    name: Check status
+    needs: [
+      sqlness-test,
+      sqlness-windows,
+      test-on-windows,
+    ]
+    if: ${{ github.repository == 'GreptimeTeam/greptimedb' }}
+    runs-on: ubuntu-20.04
+    outputs:
+      check-result: ${{ steps.set-check-result.outputs.check-result }}
+    steps:
+      - name: Set check result
+        id: set-check-result
+        run: |
+          echo "check-result=success" >> $GITHUB_OUTPUT
+
+  notification:
+    if: ${{ github.repository == 'GreptimeTeam/greptimedb' && always() }} # Not requiring successful dependent jobs, always run.
+    name: Send notification to Greptime team
+    needs: [
+      check-status
+    ]
+    runs-on: ubuntu-20.04
+    env:
+      SLACK_WEBHOOK_URL: ${{ secrets.SLACK_WEBHOOK_URL_DEVELOP_CHANNEL }}
+    steps:
+      - uses: actions/checkout@v4
+      - uses: ./.github/actions/setup-cyborg
+      - name: Report CI status
+        id: report-ci-status
+        working-directory: cyborg
+        run: pnpm tsx bin/report-ci-failure.ts
+        env:
+          GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
+          CI_REPORT_STATUS: ${{ needs.check-status.outputs.check-result == 'success' }}
+      - name: Notify dev build successful result
+        uses: slackapi/slack-github-action@v1.23.0
+        if: ${{ needs.check-status.outputs.check-result == 'success' }}
+        with:
+          payload: |
+            {"text": "Nightly CI has completed successfully."}
+
+      - name: Notify dev build failed result
+        uses: slackapi/slack-github-action@v1.23.0
+        if: ${{ needs.check-status.outputs.check-result != 'success' }}
+        with:
+          payload: |
+            {"text": "Nightly CI failed has failed, please check ${{ steps.report-ci-status.outputs.html_url }}."}

.github/workflows/release.yml | 32 (vendored)
@@ -82,7 +82,7 @@ on:
 # Use env variables to control all the release process.
 env:
   # The arguments of building greptime.
-  RUST_TOOLCHAIN: nightly-2024-04-18
+  RUST_TOOLCHAIN: nightly-2024-04-20
   CARGO_PROFILE: nightly
 
   # Controls whether to run tests, include unit-test, integration-test and sqlness.
@@ -91,7 +91,12 @@ env:
   # The scheduled version is '${{ env.NEXT_RELEASE_VERSION }}-nightly-YYYYMMDD', like v0.2.0-nigthly-20230313;
   NIGHTLY_RELEASE_PREFIX: nightly
   # Note: The NEXT_RELEASE_VERSION should be modified manually by every formal release.
-  NEXT_RELEASE_VERSION: v0.8.0
+  NEXT_RELEASE_VERSION: v0.9.0
+
+# Permission reference: https://docs.github.com/en/actions/using-jobs/assigning-permissions-to-jobs
+permissions:
+  issues: write # Allows the action to create issues for cyborg.
+  contents: write # Allows the action to create a release.
 
 jobs:
   allocate-runners:
@@ -102,7 +107,7 @@ jobs:
       linux-amd64-runner: ${{ steps.start-linux-amd64-runner.outputs.label }}
       linux-arm64-runner: ${{ steps.start-linux-arm64-runner.outputs.label }}
       macos-runner: ${{ inputs.macos_runner || vars.DEFAULT_MACOS_RUNNER }}
-      windows-runner: windows-latest-8-cores
+      windows-runner: windows-2022-8-cores
 
       # The following EC2 resource id will be used for resource releasing.
       linux-amd64-ec2-runner-label: ${{ steps.start-linux-amd64-runner.outputs.label }}
@@ -245,7 +250,7 @@ jobs:
       - name: Set build macos result
         id: set-build-macos-result
         run: |
           echo "build-macos-result=success" >> $GITHUB_OUTPUT
 
   build-windows-artifacts:
     name: Build Windows artifacts
@@ -318,7 +323,7 @@ jobs:
       - name: Set build image result
         id: set-build-image-result
         run: |
           echo "build-image-result=success" >> $GITHUB_OUTPUT
 
   release-cn-artifacts:
     name: Release artifacts to CN region
@@ -436,7 +441,7 @@ jobs:
           github-token: ${{ secrets.GH_PERSONAL_ACCESS_TOKEN }}
 
   notification:
-    if: ${{ always() || github.repository == 'GreptimeTeam/greptimedb' }}
+    if: ${{ github.repository == 'GreptimeTeam/greptimedb' && (github.event_name == 'push' || github.event_name == 'schedule') && always() }}
     name: Send notification to Greptime team
     needs: [
       release-images-to-dockerhub,
@@ -447,16 +452,25 @@ jobs:
     env:
       SLACK_WEBHOOK_URL: ${{ secrets.SLACK_WEBHOOK_URL_DEVELOP_CHANNEL }}
     steps:
-      - name: Notifiy release successful result
+      - uses: actions/checkout@v4
+      - uses: ./.github/actions/setup-cyborg
+      - name: Report CI status
+        id: report-ci-status
+        working-directory: cyborg
+        run: pnpm tsx bin/report-ci-failure.ts
+        env:
+          GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
+          CI_REPORT_STATUS: ${{ needs.release-images-to-dockerhub.outputs.build-image-result == 'success' && needs.build-windows-artifacts.outputs.build-windows-result == 'success' && needs.build-macos-artifacts.outputs.build-macos-result == 'success' }}
+      - name: Notify release successful result
         uses: slackapi/slack-github-action@v1.25.0
         if: ${{ needs.release-images-to-dockerhub.outputs.build-image-result == 'success' && needs.build-windows-artifacts.outputs.build-windows-result == 'success' && needs.build-macos-artifacts.outputs.build-macos-result == 'success' }}
         with:
           payload: |
             {"text": "GreptimeDB's release version has completed successfully."}
 
-      - name: Notifiy release failed result
+      - name: Notify release failed result
         uses: slackapi/slack-github-action@v1.25.0
         if: ${{ needs.release-images-to-dockerhub.outputs.build-image-result != 'success' || needs.build-windows-artifacts.outputs.build-windows-result != 'success' || needs.build-macos-artifacts.outputs.build-macos-result != 'success' }}
         with:
           payload: |
-            {"text": "GreptimeDB's release version has failed, please check 'https://github.com/GreptimeTeam/greptimedb/actions/workflows/release.yml'."}
+            {"text": "GreptimeDB's release version has failed, please check ${{ steps.report-ci-status.outputs.html_url }}."}

.github/workflows/schedule.yml | 11 (vendored)
@@ -16,16 +16,7 @@ jobs:
     if: ${{ github.repository == 'GreptimeTeam/greptimedb' }}
     steps:
       - uses: actions/checkout@v4
-      - uses: actions/setup-node@v4
-        with:
-          node-version: 22
-      - uses: pnpm/action-setup@v3
-        with:
-          package_json_file: 'cyborg/package.json'
-          run_install: true
-      - name: Describe the Environment
-        working-directory: cyborg
-        run: pnpm tsx -v
+      - uses: ./.github/actions/setup-cyborg
       - name: Do Maintenance
         working-directory: cyborg
         run: pnpm tsx bin/schedule.ts

.github/workflows/semantic-pull-request.yml | 11 (vendored)
@@ -13,16 +13,7 @@ jobs:
     timeout-minutes: 10
     steps:
       - uses: actions/checkout@v4
-      - uses: actions/setup-node@v4
-        with:
-          node-version: 22
-      - uses: pnpm/action-setup@v3
-        with:
-          package_json_file: 'cyborg/package.json'
-          run_install: true
-      - name: Describe the Environment
-        working-directory: cyborg
-        run: pnpm tsx -v
+      - uses: ./.github/actions/setup-cyborg
       - name: Check Pull Request
         working-directory: cyborg
         run: pnpm tsx bin/check-pull-request.ts

Cargo.lock | 1199 (generated): file diff suppressed because it is too large

Cargo.toml | 25
@@ -4,6 +4,7 @@ members = [
     "src/api",
     "src/auth",
     "src/catalog",
+    "src/cache",
     "src/client",
     "src/cmd",
     "src/common/base",
@@ -63,13 +64,14 @@ members = [
 resolver = "2"
 
 [workspace.package]
-version = "0.7.2"
+version = "0.8.1"
 edition = "2021"
 license = "Apache-2.0"
 
 [workspace.lints]
 clippy.print_stdout = "warn"
 clippy.print_stderr = "warn"
+clippy.dbg_macro = "warn"
 clippy.implicit_clone = "warn"
 clippy.readonly_write_lock = "allow"
 rust.unknown_lints = "deny"
@@ -99,6 +101,7 @@ bytemuck = "1.12"
 bytes = { version = "1.5", features = ["serde"] }
 chrono = { version = "0.4", features = ["serde"] }
 clap = { version = "4.4", features = ["derive"] }
+config = "0.13.0"
 crossbeam-utils = "0.8"
 dashmap = "5.4"
 datafusion = { git = "https://github.com/apache/arrow-datafusion.git", rev = "34eda15b73a9e278af8844b30ed2f1c21c10359c" }
@@ -107,6 +110,7 @@ datafusion-expr = { git = "https://github.com/apache/arrow-datafusion.git", rev
 datafusion-functions = { git = "https://github.com/apache/arrow-datafusion.git", rev = "34eda15b73a9e278af8844b30ed2f1c21c10359c" }
 datafusion-optimizer = { git = "https://github.com/apache/arrow-datafusion.git", rev = "34eda15b73a9e278af8844b30ed2f1c21c10359c" }
 datafusion-physical-expr = { git = "https://github.com/apache/arrow-datafusion.git", rev = "34eda15b73a9e278af8844b30ed2f1c21c10359c" }
+datafusion-physical-plan = { git = "https://github.com/apache/arrow-datafusion.git", rev = "34eda15b73a9e278af8844b30ed2f1c21c10359c" }
 datafusion-sql = { git = "https://github.com/apache/arrow-datafusion.git", rev = "34eda15b73a9e278af8844b30ed2f1c21c10359c" }
 datafusion-substrait = { git = "https://github.com/apache/arrow-datafusion.git", rev = "34eda15b73a9e278af8844b30ed2f1c21c10359c" }
 derive_builder = "0.12"
@@ -116,7 +120,7 @@ etcd-client = { git = "https://github.com/MichaelScofield/etcd-client.git", rev
 fst = "0.4.7"
 futures = "0.3"
 futures-util = "0.3"
-greptime-proto = { git = "https://github.com/GreptimeTeam/greptime-proto.git", rev = "219b2409bb701f75b43fc0ba64967d2ed8e75491" }
+greptime-proto = { git = "https://github.com/GreptimeTeam/greptime-proto.git", rev = "ae26136accd82fbdf8be540cd502f2e94951077e" }
 humantime = "2.1"
 humantime-serde = "1.1"
 itertools = "0.10"
@@ -136,6 +140,7 @@ parquet = { version = "51.0.0", default-features = false, features = ["arrow", "
 paste = "1.0"
 pin-project = "1.0"
 prometheus = { version = "0.13.3", features = ["process"] }
+promql-parser = { version = "0.4" }
 prost = "0.12"
 raft-engine = { version = "0.4.1", default-features = false }
 rand = "0.8"
@@ -154,10 +159,10 @@ serde = { version = "1.0", features = ["derive"] }
 serde_json = { version = "1.0", features = ["float_roundtrip"] }
 serde_with = "3"
 smallvec = { version = "1", features = ["serde"] }
-snafu = "0.7"
+snafu = "0.8"
 sysinfo = "0.30"
 # on branch v0.44.x
-sqlparser = { git = "https://github.com/GreptimeTeam/sqlparser-rs.git", rev = "c919990bf62ad38d2b0c0a3bc90b26ad919d51b0", features = [
+sqlparser = { git = "https://github.com/GreptimeTeam/sqlparser-rs.git", rev = "e4e496b8d62416ad50ce70a1b460c7313610cf5d", features = [
   "visitor",
 ] }
 strum = { version = "0.25", features = ["derive"] }
@@ -166,13 +171,15 @@ tokio = { version = "1.36", features = ["full"] }
 tokio-stream = { version = "0.1" }
 tokio-util = { version = "0.7", features = ["io-util", "compat"] }
 toml = "0.8.8"
-tonic = { version = "0.11", features = ["tls"] }
+tonic = { version = "0.11", features = ["tls", "gzip", "zstd"] }
+tower = { version = "0.4" }
 uuid = { version = "1.7", features = ["serde", "v4", "fast-rng"] }
 zstd = "0.13"
 
 ## workspaces members
 api = { path = "src/api" }
 auth = { path = "src/auth" }
+cache = { path = "src/cache" }
 catalog = { path = "src/catalog" }
 client = { path = "src/client" }
 cmd = { path = "src/cmd" }
@@ -204,6 +211,7 @@ common-wal = { path = "src/common/wal" }
 datanode = { path = "src/datanode" }
 datatypes = { path = "src/datatypes" }
 file-engine = { path = "src/file-engine" }
+flow = { path = "src/flow" }
 frontend = { path = "src/frontend" }
 index = { path = "src/index" }
 log-store = { path = "src/log-store" }
@@ -225,8 +233,6 @@ sql = { path = "src/sql" }
 store-api = { path = "src/store-api" }
 substrait = { path = "src/common/substrait" }
 table = { path = "src/table" }
-# TODO some code depends on this
-tests-integration = { path = "tests-integration" }
 
 [workspace.dependencies.meter-macros]
 git = "https://github.com/GreptimeTeam/greptime-meter.git"
@@ -242,6 +248,11 @@ lto = "thin"
 debug = false
 incremental = false
 
+[profile.ci]
+inherits = "dev"
+debug = false
+strip = true
+
 [profile.dev.package.sqlness-runner]
 debug = false
 strip = true
|
||||||
|
|||||||
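Note on the new `[profile.ci]` block: it defines a custom Cargo profile that inherits from `dev` while turning off debug info and stripping symbols, trading debuggability for smaller and faster CI artifacts. Presumably CI jobs opt into it with `cargo build --profile ci`; the exact invocation is an assumption here, only the profile definition itself comes from the diff above.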
Makefile (2 changed lines)
@@ -199,7 +199,7 @@ config-docs: ## Generate configuration documentation from toml files.
 docker run --rm \
 -v ${PWD}:/greptimedb \
 -w /greptimedb/config \
-toml2docs/toml2docs:latest \
+toml2docs/toml2docs:v0.1.1 \
 -p '##' \
 -t ./config-docs-template.md \
 -o ./config.md
@@ -12,7 +12,7 @@ api.workspace = true
 arrow.workspace = true
 chrono.workspace = true
 clap.workspace = true
-client.workspace = true
+client = { workspace = true, features = ["testing"] }
 common-base.workspace = true
 common-telemetry.workspace = true
 common-wal.workspace = true
@@ -33,8 +33,6 @@ rand.workspace = true
 rskafka.workspace = true
 serde.workspace = true
 store-api.workspace = true
-# TODO depend `Database` client
-tests-integration.workspace = true
 tokio.workspace = true
 toml.workspace = true
 uuid.workspace = true
@@ -28,6 +28,7 @@ use rand::distributions::{Alphanumeric, DistString, Uniform};
 use rand::rngs::SmallRng;
 use rand::{Rng, SeedableRng};
 use serde::{Deserialize, Serialize};
+use store_api::logstore::provider::Provider;
 use store_api::logstore::LogStore;
 use store_api::storage::RegionId;

@@ -210,7 +211,7 @@ impl From<Args> for Config {
 pub struct Region {
     id: RegionId,
     schema: Vec<ColumnSchema>,
-    wal_options: WalOptions,
+    provider: Provider,
     next_sequence: AtomicU64,
     next_entry_id: AtomicU64,
     next_timestamp: AtomicI64,
@@ -227,10 +228,14 @@ impl Region {
         num_rows: u32,
         rng_seed: u64,
     ) -> Self {
+        let provider = match wal_options {
+            WalOptions::RaftEngine => Provider::raft_engine_provider(id.as_u64()),
+            WalOptions::Kafka(opts) => Provider::kafka_provider(opts.topic),
+        };
         Self {
             id,
             schema,
-            wal_options,
+            provider,
             next_sequence: AtomicU64::new(1),
             next_entry_id: AtomicU64::new(1),
             next_timestamp: AtomicI64::new(1655276557000),
@@ -258,14 +263,14 @@ impl Region {
             self.id,
             self.next_entry_id.fetch_add(1, Ordering::Relaxed),
             &entry,
-            &self.wal_options,
+            &self.provider,
         )
         .unwrap();
     }

     /// Replays the region.
     pub async fn replay<S: LogStore>(&self, wal: &Arc<Wal<S>>) {
-        let mut wal_stream = wal.scan(self.id, 0, &self.wal_options).unwrap();
+        let mut wal_stream = wal.scan(self.id, 0, &self.provider).unwrap();
         while let Some(res) = wal_stream.next().await {
             let (_, entry) = res.unwrap();
             metrics::METRIC_WAL_READ_BYTES_TOTAL.inc_by(Self::entry_estimated_size(&entry) as u64);
@@ -1,10 +1,16 @@
 # Configurations

+- [Standalone Mode](#standalone-mode)
+- [Distributed Mode](#distributed-mode)
+  - [Frontend](#frontend)
+  - [Metasrv](#metasrv)
+  - [Datanode](#datanode)
+
 ## Standalone Mode

 {{ toml2docs "./standalone.example.toml" }}

-## Cluster Mode
+## Distributed Mode

 ### Frontend

@@ -1,5 +1,11 @@
 # Configurations

+- [Standalone Mode](#standalone-mode)
+- [Distributed Mode](#distributed-mode)
+  - [Frontend](#frontend)
+  - [Metasrv](#metasrv)
+  - [Datanode](#datanode)
+
 ## Standalone Mode

 | Key | Type | Default | Descriptions |
@@ -14,6 +20,11 @@
 | `grpc` | -- | -- | The gRPC server options. |
 | `grpc.addr` | String | `127.0.0.1:4001` | The address to bind the gRPC server. |
 | `grpc.runtime_size` | Integer | `8` | The number of server worker threads. |
+| `grpc.tls` | -- | -- | gRPC server TLS options, see `mysql.tls` section. |
+| `grpc.tls.mode` | String | `disable` | TLS mode. |
+| `grpc.tls.cert_path` | String | `None` | Certificate file path. |
+| `grpc.tls.key_path` | String | `None` | Private key file path. |
+| `grpc.tls.watch` | Bool | `false` | Watch for Certificate and key file change and auto reload.<br/>For now, gRPC tls config does not support auto reload. |
 | `mysql` | -- | -- | MySQL server options. |
 | `mysql.enable` | Bool | `true` | Whether to enable. |
 | `mysql.addr` | String | `127.0.0.1:4002` | The addr to bind the MySQL server. |
@@ -27,7 +38,7 @@
 | `postgres.enable` | Bool | `true` | Whether to enable |
 | `postgres.addr` | String | `127.0.0.1:4003` | The addr to bind the PostgresSQL server. |
 | `postgres.runtime_size` | Integer | `2` | The number of server worker threads. |
-| `postgres.tls` | -- | -- | PostgresSQL server TLS options, see `mysql_options.tls` section. |
+| `postgres.tls` | -- | -- | PostgresSQL server TLS options, see `mysql.tls` section. |
 | `postgres.tls.mode` | String | `disable` | TLS mode. |
 | `postgres.tls.cert_path` | String | `None` | Certificate file path. |
 | `postgres.tls.key_path` | String | `None` | Private key file path. |
@@ -96,6 +107,10 @@
 | `region_engine.mito.sst_meta_cache_size` | String | `128MB` | Cache size for SST metadata. Setting it to 0 to disable the cache.<br/>If not set, it's default to 1/32 of OS memory with a max limitation of 128MB. |
 | `region_engine.mito.vector_cache_size` | String | `512MB` | Cache size for vectors and arrow arrays. Setting it to 0 to disable the cache.<br/>If not set, it's default to 1/16 of OS memory with a max limitation of 512MB. |
 | `region_engine.mito.page_cache_size` | String | `512MB` | Cache size for pages of SST row groups. Setting it to 0 to disable the cache.<br/>If not set, it's default to 1/16 of OS memory with a max limitation of 512MB. |
+| `region_engine.mito.enable_experimental_write_cache` | Bool | `false` | Whether to enable the experimental write cache. |
+| `region_engine.mito.experimental_write_cache_path` | String | `""` | File system path for write cache, defaults to `{data_home}/write_cache`. |
+| `region_engine.mito.experimental_write_cache_size` | String | `512MB` | Capacity for write cache. |
+| `region_engine.mito.experimental_write_cache_ttl` | String | `1h` | TTL for write cache. |
 | `region_engine.mito.sst_write_buffer_size` | String | `8MB` | Buffer size for SST writing. |
 | `region_engine.mito.scan_parallelism` | Integer | `0` | Parallelism to scan a region (default: 1/4 of cpu cores).<br/>- `0`: using the default value (1/4 of cpu cores).<br/>- `1`: scan in current thread.<br/>- `n`: scan in parallelism n. |
 | `region_engine.mito.parallel_scan_channel_size` | Integer | `32` | Capacity of the channel to send data from parallel scan tasks to the main task. |
@@ -127,9 +142,11 @@
 | `export_metrics.remote_write` | -- | -- | -- |
 | `export_metrics.remote_write.url` | String | `""` | The url the metrics send to. The url example can be: `http://127.0.0.1:4000/v1/prometheus/write?db=information_schema`. |
 | `export_metrics.remote_write.headers` | InlineTable | -- | HTTP headers of Prometheus remote-write carry. |
+| `tracing` | -- | -- | The tracing options. Only effect when compiled with `tokio-console` feature. |
+| `tracing.tokio_console_addr` | String | `None` | The tokio console address. |

-## Cluster Mode
+## Distributed Mode

 ### Frontend

@@ -147,6 +164,11 @@
 | `grpc` | -- | -- | The gRPC server options. |
 | `grpc.addr` | String | `127.0.0.1:4001` | The address to bind the gRPC server. |
 | `grpc.runtime_size` | Integer | `8` | The number of server worker threads. |
+| `grpc.tls` | -- | -- | gRPC server TLS options, see `mysql.tls` section. |
+| `grpc.tls.mode` | String | `disable` | TLS mode. |
+| `grpc.tls.cert_path` | String | `None` | Certificate file path. |
+| `grpc.tls.key_path` | String | `None` | Private key file path. |
+| `grpc.tls.watch` | Bool | `false` | Watch for Certificate and key file change and auto reload.<br/>For now, gRPC tls config does not support auto reload. |
 | `mysql` | -- | -- | MySQL server options. |
 | `mysql.enable` | Bool | `true` | Whether to enable. |
 | `mysql.addr` | String | `127.0.0.1:4002` | The addr to bind the MySQL server. |
@@ -160,7 +182,7 @@
 | `postgres.enable` | Bool | `true` | Whether to enable |
 | `postgres.addr` | String | `127.0.0.1:4003` | The addr to bind the PostgresSQL server. |
 | `postgres.runtime_size` | Integer | `2` | The number of server worker threads. |
-| `postgres.tls` | -- | -- | PostgresSQL server TLS options, see `mysql_options.tls` section. |
+| `postgres.tls` | -- | -- | PostgresSQL server TLS options, see `mysql.tls` section. |
 | `postgres.tls.mode` | String | `disable` | TLS mode. |
 | `postgres.tls.cert_path` | String | `None` | Certificate file path. |
 | `postgres.tls.key_path` | String | `None` | Private key file path. |
@@ -184,7 +206,6 @@
 | `meta_client.metadata_cache_tti` | String | `5m` | -- |
 | `datanode` | -- | -- | Datanode options. |
 | `datanode.client` | -- | -- | Datanode client options. |
-| `datanode.client.timeout` | String | `10s` | -- |
 | `datanode.client.connect_timeout` | String | `10s` | -- |
 | `datanode.client.tcp_nodelay` | Bool | `true` | -- |
 | `logging` | -- | -- | The logging options. |
@@ -203,6 +224,8 @@
 | `export_metrics.remote_write` | -- | -- | -- |
 | `export_metrics.remote_write.url` | String | `""` | The url the metrics send to. The url example can be: `http://127.0.0.1:4000/v1/prometheus/write?db=information_schema`. |
 | `export_metrics.remote_write.headers` | InlineTable | -- | HTTP headers of Prometheus remote-write carry. |
+| `tracing` | -- | -- | The tracing options. Only effect when compiled with `tokio-console` feature. |
+| `tracing.tokio_console_addr` | String | `None` | The tokio console address. |

 ### Metasrv
@@ -259,6 +282,8 @@
 | `export_metrics.remote_write` | -- | -- | -- |
 | `export_metrics.remote_write.url` | String | `""` | The url the metrics send to. The url example can be: `http://127.0.0.1:4000/v1/prometheus/write?db=information_schema`. |
 | `export_metrics.remote_write.headers` | InlineTable | -- | HTTP headers of Prometheus remote-write carry. |
+| `tracing` | -- | -- | The tracing options. Only effect when compiled with `tokio-console` feature. |
+| `tracing.tokio_console_addr` | String | `None` | The tokio console address. |

 ### Datanode
@@ -339,6 +364,10 @@
 | `region_engine.mito.sst_meta_cache_size` | String | `128MB` | Cache size for SST metadata. Setting it to 0 to disable the cache.<br/>If not set, it's default to 1/32 of OS memory with a max limitation of 128MB. |
 | `region_engine.mito.vector_cache_size` | String | `512MB` | Cache size for vectors and arrow arrays. Setting it to 0 to disable the cache.<br/>If not set, it's default to 1/16 of OS memory with a max limitation of 512MB. |
 | `region_engine.mito.page_cache_size` | String | `512MB` | Cache size for pages of SST row groups. Setting it to 0 to disable the cache.<br/>If not set, it's default to 1/16 of OS memory with a max limitation of 512MB. |
+| `region_engine.mito.enable_experimental_write_cache` | Bool | `false` | Whether to enable the experimental write cache. |
+| `region_engine.mito.experimental_write_cache_path` | String | `""` | File system path for write cache, defaults to `{data_home}/write_cache`. |
+| `region_engine.mito.experimental_write_cache_size` | String | `512MB` | Capacity for write cache. |
+| `region_engine.mito.experimental_write_cache_ttl` | String | `1h` | TTL for write cache. |
 | `region_engine.mito.sst_write_buffer_size` | String | `8MB` | Buffer size for SST writing. |
 | `region_engine.mito.scan_parallelism` | Integer | `0` | Parallelism to scan a region (default: 1/4 of cpu cores).<br/>- `0`: using the default value (1/4 of cpu cores).<br/>- `1`: scan in current thread.<br/>- `n`: scan in parallelism n. |
 | `region_engine.mito.parallel_scan_channel_size` | Integer | `32` | Capacity of the channel to send data from parallel scan tasks to the main task. |
@@ -370,3 +399,5 @@
 | `export_metrics.remote_write` | -- | -- | -- |
 | `export_metrics.remote_write.url` | String | `""` | The url the metrics send to. The url example can be: `http://127.0.0.1:4000/v1/prometheus/write?db=information_schema`. |
 | `export_metrics.remote_write.headers` | InlineTable | -- | HTTP headers of Prometheus remote-write carry. |
+| `tracing` | -- | -- | The tracing options. Only effect when compiled with `tokio-console` feature. |
+| `tracing.tokio_console_addr` | String | `None` | The tokio console address. |
@@ -324,6 +324,18 @@ vector_cache_size = "512MB"
 ## If not set, it's default to 1/16 of OS memory with a max limitation of 512MB.
 page_cache_size = "512MB"

+## Whether to enable the experimental write cache.
+enable_experimental_write_cache = false
+
+## File system path for write cache, defaults to `{data_home}/write_cache`.
+experimental_write_cache_path = ""
+
+## Capacity for write cache.
+experimental_write_cache_size = "512MB"
+
+## TTL for write cache.
+experimental_write_cache_ttl = "1h"
+
 ## Buffer size for SST writing.
 sst_write_buffer_size = "8MB"

@@ -428,3 +440,9 @@ url = ""

 ## HTTP headers of Prometheus remote-write carry.
 headers = { }
+
+## The tracing options. Only effect when compiled with `tokio-console` feature.
+[tracing]
+## The tokio console address.
+## +toml2docs:none-default
+tokio_console_addr = "127.0.0.1"
@@ -30,6 +30,23 @@ addr = "127.0.0.1:4001"
 ## The number of server worker threads.
 runtime_size = 8

+## gRPC server TLS options, see `mysql.tls` section.
+[grpc.tls]
+## TLS mode.
+mode = "disable"
+
+## Certificate file path.
+## +toml2docs:none-default
+cert_path = ""
+
+## Private key file path.
+## +toml2docs:none-default
+key_path = ""
+
+## Watch for Certificate and key file change and auto reload.
+## For now, gRPC tls config does not support auto reload.
+watch = false
+
 ## MySQL server options.
 [mysql]
 ## Whether to enable.
@@ -70,7 +87,7 @@ addr = "127.0.0.1:4003"
 ## The number of server worker threads.
 runtime_size = 2

-## PostgresSQL server TLS options, see `mysql_options.tls` section.
+## PostgresSQL server TLS options, see `mysql.tls` section.
 [postgres.tls]
 ## TLS mode.
 mode = "disable"
@@ -136,7 +153,6 @@ metadata_cache_tti = "5m"
 [datanode]
 ## Datanode client options.
 [datanode.client]
-timeout = "10s"
 connect_timeout = "10s"
 tcp_nodelay = true

@@ -186,3 +202,9 @@ url = ""

 ## HTTP headers of Prometheus remote-write carry.
 headers = { }
+
+## The tracing options. Only effect when compiled with `tokio-console` feature.
+[tracing]
+## The tokio console address.
+## +toml2docs:none-default
+tokio_console_addr = "127.0.0.1"
@@ -141,3 +141,9 @@ url = ""

 ## HTTP headers of Prometheus remote-write carry.
 headers = { }
+
+## The tracing options. Only effect when compiled with `tokio-console` feature.
+[tracing]
+## The tokio console address.
+## +toml2docs:none-default
+tokio_console_addr = "127.0.0.1"
@@ -25,6 +25,23 @@ addr = "127.0.0.1:4001"
 ## The number of server worker threads.
 runtime_size = 8

+## gRPC server TLS options, see `mysql.tls` section.
+[grpc.tls]
+## TLS mode.
+mode = "disable"
+
+## Certificate file path.
+## +toml2docs:none-default
+cert_path = ""
+
+## Private key file path.
+## +toml2docs:none-default
+key_path = ""
+
+## Watch for Certificate and key file change and auto reload.
+## For now, gRPC tls config does not support auto reload.
+watch = false
+
 ## MySQL server options.
 [mysql]
 ## Whether to enable.
@@ -65,7 +82,7 @@ addr = "127.0.0.1:4003"
 ## The number of server worker threads.
 runtime_size = 2

-## PostgresSQL server TLS options, see `mysql_options.tls` section.
+## PostgresSQL server TLS options, see `mysql.tls` section.
 [postgres.tls]
 ## TLS mode.
 mode = "disable"
@@ -367,6 +384,18 @@ vector_cache_size = "512MB"
 ## If not set, it's default to 1/16 of OS memory with a max limitation of 512MB.
 page_cache_size = "512MB"

+## Whether to enable the experimental write cache.
+enable_experimental_write_cache = false
+
+## File system path for write cache, defaults to `{data_home}/write_cache`.
+experimental_write_cache_path = ""
+
+## Capacity for write cache.
+experimental_write_cache_size = "512MB"
+
+## TTL for write cache.
+experimental_write_cache_ttl = "1h"
+
 ## Buffer size for SST writing.
 sst_write_buffer_size = "8MB"

@@ -471,3 +500,9 @@ url = ""

 ## HTTP headers of Prometheus remote-write carry.
 headers = { }
+
+## The tracing options. Only effect when compiled with `tokio-console` feature.
+[tracing]
+## The tokio console address.
+## +toml2docs:none-default
+tokio_console_addr = "127.0.0.1"
cyborg/bin/follow-up-docs-issue.ts (new file, 106 lines)

/*
 * Copyright 2023 Greptime Team
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

import * as core from '@actions/core'
import {handleError, obtainClient} from "@/common";
import {context} from "@actions/github";
import {PullRequestEditedEvent, PullRequestEvent, PullRequestOpenedEvent} from "@octokit/webhooks-types";
// @ts-expect-error moduleResolution:nodenext issue 54523
import {RequestError} from "@octokit/request-error";

const needFollowUpDocs = "[x] This PR requires documentation updates."
const labelDocsNotRequired = "docs-not-required"
const labelDocsRequired = "docs-required"

async function main() {
    if (!context.payload.pull_request) {
        throw new Error(`Only pull request event supported. ${context.eventName} is unsupported.`)
    }

    const client = obtainClient("GITHUB_TOKEN")
    const docsClient = obtainClient("DOCS_REPO_TOKEN")
    const payload = context.payload as PullRequestEvent
    const { owner, repo, number, actor, title, html_url } = {
        owner: payload.pull_request.base.user.login,
        repo: payload.pull_request.base.repo.name,
        number: payload.pull_request.number,
        title: payload.pull_request.title,
        html_url: payload.pull_request.html_url,
        actor: payload.pull_request.user.login,
    }
    const followUpDocs = checkPullRequestEvent(payload)
    if (followUpDocs) {
        core.info("Follow up docs.")
        await client.rest.issues.removeLabel({
            owner, repo, issue_number: number, name: labelDocsNotRequired,
        }).catch((e: RequestError) => {
            if (e.status != 404) {
                throw e;
            }
            core.debug(`Label ${labelDocsNotRequired} not exist.`)
        })
        await client.rest.issues.addLabels({
            owner, repo, issue_number: number, labels: [labelDocsRequired],
        })
        await docsClient.rest.issues.create({
            owner: 'GreptimeTeam',
            repo: 'docs',
            title: `Update docs for ${title}`,
            body: `A document change request is generated from ${html_url}`,
            assignee: actor,
        }).then((res) => {
            core.info(`Created issue ${res.data}`)
        })
    } else {
        core.info("No need to follow up docs.")
        await client.rest.issues.removeLabel({
            owner, repo, issue_number: number, name: labelDocsRequired
        }).catch((e: RequestError) => {
            if (e.status != 404) {
                throw e;
            }
            core.debug(`Label ${labelDocsRequired} not exist.`)
        })
        await client.rest.issues.addLabels({
            owner, repo, issue_number: number, labels: [labelDocsNotRequired],
        })
    }
}

function checkPullRequestEvent(payload: PullRequestEvent) {
    switch (payload.action) {
        case "opened":
            return checkPullRequestOpenedEvent(payload as PullRequestOpenedEvent)
        case "edited":
            return checkPullRequestEditedEvent(payload as PullRequestEditedEvent)
        default:
            throw new Error(`${payload.action} is unsupported.`)
    }
}

function checkPullRequestOpenedEvent(event: PullRequestOpenedEvent): boolean {
    // @ts-ignore
    return event.pull_request.body?.includes(needFollowUpDocs)
}

function checkPullRequestEditedEvent(event: PullRequestEditedEvent): boolean {
    const previous = event.changes.body?.from.includes(needFollowUpDocs)
    const current = event.pull_request.body?.includes(needFollowUpDocs)
    // from docs-not-need to docs-required
    return (!previous) && current
}

main().catch(handleError)
cyborg/bin/report-ci-failure.ts (new file, 83 lines)

/*
 * Copyright 2023 Greptime Team
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

import * as core from '@actions/core'
import {handleError, obtainClient} from "@/common"
import {context} from "@actions/github"
import _ from "lodash"

async function main() {
    const success = process.env["CI_REPORT_STATUS"] === "true"
    core.info(`CI_REPORT_STATUS=${process.env["CI_REPORT_STATUS"]}, resolved to ${success}`)

    const client = obtainClient("GITHUB_TOKEN")
    const title = `Workflow run '${context.workflow}' failed`
    const url = `${process.env["GITHUB_SERVER_URL"]}/${process.env["GITHUB_REPOSITORY"]}/actions/runs/${process.env["GITHUB_RUN_ID"]}`
    const failure_comment = `@GreptimeTeam/db-approver\nNew failure: ${url} `
    const success_comment = `@GreptimeTeam/db-approver\nBack to success: ${url}`

    const {owner, repo} = context.repo
    const labels = ['O-ci-failure']

    const issues = await client.paginate(client.rest.issues.listForRepo, {
        owner,
        repo,
        labels: labels.join(','),
        state: "open",
        sort: "created",
        direction: "desc",
    });
    const issue = _.find(issues, (i) => i.title === title);

    if (issue) { // exist issue
        core.info(`Found previous issue ${issue.html_url}`)
        if (!success) {
            await client.rest.issues.createComment({
                owner,
                repo,
                issue_number: issue.number,
                body: failure_comment,
            })
        } else {
            await client.rest.issues.createComment({
                owner,
                repo,
                issue_number: issue.number,
                body: success_comment,
            })
            await client.rest.issues.update({
                owner,
                repo,
                issue_number: issue.number,
                state: "closed",
                state_reason: "completed",
            })
        }
        core.setOutput("html_url", issue.html_url)
    } else if (!success) { // create new issue for failure
        const issue = await client.rest.issues.create({
            owner,
            repo,
            title,
            labels,
            body: failure_comment,
        })
        core.info(`Created issue ${issue.data.html_url}`)
        core.setOutput("html_url", issue.data.html_url)
    }
}

main().catch(handleError)
@@ -7,6 +7,7 @@
   "dependencies": {
     "@actions/core": "^1.10.1",
     "@actions/github": "^6.0.0",
+    "@octokit/request-error": "^6.1.1",
     "@octokit/webhooks-types": "^7.5.1",
     "conventional-commit-types": "^3.0.0",
     "conventional-commits-parser": "^5.0.0",
cyborg/pnpm-lock.yaml (generated, 10 changed lines)
@@ -11,6 +11,9 @@ dependencies:
   '@actions/github':
     specifier: ^6.0.0
     version: 6.0.0
+  '@octokit/request-error':
+    specifier: ^6.1.1
+    version: 6.1.1
   '@octokit/webhooks-types':
     specifier: ^7.5.1
     version: 7.5.1
@@ -359,6 +362,13 @@ packages:
       once: 1.4.0
     dev: false

+  /@octokit/request-error@6.1.1:
+    resolution: {integrity: sha512-1mw1gqT3fR/WFvnoVpY/zUM2o/XkMs/2AszUUG9I69xn0JFLv6PGkPhNk5lbfvROs79wiS0bqiJNxfCZcRJJdg==}
+    engines: {node: '>= 18'}
+    dependencies:
+      '@octokit/types': 13.5.0
+    dev: false
+
   /@octokit/request@8.4.0:
     resolution: {integrity: sha512-9Bb014e+m2TgBeEJGEbdplMVWwPmL1FPtggHQRkV+WVsMggPtEkLKPlcVYm/o8xKLkpJ7B+6N8WfQMtDLX2Dpw==}
     engines: {node: '>= 18'}
docker/ci/ubuntu/Dockerfile.fuzztests (new file, 16 lines)

FROM ubuntu:22.04

# The binary name of GreptimeDB executable.
# Defaults to "greptime", but sometimes in other projects it might be different.
ARG TARGET_BIN=greptime

RUN apt-get update && DEBIAN_FRONTEND=noninteractive apt-get install -y \
    ca-certificates \
    curl

ARG BINARY_PATH
ADD $BINARY_PATH/$TARGET_BIN /greptime/bin/

ENV PATH /greptime/bin/:$PATH

ENTRYPOINT ["greptime"]
@@ -34,7 +34,7 @@ RUN rustup toolchain install ${RUST_TOOLCHAIN}
 RUN rustup target add aarch64-linux-android

 # Install cargo-ndk
-RUN cargo install cargo-ndk
+RUN cargo install cargo-ndk@3.5.4
 ENV ANDROID_NDK_HOME $NDK_ROOT

 # Builder entrypoint.
@@ -23,28 +23,28 @@

 ## Write performance

 | Environment     | Ingest rate (rows/s) |
-| ------------------ | --------------------- |
-| Local              | 3695814.64            |
-| EC2 c5d.2xlarge    | 2987166.64            |
+| --------------- | -------------------- |
+| Local           | 369581.464           |
+| EC2 c5d.2xlarge | 298716.664           |

 ## Query performance

 | Query type            | Local (ms) | EC2 c5d.2xlarge (ms) |
-| --------------------- | ---------- | ---------------------- |
+| --------------------- | ---------- | -------------------- |
 | cpu-max-all-1         | 30.56      | 54.74                |
 | cpu-max-all-8         | 52.69      | 70.50                |
 | double-groupby-1      | 664.30     | 1366.63              |
 | double-groupby-5      | 1391.26    | 2141.71              |
 | double-groupby-all    | 2828.94    | 3389.59              |
 | groupby-orderby-limit | 718.92     | 1213.90              |
 | high-cpu-1            | 29.21      | 52.98                |
 | high-cpu-all          | 5514.12    | 7194.91              |
 | lastpoint             | 7571.40    | 9423.41              |
 | single-groupby-1-1-1  | 19.09      | 7.77                 |
 | single-groupby-1-1-12 | 27.28      | 51.64                |
 | single-groupby-1-8-1  | 31.85      | 11.64                |
 | single-groupby-5-1-1  | 16.14      | 9.67                 |
 | single-groupby-5-1-12 | 27.21      | 53.62                |
 | single-groupby-5-8-1  | 39.62      | 14.96                |
docs/benchmarks/tsbs/v0.8.0.md (new file, 58 lines)

# TSBS benchmark - v0.8.0

## Environment

### Local

|        |                                    |
| ------ | ---------------------------------- |
| CPU    | AMD Ryzen 7 7735HS (8 core 3.2GHz) |
| Memory | 32GB                               |
| Disk   | SOLIDIGM SSDPFKNU010TZ             |
| OS     | Ubuntu 22.04.2 LTS                 |

### Amazon EC2

|         |                |
| ------- | -------------- |
| Machine | c5d.2xlarge    |
| CPU     | 8 core         |
| Memory  | 16GB           |
| Disk    | 50GB (GP3)     |
| OS      | Ubuntu 22.04.1 |

## Write performance

| Environment     | Ingest rate (rows/s) |
| --------------- | -------------------- |
| Local           | 315369.66            |
| EC2 c5d.2xlarge | 222148.56            |

## Query performance

| Query type            | Local (ms) | EC2 c5d.2xlarge (ms) |
| --------------------- | ---------- | -------------------- |
| cpu-max-all-1         | 24.63      | 15.29                |
| cpu-max-all-8         | 51.69      | 33.53                |
| double-groupby-1      | 673.51     | 1295.38              |
| double-groupby-5      | 1244.93    | 1993.91              |
| double-groupby-all    | 2215.44    | 3056.77              |
| groupby-orderby-limit | 754.50     | 1546.49              |
| high-cpu-1            | 19.62      | 11.58                |
| high-cpu-all          | 5402.31    | 8011.43              |
| lastpoint             | 6756.12    | 9312.67              |
| single-groupby-1-1-1  | 15.70      | 7.67                 |
| single-groupby-1-1-12 | 16.72      | 9.29                 |
| single-groupby-1-8-1  | 26.72      | 17.97                |
| single-groupby-5-1-1  | 18.17      | 10.09                |
| single-groupby-5-1-12 | 20.04      | 12.37                |
| single-groupby-5-8-1  | 35.63      | 23.13                |

`single-groupby-1-1-1` query throughput

| Environment     | Client concurrency | mean time (ms) | qps (queries/sec) |
| --------------- | ------------------ | -------------- | ----------------- |
| Local           | 50                 | 42.87          | 1165.73           |
| Local           | 100                | 89.29          | 1119.38           |
| EC2 c5d.2xlarge | 50                 | 69.25          | 721.73            |
| EC2 c5d.2xlarge | 100                | 140.93         | 709.35            |
@@ -1,2 +1,2 @@
 [toolchain]
-channel = "nightly-2024-04-18"
+channel = "nightly-2024-04-20"
@@ -30,6 +30,7 @@ pub enum Error {
     #[snafu(display("Unknown proto column datatype: {}", datatype))]
     UnknownColumnDataType {
         datatype: i32,
+        #[snafu(implicit)]
         location: Location,
         #[snafu(source)]
         error: prost::DecodeError,
@@ -38,12 +39,14 @@ pub enum Error {
     #[snafu(display("Failed to create column datatype from {:?}", from))]
     IntoColumnDataType {
         from: ConcreteDataType,
+        #[snafu(implicit)]
         location: Location,
     },

     #[snafu(display("Failed to convert column default constraint, column: {}", column))]
     ConvertColumnDefaultConstraint {
         column: String,
+        #[snafu(implicit)]
         location: Location,
         source: datatypes::error::Error,
     },
@@ -51,6 +54,7 @@ pub enum Error {
     #[snafu(display("Invalid column default constraint, column: {}", column))]
     InvalidColumnDefaultConstraint {
         column: String,
+        #[snafu(implicit)]
         location: Location,
         source: datatypes::error::Error,
     },
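The recurring `#[snafu(implicit)]` additions above (and in the other error modules below) go with the `snafu = "0.7"` to `"0.8"` bump in the workspace Cargo.toml: in snafu 0.8, implicitly captured fields such as `Location` must be marked explicitly. A minimal sketch of the pattern; the error type and function here are illustrative, not taken from the repository:

```rust
use snafu::{Location, ResultExt, Snafu};

#[derive(Debug, Snafu)]
enum Error {
    #[snafu(display("Failed to read config file {}", path))]
    ReadConfig {
        path: String,
        #[snafu(source)]
        error: std::io::Error,
        // In snafu 0.8 the Location is only filled in automatically
        // when the field is marked #[snafu(implicit)].
        #[snafu(implicit)]
        location: Location,
    },
}

fn read_config(path: &str) -> Result<String, Error> {
    // The generated context selector only needs the explicit fields;
    // `error` and `location` are captured implicitly at this call site.
    std::fs::read_to_string(path).context(ReadConfigSnafu { path })
}
```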
@@ -480,6 +480,8 @@ fn ddl_request_type(request: &DdlRequest) -> &'static str {
         Some(Expr::TruncateTable(_)) => "ddl.truncate_table",
         Some(Expr::CreateFlow(_)) => "ddl.create_flow",
         Some(Expr::DropFlow(_)) => "ddl.drop_flow",
+        Some(Expr::CreateView(_)) => "ddl.create_view",
+        Some(Expr::DropView(_)) => "ddl.drop_view",
         None => "ddl.empty",
     }
 }
@@ -34,11 +34,13 @@ pub enum Error {
     Io {
         #[snafu(source)]
         error: std::io::Error,
+        #[snafu(implicit)]
         location: Location,
     },

     #[snafu(display("Auth failed"))]
     AuthBackend {
+        #[snafu(implicit)]
         location: Location,
         source: BoxedError,
     },
@@ -72,7 +74,10 @@ pub enum Error {
     },

     #[snafu(display("User is not authorized to perform this action"))]
-    PermissionDenied { location: Location },
+    PermissionDenied {
+        #[snafu(implicit)]
+        location: Location,
+    },
 }

 impl ErrorExt for Error {
src/cache/Cargo.toml (new file, 14 lines)

[package]
name = "cache"
version.workspace = true
edition.workspace = true
license.workspace = true

[dependencies]
catalog.workspace = true
common-error.workspace = true
common-macro.workspace = true
common-meta.workspace = true
moka.workspace = true
snafu.workspace = true
substrait.workspace = true
src/cache/src/error.rs (new file, 44 lines)

// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

use common_error::ext::ErrorExt;
use common_error::status_code::StatusCode;
use common_macro::stack_trace_debug;
use snafu::{Location, Snafu};

#[derive(Snafu)]
#[snafu(visibility(pub))]
#[stack_trace_debug]
pub enum Error {
    #[snafu(display("Failed to get cache from cache registry: {}", name))]
    CacheRequired {
        #[snafu(implicit)]
        location: Location,
        name: String,
    },
}

pub type Result<T> = std::result::Result<T, Error>;

impl ErrorExt for Error {
    fn status_code(&self) -> StatusCode {
        match self {
            Error::CacheRequired { .. } => StatusCode::Internal,
        }
    }

    fn as_any(&self) -> &dyn std::any::Any {
        self
    }
}
src/cache/src/lib.rs (new file, 135 lines)

// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

pub mod error;

use std::sync::Arc;
use std::time::Duration;

use catalog::kvbackend::new_table_cache;
use common_meta::cache::{
    new_table_flownode_set_cache, new_table_info_cache, new_table_name_cache,
    new_table_route_cache, new_view_info_cache, CacheRegistry, CacheRegistryBuilder,
    LayeredCacheRegistryBuilder,
};
use common_meta::kv_backend::KvBackendRef;
use moka::future::CacheBuilder;
use snafu::OptionExt;

use crate::error::Result;

const DEFAULT_CACHE_MAX_CAPACITY: u64 = 65536;
const DEFAULT_CACHE_TTL: Duration = Duration::from_secs(10 * 60);
const DEFAULT_CACHE_TTI: Duration = Duration::from_secs(5 * 60);

pub const TABLE_INFO_CACHE_NAME: &str = "table_info_cache";
pub const VIEW_INFO_CACHE_NAME: &str = "view_info_cache";
pub const TABLE_NAME_CACHE_NAME: &str = "table_name_cache";
pub const TABLE_CACHE_NAME: &str = "table_cache";
pub const TABLE_FLOWNODE_SET_CACHE_NAME: &str = "table_flownode_set_cache";
pub const TABLE_ROUTE_CACHE_NAME: &str = "table_route_cache";

pub fn build_fundamental_cache_registry(kv_backend: KvBackendRef) -> CacheRegistry {
    // Builds table info cache
    let cache = CacheBuilder::new(DEFAULT_CACHE_MAX_CAPACITY)
        .time_to_live(DEFAULT_CACHE_TTL)
        .time_to_idle(DEFAULT_CACHE_TTI)
        .build();
    let table_info_cache = Arc::new(new_table_info_cache(
        TABLE_INFO_CACHE_NAME.to_string(),
        cache,
        kv_backend.clone(),
    ));

    // Builds table name cache
    let cache = CacheBuilder::new(DEFAULT_CACHE_MAX_CAPACITY)
        .time_to_live(DEFAULT_CACHE_TTL)
        .time_to_idle(DEFAULT_CACHE_TTI)
        .build();
    let table_name_cache = Arc::new(new_table_name_cache(
        TABLE_NAME_CACHE_NAME.to_string(),
        cache,
        kv_backend.clone(),
    ));

    // Builds table route cache
    let cache = CacheBuilder::new(DEFAULT_CACHE_MAX_CAPACITY)
        .time_to_live(DEFAULT_CACHE_TTL)
        .time_to_idle(DEFAULT_CACHE_TTI)
        .build();
    let table_route_cache = Arc::new(new_table_route_cache(
        TABLE_ROUTE_CACHE_NAME.to_string(),
        cache,
        kv_backend.clone(),
    ));

    // Builds table flownode set cache
    let cache = CacheBuilder::new(DEFAULT_CACHE_MAX_CAPACITY)
        .time_to_live(DEFAULT_CACHE_TTL)
        .time_to_idle(DEFAULT_CACHE_TTI)
        .build();
    let table_flownode_set_cache = Arc::new(new_table_flownode_set_cache(
        TABLE_FLOWNODE_SET_CACHE_NAME.to_string(),
        cache,
        kv_backend.clone(),
    ));

    // Builds the view info cache
    let cache = CacheBuilder::new(DEFAULT_CACHE_MAX_CAPACITY)
        .time_to_live(DEFAULT_CACHE_TTL)
        .time_to_idle(DEFAULT_CACHE_TTI)
        .build();
    let view_info_cache = Arc::new(new_view_info_cache(
        VIEW_INFO_CACHE_NAME.to_string(),
        cache,
        kv_backend.clone(),
    ));

    CacheRegistryBuilder::default()
        .add_cache(table_info_cache)
        .add_cache(table_name_cache)
        .add_cache(table_route_cache)
        .add_cache(view_info_cache)
        .add_cache(table_flownode_set_cache)
        .build()
}

// TODO(weny): Make the cache configurable.
pub fn with_default_composite_cache_registry(
    builder: LayeredCacheRegistryBuilder,
) -> Result<LayeredCacheRegistryBuilder> {
    let table_info_cache = builder.get().context(error::CacheRequiredSnafu {
        name: TABLE_INFO_CACHE_NAME,
    })?;
    let table_name_cache = builder.get().context(error::CacheRequiredSnafu {
        name: TABLE_NAME_CACHE_NAME,
    })?;

    // Builds table cache
    let cache = CacheBuilder::new(DEFAULT_CACHE_MAX_CAPACITY)
        .time_to_live(DEFAULT_CACHE_TTL)
        .time_to_idle(DEFAULT_CACHE_TTI)
        .build();
    let table_cache = Arc::new(new_table_cache(
        TABLE_CACHE_NAME.to_string(),
        cache,
        table_info_cache,
        table_name_cache,
    ));

    let registry = CacheRegistryBuilder::default()
        .add_cache(table_cache)
        .build();

    Ok(builder.add_cache_registry(registry))
}
@@ -16,6 +16,7 @@ arrow.workspace = true
 arrow-schema.workspace = true
 async-stream.workspace = true
 async-trait = "0.1"
+bytes.workspace = true
 common-catalog.workspace = true
 common-config.workspace = true
 common-error.workspace = true
@@ -48,8 +49,11 @@ table.workspace = true
 tokio.workspace = true

 [dev-dependencies]
+cache.workspace = true
 catalog = { workspace = true, features = ["testing"] }
 chrono.workspace = true
+common-meta = { workspace = true, features = ["testing"] }
+common-query = { workspace = true, features = ["testing"] }
 common-test-util.workspace = true
 log-store.workspace = true
 object-store.workspace = true
@@ -19,10 +19,7 @@ use common_error::ext::{BoxedError, ErrorExt};
 use common_error::status_code::StatusCode;
 use common_macro::stack_trace_debug;
 use datafusion::error::DataFusionError;
-use datatypes::prelude::ConcreteDataType;
 use snafu::{Location, Snafu};
-use table::metadata::TableId;
-use tokio::task::JoinError;

 #[derive(Snafu)]
 #[snafu(visibility(pub))]
@@ -30,12 +27,14 @@ use tokio::task::JoinError;
 pub enum Error {
     #[snafu(display("Failed to list catalogs"))]
     ListCatalogs {
+        #[snafu(implicit)]
         location: Location,
         source: BoxedError,
     },

     #[snafu(display("Failed to list {}'s schemas", catalog))]
     ListSchemas {
+        #[snafu(implicit)]
         location: Location,
         catalog: String,
         source: BoxedError,
@@ -43,6 +42,7 @@ pub enum Error {

     #[snafu(display("Failed to list {}.{}'s tables", catalog, schema))]
     ListTables {
+        #[snafu(implicit)]
         location: Location,
         catalog: String,
         schema: String,
@@ -51,78 +51,37 @@ pub enum Error {

     #[snafu(display("Failed to list nodes in cluster: {source}"))]
     ListNodes {
+        #[snafu(implicit)]
         location: Location,
         source: BoxedError,
     },

     #[snafu(display("Failed to re-compile script due to internal error"))]
     CompileScriptInternal {
+        #[snafu(implicit)]
         location: Location,
         source: BoxedError,
     },
-    #[snafu(display("Failed to open system catalog table"))]
-    OpenSystemCatalog {
-        location: Location,
-        source: table::error::Error,
-    },
-
-    #[snafu(display("Failed to create system catalog table"))]
-    CreateSystemCatalog {
-        location: Location,
-        source: table::error::Error,
-    },

     #[snafu(display("Failed to create table, table info: {}", table_info))]
     CreateTable {
         table_info: String,
+        #[snafu(implicit)]
         location: Location,
         source: table::error::Error,
     },

     #[snafu(display("System catalog is not valid: {}", msg))]
-    SystemCatalog { msg: String, location: Location },
-    #[snafu(display(
-        "System catalog table type mismatch, expected: binary, found: {:?}",
-        data_type,
-    ))]
-    SystemCatalogTypeMismatch {
-        data_type: ConcreteDataType,
+    SystemCatalog {
+        msg: String,
+        #[snafu(implicit)]
         location: Location,
     },

-    #[snafu(display("Invalid system catalog entry type: {:?}", entry_type))]
-    InvalidEntryType {
-        entry_type: Option<u8>,
-        location: Location,
-    },
-
-    #[snafu(display("Invalid system catalog key: {:?}", key))]
-    InvalidKey {
-        key: Option<String>,
-        location: Location,
-    },
-
-    #[snafu(display("Catalog value is not present"))]
-    EmptyValue { location: Location },
-
-    #[snafu(display("Failed to deserialize value"))]
-    ValueDeserialize {
-        #[snafu(source)]
-        error: serde_json::error::Error,
-        location: Location,
-    },
-
-    #[snafu(display("Table engine not found: {}", engine_name))]
-    TableEngineNotFound {
-        engine_name: String,
-        location: Location,
-        source: table::error::Error,
-    },
-
     #[snafu(display("Cannot find catalog by name: {}", catalog_name))]
     CatalogNotFound {
         catalog_name: String,
+        #[snafu(implicit)]
         location: Location,
     },

@@ -130,43 +89,28 @@ pub enum Error {
     SchemaNotFound {
         catalog: String,
         schema: String,
+        #[snafu(implicit)]
         location: Location,
     },

     #[snafu(display("Table `{}` already exists", table))]
-    TableExists { table: String, location: Location },
+    TableExists {
+        table: String,
+        #[snafu(implicit)]
+        location: Location,
+    },

     #[snafu(display("Table not found: {}", table))]
-    TableNotExist { table: String, location: Location },
-    #[snafu(display("Schema {} already exists", schema))]
-    SchemaExists { schema: String, location: Location },
-
-    #[snafu(display("Operation {} not implemented yet", operation))]
-    Unimplemented {
-        operation: String,
+    TableNotExist {
+        table: String,
+        #[snafu(implicit)]
         location: Location,
     },

-    #[snafu(display("Operation {} not supported", op))]
-    NotSupported { op: String, location: Location },
-    #[snafu(display("Failed to open table {table_id}"))]
-    OpenTable {
-        table_id: TableId,
-        location: Location,
-        source: table::error::Error,
-    },
-
-    #[snafu(display("Failed to open table in parallel"))]
-    ParallelOpenTable {
-        #[snafu(source)]
-        error: JoinError,
-    },
-
-    #[snafu(display("Table not found while opening table, table info: {}", table_info))]
-    TableNotFound {
-        table_info: String,
+    #[snafu(display("View info not found: {}", name))]
+    ViewInfoNotFound {
+        name: String,
+        #[snafu(implicit)]
         location: Location,
     },

@@ -176,59 +120,44 @@ pub enum Error {
     #[snafu(display("Failed to find region routes"))]
     FindRegionRoutes { source: partition::error::Error },

-    #[snafu(display("Failed to read system catalog table records"))]
-    ReadSystemCatalog {
-        location: Location,
-        source: common_recordbatch::error::Error,
-    },
-
     #[snafu(display("Failed to create recordbatch"))]
     CreateRecordBatch {
+        #[snafu(implicit)]
         location: Location,
         source: common_recordbatch::error::Error,
     },

-    #[snafu(display("Failed to insert table creation record to system catalog"))]
-    InsertCatalogRecord {
-        location: Location,
-        source: table::error::Error,
-    },
-
-    #[snafu(display("Failed to scan system catalog table"))]
-    SystemCatalogTableScan {
-        location: Location,
-        source: table::error::Error,
-    },
-
     #[snafu(display("Internal error"))]
     Internal {
+        #[snafu(implicit)]
         location: Location,
         source: BoxedError,
     },

     #[snafu(display("Failed to upgrade weak catalog manager reference"))]
-    UpgradeWeakCatalogManagerRef { location: Location },
+    UpgradeWeakCatalogManagerRef {
+        #[snafu(implicit)]
+        location: Location,
+    },

-    #[snafu(display("Failed to execute system catalog table scan"))]
-    SystemCatalogTableScanExec {
+    #[snafu(display("Failed to decode logical plan for view: {}", name))]
+    DecodePlan {
+        name: String,
+        #[snafu(implicit)]
         location: Location,
         source: common_query::error::Error,
     },

-    #[snafu(display("Cannot parse catalog value"))]
-    InvalidCatalogValue {
-        location: Location,
-        source: common_catalog::error::Error,
-    },
-
     #[snafu(display("Failed to perform metasrv operation"))]
     Metasrv {
+        #[snafu(implicit)]
         location: Location,
         source: meta_client::error::Error,
     },

     #[snafu(display("Invalid table info in catalog"))]
     InvalidTableInfoInCatalog {
+        #[snafu(implicit)]
         location: Location,
         source: datatypes::error::Error,
     },
@@ -240,29 +169,43 @@ pub enum Error {
     Datafusion {
         #[snafu(source)]
         error: DataFusionError,
+        #[snafu(implicit)]
         location: Location,
     },

-    #[snafu(display("Table schema mismatch"))]
-    TableSchemaMismatch {
-        location: Location,
-        source: table::error::Error,
-    },
-
-    #[snafu(display("A generic error has occurred, msg: {}", msg))]
-    Generic { msg: String, location: Location },
-
     #[snafu(display("Table metadata manager error"))]
     TableMetadataManager {
         source: common_meta::error::Error,
+        #[snafu(implicit)]
         location: Location,
     },

-    #[snafu(display("Get null from table cache, key: {}", key))]
-    TableCacheNotGet { key: String, location: Location },
+    #[snafu(display("Failed to get table cache"))]
+    GetTableCache {
+        source: common_meta::error::Error,
+        #[snafu(implicit)]
+        location: Location,
+    },

-    #[snafu(display("Failed to get table cache, err: {}", err_msg))]
-    GetTableCache { err_msg: String },
+    #[snafu(display("Failed to get view info from cache"))]
+    GetViewCache {
+        source: common_meta::error::Error,
+        #[snafu(implicit)]
+        location: Location,
+    },
+
+    #[snafu(display("Cache not found: {name}"))]
+    CacheNotFound {
+        name: String,
+        #[snafu(implicit)]
+        location: Location,
+    },
+
+    #[snafu(display("Failed to cast the catalog manager"))]
+    CastManager {
+        #[snafu(implicit)]
+        location: Location,
+    },
 }

 pub type Result<T> = std::result::Result<T, Error>;
@@ -270,61 +213,43 @@ pub type Result<T> = std::result::Result<T, Error>;
 impl ErrorExt for Error {
     fn status_code(&self) -> StatusCode {
         match self {
-            Error::InvalidKey { .. }
-            | Error::SchemaNotFound { .. }
+            Error::SchemaNotFound { .. }
             | Error::CatalogNotFound { .. }
             | Error::FindPartitions { .. }
             | Error::FindRegionRoutes { .. }
-            | Error::InvalidEntryType { .. }
-            | Error::ParallelOpenTable { .. } => StatusCode::Unexpected,
+            | Error::CacheNotFound { .. }
+            | Error::CastManager { .. } => StatusCode::Unexpected,

-            Error::TableNotFound { .. } => StatusCode::TableNotFound,
+            Error::ViewInfoNotFound { .. } => StatusCode::TableNotFound,

-            Error::SystemCatalog { .. }
-            | Error::EmptyValue { .. }
-            | Error::ValueDeserialize { .. } => StatusCode::StorageUnavailable,
+            Error::SystemCatalog { .. } => StatusCode::StorageUnavailable,

-            Error::Generic { .. }
-            | Error::SystemCatalogTypeMismatch { .. }
-            | Error::UpgradeWeakCatalogManagerRef { .. } => StatusCode::Internal,
+            Error::UpgradeWeakCatalogManagerRef { .. } => StatusCode::Internal,

-            Error::ReadSystemCatalog { source, .. } | Error::CreateRecordBatch { source, .. } => {
-                source.status_code()
-            }
-            Error::InvalidCatalogValue { source, .. } => source.status_code(),
-
+            Error::CreateRecordBatch { source, .. } => source.status_code(),
             Error::TableExists { .. } => StatusCode::TableAlreadyExists,
             Error::TableNotExist { .. } => StatusCode::TableNotFound,
-            Error::SchemaExists { .. } | Error::TableEngineNotFound { .. } => {
-                StatusCode::InvalidArguments
-            }

             Error::ListCatalogs { source, .. }
             | Error::ListNodes { source, .. }
             | Error::ListSchemas { source, .. }
             | Error::ListTables { source, .. } => source.status_code(),

-            Error::OpenSystemCatalog { source, .. }
-            | Error::CreateSystemCatalog { source, .. }
-            | Error::InsertCatalogRecord { source, .. }
-            | Error::OpenTable { source, .. }
-            | Error::CreateTable { source, .. }
-            | Error::TableSchemaMismatch { source, .. } => source.status_code(),
+            Error::CreateTable { source, .. } => source.status_code(),

             Error::Metasrv { source, .. } => source.status_code(),
-            Error::SystemCatalogTableScan { source, .. } => source.status_code(),
-            Error::SystemCatalogTableScanExec { source, .. } => source.status_code(),
+            Error::DecodePlan { source, .. } => source.status_code(),
             Error::InvalidTableInfoInCatalog { source, .. } => source.status_code(),

             Error::CompileScriptInternal { source, .. } | Error::Internal { source, .. } => {
                 source.status_code()
             }

-            Error::Unimplemented { .. } | Error::NotSupported { .. } => StatusCode::Unsupported,
             Error::QueryAccessDenied { .. } => StatusCode::AccessDenied,
             Error::Datafusion { .. } => StatusCode::EngineExecuteQuery,
             Error::TableMetadataManager { source, .. } => source.status_code(),
-            Error::TableCacheNotGet { .. } | Error::GetTableCache { .. } => StatusCode::Internal,
+            Error::GetViewCache { source, .. } | Error::GetTableCache { source, .. } => {
+                source.status_code()
+            }
         }
     }

@@ -356,11 +281,6 @@ mod tests {
            .status_code()
         );

-        assert_eq!(
-            StatusCode::Unexpected,
-            InvalidKeySnafu { key: None }.build().status_code()
-        );
-
         assert_eq!(
             StatusCode::StorageUnavailable,
             Error::SystemCatalog {
@@ -369,19 +289,6 @@ mod tests {
             }
             .status_code()
         );
-
-        assert_eq!(
-            StatusCode::Internal,
-            Error::SystemCatalogTypeMismatch {
-                data_type: ConcreteDataType::binary_datatype(),
-                location: Location::generate(),
-            }
-            .status_code()
-        );
-        assert_eq!(
-            StatusCode::StorageUnavailable,
-            EmptyValueSnafu {}.build().status_code()
-        );
     }

     #[test]
@@ -21,11 +21,11 @@ use common_config::Mode;
 use common_error::ext::BoxedError;
 use common_meta::cluster::{ClusterInfo, NodeInfo, NodeStatus};
 use common_meta::peer::Peer;
-use common_query::physical_plan::TaskContext;
 use common_recordbatch::adapter::RecordBatchStreamAdapter;
 use common_recordbatch::{RecordBatch, SendableRecordBatchStream};
 use common_telemetry::warn;
 use common_time::timestamp::Timestamp;
+use datafusion::execution::TaskContext;
 use datafusion::physical_plan::stream::RecordBatchStreamAdapter as DfRecordBatchStreamAdapter;
 use datafusion::physical_plan::streaming::PartitionStream as DfPartitionStream;
 use datafusion::physical_plan::SendableRecordBatchStream as DfSendableRecordBatchStream;
@@ -20,9 +20,9 @@ use common_catalog::consts::{
     SEMANTIC_TYPE_TIME_INDEX,
 };
 use common_error::ext::BoxedError;
-use common_query::physical_plan::TaskContext;
 use common_recordbatch::adapter::RecordBatchStreamAdapter;
 use common_recordbatch::{RecordBatch, SendableRecordBatchStream};
+use datafusion::execution::TaskContext;
 use datafusion::physical_plan::stream::RecordBatchStreamAdapter as DfRecordBatchStreamAdapter;
 use datafusion::physical_plan::streaming::PartitionStream as DfPartitionStream;
 use datafusion::physical_plan::SendableRecordBatchStream as DfSendableRecordBatchStream;
@@ -17,9 +17,9 @@ use std::sync::{Arc, Weak};
 use arrow_schema::SchemaRef as ArrowSchemaRef;
 use common_catalog::consts::INFORMATION_SCHEMA_KEY_COLUMN_USAGE_TABLE_ID;
 use common_error::ext::BoxedError;
-use common_query::physical_plan::TaskContext;
 use common_recordbatch::adapter::RecordBatchStreamAdapter;
 use common_recordbatch::{RecordBatch, SendableRecordBatchStream};
+use datafusion::execution::TaskContext;
 use datafusion::physical_plan::stream::RecordBatchStreamAdapter as DfRecordBatchStreamAdapter;
 use datafusion::physical_plan::streaming::PartitionStream as DfPartitionStream;
 use datafusion::physical_plan::SendableRecordBatchStream as DfSendableRecordBatchStream;
@@ -17,9 +17,9 @@ use std::sync::Arc;

 use arrow_schema::SchemaRef as ArrowSchemaRef;
 use common_error::ext::BoxedError;
-use common_query::physical_plan::TaskContext;
 use common_recordbatch::adapter::RecordBatchStreamAdapter;
 use common_recordbatch::{RecordBatch, SendableRecordBatchStream};
+use datafusion::execution::TaskContext;
 use datafusion::physical_plan::stream::RecordBatchStreamAdapter as DfRecordBatchStreamAdapter;
 use datafusion::physical_plan::streaming::PartitionStream as DfPartitionStream;
 use datafusion::physical_plan::SendableRecordBatchStream as DfSendableRecordBatchStream;
@@ -18,10 +18,10 @@ use std::sync::{Arc, Weak};
 use arrow_schema::SchemaRef as ArrowSchemaRef;
 use common_catalog::consts::INFORMATION_SCHEMA_PARTITIONS_TABLE_ID;
 use common_error::ext::BoxedError;
-use common_query::physical_plan::TaskContext;
 use common_recordbatch::adapter::RecordBatchStreamAdapter;
 use common_recordbatch::{RecordBatch, SendableRecordBatchStream};
 use common_time::datetime::DateTime;
+use datafusion::execution::TaskContext;
 use datafusion::physical_plan::stream::RecordBatchStreamAdapter as DfRecordBatchStreamAdapter;
 use datafusion::physical_plan::streaming::PartitionStream as DfPartitionStream;
 use datafusion::physical_plan::SendableRecordBatchStream as DfSendableRecordBatchStream;
@@ -14,10 +14,9 @@

 use arrow::array::StringArray;
 use arrow::compute::kernels::comparison;
-use common_query::logical_plan::DfExpr;
 use datafusion::common::ScalarValue;
 use datafusion::logical_expr::expr::Like;
-use datafusion::logical_expr::Operator;
+use datafusion::logical_expr::{Expr, Operator};
 use datatypes::value::Value;
 use store_api::storage::ScanRequest;

@@ -118,12 +117,12 @@ impl Predicate {
     }

     /// Try to create a predicate from datafusion [`Expr`], return None if fails.
-    fn from_expr(expr: DfExpr) -> Option<Predicate> {
+    fn from_expr(expr: Expr) -> Option<Predicate> {
         match expr {
             // NOT expr
-            DfExpr::Not(expr) => Some(Predicate::Not(Box::new(Self::from_expr(*expr)?))),
+            Expr::Not(expr) => Some(Predicate::Not(Box::new(Self::from_expr(*expr)?))),
             // expr LIKE pattern
-            DfExpr::Like(Like {
+            Expr::Like(Like {
                 negated,
                 expr,
                 pattern,
@@ -131,10 +130,10 @@ impl Predicate {
                 ..
             }) if is_column(&expr) && is_string_literal(&pattern) => {
                 // Safety: ensured by gurad
-                let DfExpr::Column(c) = *expr else {
+                let Expr::Column(c) = *expr else {
                     unreachable!();
                 };
-                let DfExpr::Literal(ScalarValue::Utf8(Some(pattern))) = *pattern else {
+                let Expr::Literal(ScalarValue::Utf8(Some(pattern))) = *pattern else {
                     unreachable!();
                 };

@@ -147,10 +146,10 @@ impl Predicate {
                 }
             }
             // left OP right
-            DfExpr::BinaryExpr(bin) => match (*bin.left, bin.op, *bin.right) {
+            Expr::BinaryExpr(bin) => match (*bin.left, bin.op, *bin.right) {
                 // left == right
-                (DfExpr::Literal(scalar), Operator::Eq, DfExpr::Column(c))
-                | (DfExpr::Column(c), Operator::Eq, DfExpr::Literal(scalar)) => {
+                (Expr::Literal(scalar), Operator::Eq, Expr::Column(c))
+                | (Expr::Column(c), Operator::Eq, Expr::Literal(scalar)) => {
                     let Ok(v) = Value::try_from(scalar) else {
                         return None;
                     };
@@ -158,8 +157,8 @@ impl Predicate {
                     Some(Predicate::Eq(c.name, v))
                 }
                 // left != right
-                (DfExpr::Literal(scalar), Operator::NotEq, DfExpr::Column(c))
-                | (DfExpr::Column(c), Operator::NotEq, DfExpr::Literal(scalar)) => {
+                (Expr::Literal(scalar), Operator::NotEq, Expr::Column(c))
+                | (Expr::Column(c), Operator::NotEq, Expr::Literal(scalar)) => {
                     let Ok(v) = Value::try_from(scalar) else {
                         return None;
                     };
@@ -183,14 +182,14 @@ impl Predicate {
                 _ => None,
             },
             // [NOT] IN (LIST)
-            DfExpr::InList(list) => {
+            Expr::InList(list) => {
                 match (*list.expr, list.list, list.negated) {
                     // column [NOT] IN (v1, v2, v3, ...)
-                    (DfExpr::Column(c), list, negated) if is_all_scalars(&list) => {
+                    (Expr::Column(c), list, negated) if is_all_scalars(&list) => {
                         let mut values = Vec::with_capacity(list.len());
                         for scalar in list {
                             // Safety: checked by `is_all_scalars`
-                            let DfExpr::Literal(scalar) = scalar else {
+                            let Expr::Literal(scalar) = scalar else {
                                 unreachable!();
                             };

@@ -237,12 +236,12 @@ fn like_utf8(s: &str, pattern: &str, case_insensitive: &bool) -> Option<bool> {
     Some(booleans.value(0))
 }

-fn is_string_literal(expr: &DfExpr) -> bool {
-    matches!(expr, DfExpr::Literal(ScalarValue::Utf8(Some(_))))
+fn is_string_literal(expr: &Expr) -> bool {
+    matches!(expr, Expr::Literal(ScalarValue::Utf8(Some(_))))
 }

-fn is_column(expr: &DfExpr) -> bool {
-    matches!(expr, DfExpr::Column(_))
+fn is_column(expr: &Expr) -> bool {
+    matches!(expr, Expr::Column(_))
 }

 /// A list of predicate
@@ -257,7 +256,7 @@ impl Predicates {
         let mut predicates = Vec::with_capacity(request.filters.len());

         for filter in &request.filters {
-            if let Some(predicate) = Predicate::from_expr(filter.df_expr().clone()) {
+            if let Some(predicate) = Predicate::from_expr(filter.clone()) {
                 predicates.push(predicate);
             }
         }
@@ -286,8 +285,8 @@ impl Predicates {
 }

 /// Returns true when the values are all [`DfExpr::Literal`].
-fn is_all_scalars(list: &[DfExpr]) -> bool {
-    list.iter().all(|v| matches!(v, DfExpr::Literal(_)))
+fn is_all_scalars(list: &[Expr]) -> bool {
+    list.iter().all(|v| matches!(v, Expr::Literal(_)))
 }

 #[cfg(test)]
@@ -376,7 +375,7 @@ mod tests {
     #[test]
     fn test_predicate_like() {
         // case insensitive
-        let expr = DfExpr::Like(Like {
+        let expr = Expr::Like(Like {
             negated: false,
             expr: Box::new(column("a")),
             pattern: Box::new(string_literal("%abc")),
@@ -403,7 +402,7 @@ mod tests {
         assert!(p.eval(&[]).is_none());

         // case sensitive
-        let expr = DfExpr::Like(Like {
+        let expr = Expr::Like(Like {
             negated: false,
             expr: Box::new(column("a")),
             pattern: Box::new(string_literal("%abc")),
@@ -423,7 +422,7 @@ mod tests {
         assert!(p.eval(&[]).is_none());

         // not like
-        let expr = DfExpr::Like(Like {
+        let expr = Expr::Like(Like {
             negated: true,
             expr: Box::new(column("a")),
             pattern: Box::new(string_literal("%abc")),
@@ -437,15 +436,15 @@ mod tests {
         assert!(p.eval(&[]).is_none());
     }

-    fn column(name: &str) -> DfExpr {
-        DfExpr::Column(Column {
+    fn column(name: &str) -> Expr {
+        Expr::Column(Column {
             relation: None,
             name: name.to_string(),
         })
     }

-    fn string_literal(v: &str) -> DfExpr {
-        DfExpr::Literal(ScalarValue::Utf8(Some(v.to_string())))
+    fn string_literal(v: &str) -> Expr {
+        Expr::Literal(ScalarValue::Utf8(Some(v.to_string())))
     }

     fn match_string_value(v: &Value, expected: &str) -> bool {
@@ -463,14 +462,14 @@ mod tests {
         result
     }

-    fn mock_exprs() -> (DfExpr, DfExpr) {
-        let expr1 = DfExpr::BinaryExpr(BinaryExpr {
+    fn mock_exprs() -> (Expr, Expr) {
+        let expr1 = Expr::BinaryExpr(BinaryExpr {
             left: Box::new(column("a")),
             op: Operator::Eq,
             right: Box::new(string_literal("a_value")),
         });

-        let expr2 = DfExpr::BinaryExpr(BinaryExpr {
+        let expr2 = Expr::BinaryExpr(BinaryExpr {
             left: Box::new(column("b")),
             op: Operator::NotEq,
             right: Box::new(string_literal("b_value")),
@@ -491,17 +490,17 @@ mod tests {
         assert!(matches!(&p2, Predicate::NotEq(column, v) if column == "b"
             && match_string_value(v, "b_value")));

-        let and_expr = DfExpr::BinaryExpr(BinaryExpr {
+        let and_expr = Expr::BinaryExpr(BinaryExpr {
             left: Box::new(expr1.clone()),
             op: Operator::And,
             right: Box::new(expr2.clone()),
         });
-        let or_expr = DfExpr::BinaryExpr(BinaryExpr {
+        let or_expr = Expr::BinaryExpr(BinaryExpr {
             left: Box::new(expr1.clone()),
             op: Operator::Or,
             right: Box::new(expr2.clone()),
         });
-        let not_expr = DfExpr::Not(Box::new(expr1.clone()));
+        let not_expr = Expr::Not(Box::new(expr1.clone()));

         let and_p = Predicate::from_expr(and_expr).unwrap();
         assert!(matches!(and_p, Predicate::And(left, right) if *left == p1 && *right == p2));
@@ -510,7 +509,7 @@ mod tests {
         let not_p = Predicate::from_expr(not_expr).unwrap();
         assert!(matches!(not_p, Predicate::Not(p) if *p == p1));

-        let inlist_expr = DfExpr::InList(InList {
+        let inlist_expr = Expr::InList(InList {
             expr: Box::new(column("a")),
             list: vec![string_literal("a1"), string_literal("a2")],
             negated: false,
@@ -520,7 +519,7 @@ mod tests {
         assert!(matches!(&inlist_p, Predicate::InList(c, values) if c == "a"
             && match_string_values(values, &["a1", "a2"])));

-        let inlist_expr = DfExpr::InList(InList {
+        let inlist_expr = Expr::InList(InList {
             expr: Box::new(column("a")),
             list: vec![string_literal("a1"), string_literal("a2")],
             negated: true,
@@ -540,7 +539,7 @@ mod tests {
         let (expr1, expr2) = mock_exprs();

         let request = ScanRequest {
-            filters: vec![expr1.into(), expr2.into()],
+            filters: vec![expr1, expr2],
             ..Default::default()
         };
         let predicates = Predicates::from_scan_request(&Some(request));
@@ -578,7 +577,7 @@ mod tests {

         let (expr1, expr2) = mock_exprs();
         let request = ScanRequest {
-            filters: vec![expr1.into(), expr2.into()],
+            filters: vec![expr1, expr2],
             ..Default::default()
         };
         let predicates = Predicates::from_scan_request(&Some(request));
@@ -19,9 +19,9 @@ use arrow_schema::SchemaRef as ArrowSchemaRef;
 use common_catalog::consts::INFORMATION_SCHEMA_REGION_PEERS_TABLE_ID;
 use common_error::ext::BoxedError;
 use common_meta::rpc::router::RegionRoute;
-use common_query::physical_plan::TaskContext;
 use common_recordbatch::adapter::RecordBatchStreamAdapter;
 use common_recordbatch::{RecordBatch, SendableRecordBatchStream};
+use datafusion::execution::TaskContext;
 use datafusion::physical_plan::stream::RecordBatchStreamAdapter as DfRecordBatchStreamAdapter;
 use datafusion::physical_plan::streaming::PartitionStream as DfPartitionStream;
 use datafusion::physical_plan::SendableRecordBatchStream as DfSendableRecordBatchStream;
@@ -17,10 +17,10 @@ use std::sync::Arc;
 use arrow_schema::SchemaRef as ArrowSchemaRef;
 use common_catalog::consts::INFORMATION_SCHEMA_RUNTIME_METRICS_TABLE_ID;
 use common_error::ext::BoxedError;
-use common_query::physical_plan::TaskContext;
 use common_recordbatch::adapter::RecordBatchStreamAdapter;
 use common_recordbatch::{RecordBatch, SendableRecordBatchStream};
 use common_time::util::current_time_millis;
+use datafusion::execution::TaskContext;
 use datafusion::physical_plan::stream::RecordBatchStreamAdapter as DfRecordBatchStreamAdapter;
 use datafusion::physical_plan::streaming::PartitionStream as DfPartitionStream;
 use datafusion::physical_plan::SendableRecordBatchStream as DfSendableRecordBatchStream;
@@ -17,9 +17,9 @@ use std::sync::{Arc, Weak};
 use arrow_schema::SchemaRef as ArrowSchemaRef;
 use common_catalog::consts::INFORMATION_SCHEMA_SCHEMATA_TABLE_ID;
 use common_error::ext::BoxedError;
-use common_query::physical_plan::TaskContext;
 use common_recordbatch::adapter::RecordBatchStreamAdapter;
 use common_recordbatch::{RecordBatch, SendableRecordBatchStream};
+use datafusion::execution::TaskContext;
 use datafusion::physical_plan::stream::RecordBatchStreamAdapter as DfRecordBatchStreamAdapter;
 use datafusion::physical_plan::streaming::PartitionStream as DfPartitionStream;
 use datafusion::physical_plan::SendableRecordBatchStream as DfSendableRecordBatchStream;
@@ -17,9 +17,9 @@ use std::sync::{Arc, Weak};
 use arrow_schema::SchemaRef as ArrowSchemaRef;
 use common_catalog::consts::INFORMATION_SCHEMA_TABLE_CONSTRAINTS_TABLE_ID;
 use common_error::ext::BoxedError;
-use common_query::physical_plan::TaskContext;
 use common_recordbatch::adapter::RecordBatchStreamAdapter;
 use common_recordbatch::{RecordBatch, SendableRecordBatchStream};
+use datafusion::execution::TaskContext;
 use datafusion::physical_plan::stream::RecordBatchStreamAdapter as DfRecordBatchStreamAdapter;
 use datafusion::physical_plan::streaming::PartitionStream as DfPartitionStream;
 use datafusion::physical_plan::SendableRecordBatchStream as DfSendableRecordBatchStream;
@@ -17,9 +17,9 @@ use std::sync::{Arc, Weak};
 use arrow_schema::SchemaRef as ArrowSchemaRef;
 use common_catalog::consts::INFORMATION_SCHEMA_TABLES_TABLE_ID;
 use common_error::ext::BoxedError;
-use common_query::physical_plan::TaskContext;
 use common_recordbatch::adapter::RecordBatchStreamAdapter;
 use common_recordbatch::{RecordBatch, SendableRecordBatchStream};
+use datafusion::execution::TaskContext;
 use datafusion::physical_plan::stream::RecordBatchStreamAdapter as DfRecordBatchStreamAdapter;
 use datafusion::physical_plan::streaming::PartitionStream as DfPartitionStream;
 use datafusion::physical_plan::SendableRecordBatchStream as DfSendableRecordBatchStream;
@@ -16,5 +16,7 @@ pub use client::{CachedMetaKvBackend, CachedMetaKvBackendBuilder, MetaKvBackend}

 mod client;
 mod manager;
+mod table_cache;

 pub use manager::KvBackendCatalogManager;
+pub use table_cache::{new_table_cache, TableCache, TableCacheRef};
@@ -350,6 +350,13 @@ pub struct MetaKvBackend {
     pub client: Arc<MetaClient>,
 }

+impl MetaKvBackend {
+    /// Constructs a [MetaKvBackend].
+    pub fn new(client: Arc<MetaClient>) -> MetaKvBackend {
+        MetaKvBackend { client }
+    }
+}
+
 impl TxnService for MetaKvBackend {
     type Error = Error;
 }
@@ -450,9 +457,8 @@ mod tests {
     use common_meta::kv_backend::{KvBackend, TxnService};
     use common_meta::rpc::store::{
         BatchDeleteRequest, BatchDeleteResponse, BatchGetRequest, BatchGetResponse,
-        BatchPutRequest, BatchPutResponse, CompareAndPutRequest, CompareAndPutResponse,
-        DeleteRangeRequest, DeleteRangeResponse, PutRequest, PutResponse, RangeRequest,
-        RangeResponse,
+        BatchPutRequest, BatchPutResponse, DeleteRangeRequest, DeleteRangeResponse, PutRequest,
+        PutResponse, RangeRequest, RangeResponse,
     };
     use common_meta::rpc::KeyValue;
     use dashmap::DashMap;
@@ -512,13 +518,6 @@ mod tests {
             unimplemented!()
         }

-        async fn compare_and_put(
-            &self,
-            _req: CompareAndPutRequest,
-        ) -> Result<CompareAndPutResponse, Self::Error> {
-            unimplemented!()
-        }
-
         async fn delete_range(
             &self,
             _req: DeleteRangeRequest,
@@ -15,17 +15,14 @@
 use std::any::Any;
 use std::collections::BTreeSet;
 use std::sync::{Arc, Weak};
-use std::time::Duration;

 use async_stream::try_stream;
 use common_catalog::consts::{
     DEFAULT_CATALOG_NAME, DEFAULT_SCHEMA_NAME, INFORMATION_SCHEMA_NAME, NUMBERS_TABLE_ID,
 };
-use common_catalog::format_full_table_name;
 use common_config::Mode;
 use common_error::ext::BoxedError;
-use common_meta::cache_invalidator::{CacheInvalidator, Context, MultiCacheInvalidator};
-use common_meta::instruction::CacheIdent;
+use common_meta::cache::{LayeredCacheRegistryRef, ViewInfoCacheRef};
 use common_meta::key::catalog_name::CatalogNameKey;
 use common_meta::key::schema_name::SchemaNameKey;
 use common_meta::key::table_info::TableInfoValue;
@@ -35,20 +32,20 @@ use common_meta::kv_backend::KvBackendRef;
 use futures_util::stream::BoxStream;
 use futures_util::{StreamExt, TryStreamExt};
 use meta_client::client::MetaClient;
-use moka::future::{Cache as AsyncCache, CacheBuilder};
 use moka::sync::Cache;
 use partition::manager::{PartitionRuleManager, PartitionRuleManagerRef};
 use snafu::prelude::*;
 use table::dist_table::DistTable;
 use table::table::numbers::{NumbersTable, NUMBERS_TABLE_NAME};
+use table::table_name::TableName;
 use table::TableRef;

-use crate::error::Error::{GetTableCache, TableCacheNotGet};
 use crate::error::{
-    InvalidTableInfoInCatalogSnafu, ListCatalogsSnafu, ListSchemasSnafu, ListTablesSnafu, Result,
-    TableCacheNotGetSnafu, TableMetadataManagerSnafu,
+    CacheNotFoundSnafu, GetTableCacheSnafu, InvalidTableInfoInCatalogSnafu, ListCatalogsSnafu,
+    ListSchemasSnafu, ListTablesSnafu, Result, TableMetadataManagerSnafu,
 };
 use crate::information_schema::InformationSchemaProvider;
+use crate::kvbackend::TableCacheRef;
 use crate::CatalogManager;

 /// Access all existing catalog, schema and tables.
@@ -64,64 +61,27 @@ pub struct KvBackendCatalogManager {
     table_metadata_manager: TableMetadataManagerRef,
     /// A sub-CatalogManager that handles system tables
     system_catalog: SystemCatalog,
-    table_cache: AsyncCache<String, TableRef>,
-}
-
-struct TableCacheInvalidator {
-    table_cache: AsyncCache<String, TableRef>,
-}
-
-impl TableCacheInvalidator {
-    pub fn new(table_cache: AsyncCache<String, TableRef>) -> Self {
-        Self { table_cache }
-    }
-}
-
-#[async_trait::async_trait]
-impl CacheInvalidator for TableCacheInvalidator {
-    async fn invalidate(
-        &self,
-        _ctx: &Context,
-        caches: Vec<CacheIdent>,
-    ) -> common_meta::error::Result<()> {
-        for cache in caches {
-            if let CacheIdent::TableName(table_name) = cache {
-                let table_cache_key = format_full_table_name(
-                    &table_name.catalog_name,
-                    &table_name.schema_name,
-                    &table_name.table_name,
-                );
-                self.table_cache.invalidate(&table_cache_key).await;
-            }
-        }
-        Ok(())
-    }
+    cache_registry: LayeredCacheRegistryRef,
 }

 const CATALOG_CACHE_MAX_CAPACITY: u64 = 128;
-const TABLE_CACHE_MAX_CAPACITY: u64 = 65536;
-const TABLE_CACHE_TTL: Duration = Duration::from_secs(10 * 60);
-const TABLE_CACHE_TTI: Duration = Duration::from_secs(5 * 60);

 impl KvBackendCatalogManager {
-    pub async fn new(
+    pub fn new(
         mode: Mode,
         meta_client: Option<Arc<MetaClient>>,
         backend: KvBackendRef,
-        multi_cache_invalidator: Arc<MultiCacheInvalidator>,
+        cache_registry: LayeredCacheRegistryRef,
     ) -> Arc<Self> {
-        let table_cache: AsyncCache<String, TableRef> = CacheBuilder::new(TABLE_CACHE_MAX_CAPACITY)
-            .time_to_live(TABLE_CACHE_TTL)
-            .time_to_idle(TABLE_CACHE_TTI)
-            .build();
-        multi_cache_invalidator
-            .add_invalidator(Arc::new(TableCacheInvalidator::new(table_cache.clone())))
-            .await;
-
         Arc::new_cyclic(|me| Self {
             mode,
             meta_client,
-            partition_manager: Arc::new(PartitionRuleManager::new(backend.clone())),
+            partition_manager: Arc::new(PartitionRuleManager::new(
+                backend.clone(),
+                cache_registry
+                    .get()
+                    .expect("Failed to get table_route_cache"),
+            )),
             table_metadata_manager: Arc::new(TableMetadataManager::new(backend)),
             system_catalog: SystemCatalog {
                 catalog_manager: me.clone(),
@@ -131,7 +91,7 @@ impl KvBackendCatalogManager {
                     me.clone(),
                 )),
             },
-            table_cache,
+            cache_registry,
         })
     }

@@ -140,6 +100,12 @@ impl KvBackendCatalogManager {
         &self.mode
     }

+    pub fn view_info_cache(&self) -> Result<ViewInfoCacheRef> {
+        self.cache_registry.get().context(CacheNotFoundSnafu {
+            name: "view_info_cache",
+        })
+    }
+
     /// Returns the `[MetaClient]`.
     pub fn meta_client(&self) -> Option<Arc<MetaClient>> {
         self.meta_client.clone()
@@ -218,7 +184,7 @@ impl CatalogManager for KvBackendCatalogManager {
     }

     async fn schema_exists(&self, catalog: &str, schema: &str) -> Result<bool> {
-        if self.system_catalog.schema_exist(schema) {
+        if self.system_catalog.schema_exists(schema) {
             return Ok(true);
         }

@@ -230,7 +196,7 @@ impl CatalogManager for KvBackendCatalogManager {
     }

     async fn table_exists(&self, catalog: &str, schema: &str, table: &str) -> Result<bool> {
-        if self.system_catalog.table_exist(schema, table) {
+        if self.system_catalog.table_exists(schema, table) {
             return Ok(true);
         }

@@ -245,60 +211,29 @@ impl CatalogManager for KvBackendCatalogManager {

     async fn table(
         &self,
-        catalog: &str,
-        schema: &str,
+        catalog_name: &str,
+        schema_name: &str,
         table_name: &str,
     ) -> Result<Option<TableRef>> {
-        if let Some(table) = self.system_catalog.table(catalog, schema, table_name) {
+        if let Some(table) = self
+            .system_catalog
+            .table(catalog_name, schema_name, table_name)
+        {
             return Ok(Some(table));
         }

-        let init = async {
-            let table_name_key = TableNameKey::new(catalog, schema, table_name);
-            let Some(table_name_value) = self
-                .table_metadata_manager
-                .table_name_manager()
-                .get(table_name_key)
-                .await
-                .context(TableMetadataManagerSnafu)?
-            else {
-                return TableCacheNotGetSnafu {
-                    key: table_name_key.to_string(),
-                }
-                .fail();
-            };
-            let table_id = table_name_value.table_id();
+        let table_cache: TableCacheRef = self.cache_registry.get().context(CacheNotFoundSnafu {
+            name: "table_cache",
+        })?;

-            let Some(table_info_value) = self
-                .table_metadata_manager
-                .table_info_manager()
-                .get(table_id)
-                .await
-                .context(TableMetadataManagerSnafu)?
-                .map(|v| v.into_inner())
-            else {
-                return TableCacheNotGetSnafu {
-                    key: table_name_key.to_string(),
-                }
-                .fail();
-            };
-            build_table(table_info_value)
-        };
-
-        match self
-            .table_cache
-            .try_get_with_by_ref(&format_full_table_name(catalog, schema, table_name), init)
+        table_cache
+            .get_by_ref(&TableName {
+                catalog_name: catalog_name.to_string(),
+                schema_name: schema_name.to_string(),
+                table_name: table_name.to_string(),
+            })
             .await
-        {
-            Ok(table) => Ok(Some(table)),
-            Err(err) => match err.as_ref() {
-                TableCacheNotGet { .. } => Ok(None),
-                _ => Err(err),
-            },
-        }
-        .map_err(|err| GetTableCache {
-            err_msg: err.to_string(),
-        })
+            .context(GetTableCacheSnafu)
     }

     fn tables<'a>(&'a self, catalog: &'a str, schema: &'a str) -> BoxStream<'a, Result<TableRef>> {
|
||||||
@@ -382,11 +317,11 @@ impl SystemCatalog {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
fn schema_exist(&self, schema: &str) -> bool {
|
fn schema_exists(&self, schema: &str) -> bool {
|
||||||
schema == INFORMATION_SCHEMA_NAME
|
schema == INFORMATION_SCHEMA_NAME
|
||||||
}
|
}
|
||||||
|
|
||||||
fn table_exist(&self, schema: &str, table: &str) -> bool {
|
fn table_exists(&self, schema: &str, table: &str) -> bool {
|
||||||
if schema == INFORMATION_SCHEMA_NAME {
|
if schema == INFORMATION_SCHEMA_NAME {
|
||||||
self.information_schema_provider.table(table).is_some()
|
self.information_schema_provider.table(table).is_some()
|
||||||
} else if schema == DEFAULT_SCHEMA_NAME {
|
} else if schema == DEFAULT_SCHEMA_NAME {
|
||||||
|
|||||||
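Note: with this change the catalog manager no longer owns a private moka cache; lookups go through the layered cache registry. A minimal call-site sketch, mirroring the setup of the `test_resolve_view` test further down in this diff — the table name is a placeholder and everything else is taken from that test, not a new API:

    // Sketch only; helper names come from the test later in this diff.
    let backend = Arc::new(MemoryKvBackend::default());
    let registry = Arc::new(
        with_default_composite_cache_registry(
            LayeredCacheRegistryBuilder::default()
                .add_cache_registry(CacheRegistryBuilder::default().build())
                .add_cache_registry(build_fundamental_cache_registry(backend.clone())),
        )
        .unwrap()
        .build(),
    );
    let catalog_manager = KvBackendCatalogManager::new(Mode::Standalone, None, backend, registry);
    // Resolves via the system catalog first, then the registered table cache.
    let _table = catalog_manager.table("greptime", "public", "demo").await?; // "demo" is illustrative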
src/catalog/src/kvbackend/table_cache.rs (new file, 80 lines)
@@ -0,0 +1,80 @@
+// Copyright 2023 Greptime Team
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+use std::sync::Arc;
+
+use common_meta::cache::{CacheContainer, Initializer, TableInfoCacheRef, TableNameCacheRef};
+use common_meta::error::{Result as MetaResult, ValueNotExistSnafu};
+use common_meta::instruction::CacheIdent;
+use futures::future::BoxFuture;
+use moka::future::Cache;
+use snafu::OptionExt;
+use table::dist_table::DistTable;
+use table::table_name::TableName;
+use table::TableRef;
+
+pub type TableCacheRef = Arc<TableCache>;
+
+/// [TableCache] caches the [TableName] to [TableRef] mapping.
+pub type TableCache = CacheContainer<TableName, TableRef, CacheIdent>;
+
+/// Constructs a [TableCache].
+pub fn new_table_cache(
+    name: String,
+    cache: Cache<TableName, TableRef>,
+    table_info_cache: TableInfoCacheRef,
+    table_name_cache: TableNameCacheRef,
+) -> TableCache {
+    let init = init_factory(table_info_cache, table_name_cache);
+
+    CacheContainer::new(name, cache, Box::new(invalidator), init, Box::new(filter))
+}
+
+fn init_factory(
+    table_info_cache: TableInfoCacheRef,
+    table_name_cache: TableNameCacheRef,
+) -> Initializer<TableName, TableRef> {
+    Arc::new(move |table_name| {
+        let table_info_cache = table_info_cache.clone();
+        let table_name_cache = table_name_cache.clone();
+        Box::pin(async move {
+            let table_id = table_name_cache
+                .get_by_ref(table_name)
+                .await?
+                .context(ValueNotExistSnafu)?;
+            let table_info = table_info_cache
+                .get_by_ref(&table_id)
+                .await?
+                .context(ValueNotExistSnafu)?;
+
+            Ok(Some(DistTable::table(table_info)))
+        })
+    })
+}
+
+fn invalidator<'a>(
+    cache: &'a Cache<TableName, TableRef>,
+    ident: &'a CacheIdent,
+) -> BoxFuture<'a, MetaResult<()>> {
+    Box::pin(async move {
+        if let CacheIdent::TableName(table_name) = ident {
+            cache.invalidate(table_name).await
+        }
+        Ok(())
+    })
+}
+
+fn filter(ident: &CacheIdent) -> bool {
+    matches!(ident, CacheIdent::TableName(_))
+}
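Note: a sketch of how this constructor is expected to be wired up. The `Cache::builder()` call is standard moka API; the cache name and capacity are placeholders, and the two sub-caches are assumed to come from the fundamental cache registry — none of that wiring is part of this file:

    use std::sync::Arc;
    use moka::future::Cache;

    fn build_table_cache(
        table_info_cache: TableInfoCacheRef,
        table_name_cache: TableNameCacheRef,
    ) -> TableCacheRef {
        // Capacity is illustrative; tune per deployment.
        let inner = Cache::builder().max_capacity(65536).build();
        Arc::new(new_table_cache(
            "table_cache".to_string(),
            inner,
            table_info_cache,
            table_name_cache,
        ))
    }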
@@ -15,15 +15,25 @@
 use std::collections::HashMap;
 use std::sync::Arc;
 
+use bytes::Bytes;
 use common_catalog::format_full_table_name;
+use common_query::logical_plan::SubstraitPlanDecoderRef;
 use datafusion::common::{ResolvedTableReference, TableReference};
-use datafusion::datasource::provider_as_source;
+use datafusion::datasource::view::ViewTable;
+use datafusion::datasource::{provider_as_source, TableProvider};
 use datafusion::logical_expr::TableSource;
 use session::context::QueryContext;
-use snafu::{ensure, OptionExt};
+use snafu::{ensure, OptionExt, ResultExt};
+use table::metadata::TableType;
 use table::table::adapter::DfTableProviderAdapter;
+mod dummy_catalog;
+use dummy_catalog::DummyCatalogList;
 
-use crate::error::{QueryAccessDeniedSnafu, Result, TableNotExistSnafu};
+use crate::error::{
+    CastManagerSnafu, DatafusionSnafu, DecodePlanSnafu, GetViewCacheSnafu, QueryAccessDeniedSnafu,
+    Result, TableNotExistSnafu, ViewInfoNotFoundSnafu,
+};
+use crate::kvbackend::KvBackendCatalogManager;
 use crate::CatalogManagerRef;
 
 pub struct DfTableSourceProvider {
@@ -32,6 +42,7 @@ pub struct DfTableSourceProvider {
     disallow_cross_catalog_query: bool,
     default_catalog: String,
     default_schema: String,
+    plan_decoder: SubstraitPlanDecoderRef,
 }
 
 impl DfTableSourceProvider {
@@ -39,6 +50,7 @@ impl DfTableSourceProvider {
         catalog_manager: CatalogManagerRef,
         disallow_cross_catalog_query: bool,
         query_ctx: &QueryContext,
+        plan_decoder: SubstraitPlanDecoderRef,
     ) -> Self {
         Self {
             catalog_manager,
@@ -46,6 +58,7 @@ impl DfTableSourceProvider {
             resolved_tables: HashMap::new(),
             default_catalog: query_ctx.current_catalog().to_owned(),
             default_schema: query_ctx.current_schema().to_owned(),
+            plan_decoder,
        }
    }
 
@@ -94,8 +107,39 @@ impl DfTableSourceProvider {
                 table: format_full_table_name(catalog_name, schema_name, table_name),
             })?;
 
-        let provider = DfTableProviderAdapter::new(table);
-        let source = provider_as_source(Arc::new(provider));
+        let provider: Arc<dyn TableProvider> = if table.table_info().table_type == TableType::View {
+            let catalog_manager = self
+                .catalog_manager
+                .as_any()
+                .downcast_ref::<KvBackendCatalogManager>()
+                .context(CastManagerSnafu)?;
+
+            let view_info = catalog_manager
+                .view_info_cache()?
+                .get(table.table_info().ident.table_id)
+                .await
+                .context(GetViewCacheSnafu)?
+                .context(ViewInfoNotFoundSnafu {
+                    name: &table.table_info().name,
+                })?;
+
+            // Build the catalog list provider for deserialization.
+            let catalog_list = Arc::new(DummyCatalogList::new(self.catalog_manager.clone()));
+            let logical_plan = self
+                .plan_decoder
+                .decode(Bytes::from(view_info.view_info.clone()), catalog_list, true)
+                .await
+                .context(DecodePlanSnafu {
+                    name: &table.table_info().name,
+                })?;
+
+            Arc::new(ViewTable::try_new(logical_plan, None).context(DatafusionSnafu)?)
+        } else {
+            Arc::new(DfTableProviderAdapter::new(table))
+        };
+
+        let source = provider_as_source(provider);
+
         let _ = self.resolved_tables.insert(resolved_name, source.clone());
         Ok(source)
    }
@@ -103,6 +147,7 @@ impl DfTableSourceProvider {
 
 #[cfg(test)]
 mod tests {
+    use common_query::test_util::DummyDecoder;
     use session::context::QueryContext;
 
     use super::*;
@@ -112,8 +157,12 @@ mod tests {
     fn test_validate_table_ref() {
         let query_ctx = &QueryContext::with("greptime", "public");
 
-        let table_provider =
-            DfTableSourceProvider::new(MemoryCatalogManager::with_default_setup(), true, query_ctx);
+        let table_provider = DfTableSourceProvider::new(
+            MemoryCatalogManager::with_default_setup(),
+            true,
+            query_ctx,
+            DummyDecoder::arc(),
+        );
 
         let table_ref = TableReference::bare("table_name");
         let result = table_provider.resolve_table_ref(table_ref);
@@ -148,4 +197,99 @@ mod tests {
         let table_ref = TableReference::full("greptime", "greptime_private", "columns");
         assert!(table_provider.resolve_table_ref(table_ref).is_ok());
    }
+
+    use std::collections::HashSet;
+
+    use arrow::datatypes::{DataType, Field, Schema, SchemaRef};
+    use cache::{build_fundamental_cache_registry, with_default_composite_cache_registry};
+    use common_config::Mode;
+    use common_meta::cache::{CacheRegistryBuilder, LayeredCacheRegistryBuilder};
+    use common_meta::key::TableMetadataManager;
+    use common_meta::kv_backend::memory::MemoryKvBackend;
+    use common_query::error::Result as QueryResult;
+    use common_query::logical_plan::SubstraitPlanDecoder;
+    use datafusion::catalog::CatalogProviderList;
+    use datafusion::logical_expr::builder::LogicalTableSource;
+    use datafusion::logical_expr::{col, lit, LogicalPlan, LogicalPlanBuilder};
+
+    struct MockDecoder;
+    impl MockDecoder {
+        pub fn arc() -> Arc<Self> {
+            Arc::new(MockDecoder)
+        }
+    }
+
+    #[async_trait::async_trait]
+    impl SubstraitPlanDecoder for MockDecoder {
+        async fn decode(
+            &self,
+            _message: bytes::Bytes,
+            _catalog_list: Arc<dyn CatalogProviderList>,
+            _optimize: bool,
+        ) -> QueryResult<LogicalPlan> {
+            Ok(mock_plan())
+        }
+    }
+
+    fn mock_plan() -> LogicalPlan {
+        let schema = Schema::new(vec![
+            Field::new("id", DataType::Int32, true),
+            Field::new("name", DataType::Utf8, true),
+        ]);
+        let table_source = LogicalTableSource::new(SchemaRef::new(schema));
+
+        let projection = None;
+
+        let builder =
+            LogicalPlanBuilder::scan("person", Arc::new(table_source), projection).unwrap();
+
+        builder
+            .filter(col("id").gt(lit(500)))
+            .unwrap()
+            .build()
+            .unwrap()
+    }
+
+    #[tokio::test]
+    async fn test_resolve_view() {
+        let query_ctx = &QueryContext::with("greptime", "public");
+        let backend = Arc::new(MemoryKvBackend::default());
+        let layered_cache_builder = LayeredCacheRegistryBuilder::default()
+            .add_cache_registry(CacheRegistryBuilder::default().build());
+        let fundamental_cache_registry = build_fundamental_cache_registry(backend.clone());
+        let layered_cache_registry = Arc::new(
+            with_default_composite_cache_registry(
+                layered_cache_builder.add_cache_registry(fundamental_cache_registry),
+            )
+            .unwrap()
+            .build(),
+        );
+
+        let catalog_manager = KvBackendCatalogManager::new(
+            Mode::Standalone,
+            None,
+            backend.clone(),
+            layered_cache_registry,
+        );
+        let table_metadata_manager = TableMetadataManager::new(backend);
+        let mut view_info = common_meta::key::test_utils::new_test_table_info(1024, vec![]);
+        view_info.table_type = TableType::View;
+        let logical_plan = vec![1, 2, 3];
+        // Create view metadata
+        table_metadata_manager
+            .create_view_metadata(view_info.clone().into(), logical_plan, HashSet::new())
+            .await
+            .unwrap();
+
+        let mut table_provider =
+            DfTableSourceProvider::new(catalog_manager, true, query_ctx, MockDecoder::arc());
+
+        // View not found
+        let table_ref = TableReference::bare("not_exists_view");
+        assert!(table_provider.resolve_table(table_ref).await.is_err());
+
+        let table_ref = TableReference::bare(view_info.name);
+        let source = table_provider.resolve_table(table_ref).await.unwrap();
+        assert_eq!(*source.get_logical_plan().unwrap(), mock_plan());
+    }
 }
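Note: for readers skimming the hunk above, the new view branch boils down to three steps. This is a condensed restatement of that code, not a separate API; `plan_decoder`, `view_info`, and `catalog_manager` are the locals of `resolve_table`, and the error contexts are omitted here:

    // 1. Fetch the serialized plan for the view from the view info cache.
    // 2. Decode it against a DummyCatalogList so table names resolve through the CatalogManager.
    // 3. Wrap the decoded LogicalPlan in DataFusion's ViewTable and expose it as a TableSource.
    let catalog_list = Arc::new(DummyCatalogList::new(catalog_manager.clone()));
    let plan = plan_decoder
        .decode(Bytes::from(view_info.view_info.clone()), catalog_list, true)
        .await?;
    let provider: Arc<dyn TableProvider> = Arc::new(ViewTable::try_new(plan, None)?);
    let source = provider_as_source(provider);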
src/catalog/src/table_source/dummy_catalog.rs (new file, 129 lines)
@@ -0,0 +1,129 @@
+// Copyright 2023 Greptime Team
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+//! Dummy catalog for region server.
+
+use std::any::Any;
+use std::sync::Arc;
+
+use async_trait::async_trait;
+use common_catalog::format_full_table_name;
+use datafusion::catalog::schema::SchemaProvider;
+use datafusion::catalog::{CatalogProvider, CatalogProviderList};
+use datafusion::datasource::TableProvider;
+use snafu::OptionExt;
+use table::table::adapter::DfTableProviderAdapter;
+
+use crate::error::TableNotExistSnafu;
+use crate::CatalogManagerRef;
+
+/// Delegate the resolving requests to the `[CatalogManager]` unconditionally.
+#[derive(Clone)]
+pub struct DummyCatalogList {
+    catalog_manager: CatalogManagerRef,
+}
+
+impl DummyCatalogList {
+    /// Creates a new catalog list with the given catalog manager.
+    pub fn new(catalog_manager: CatalogManagerRef) -> Self {
+        Self { catalog_manager }
+    }
+}
+
+impl CatalogProviderList for DummyCatalogList {
+    fn as_any(&self) -> &dyn Any {
+        self
+    }
+
+    fn register_catalog(
+        &self,
+        _name: String,
+        _catalog: Arc<dyn CatalogProvider>,
+    ) -> Option<Arc<dyn CatalogProvider>> {
+        None
+    }
+
+    fn catalog_names(&self) -> Vec<String> {
+        vec![]
+    }
+
+    fn catalog(&self, catalog_name: &str) -> Option<Arc<dyn CatalogProvider>> {
+        Some(Arc::new(DummyCatalogProvider {
+            catalog_name: catalog_name.to_string(),
+            catalog_manager: self.catalog_manager.clone(),
+        }))
+    }
+}
+
+/// A dummy catalog provider for [DummyCatalogList].
+#[derive(Clone)]
+struct DummyCatalogProvider {
+    catalog_name: String,
+    catalog_manager: CatalogManagerRef,
+}
+
+impl CatalogProvider for DummyCatalogProvider {
+    fn as_any(&self) -> &dyn Any {
+        self
+    }
+
+    fn schema_names(&self) -> Vec<String> {
+        vec![]
+    }
+
+    fn schema(&self, schema_name: &str) -> Option<Arc<dyn SchemaProvider>> {
+        Some(Arc::new(DummySchemaProvider {
+            catalog_name: self.catalog_name.clone(),
+            schema_name: schema_name.to_string(),
+            catalog_manager: self.catalog_manager.clone(),
+        }))
+    }
+}
+
+/// A dummy schema provider for [DummyCatalogList].
+#[derive(Clone)]
+struct DummySchemaProvider {
+    catalog_name: String,
+    schema_name: String,
+    catalog_manager: CatalogManagerRef,
+}
+
+#[async_trait]
+impl SchemaProvider for DummySchemaProvider {
+    fn as_any(&self) -> &dyn Any {
+        self
+    }
+
+    fn table_names(&self) -> Vec<String> {
+        vec![]
+    }
+
+    async fn table(&self, name: &str) -> datafusion::error::Result<Option<Arc<dyn TableProvider>>> {
+        let table = self
+            .catalog_manager
+            .table(&self.catalog_name, &self.schema_name, name)
+            .await?
+            .with_context(|| TableNotExistSnafu {
+                table: format_full_table_name(&self.catalog_name, &self.schema_name, name),
+            })?;
+
+        let table_provider: Arc<dyn TableProvider> = Arc::new(DfTableProviderAdapter::new(table));
+
+        Ok(Some(table_provider))
+    }
+
+    fn table_exist(&self, _name: &str) -> bool {
+        true
+    }
+}
@@ -19,9 +19,10 @@ use api::v1::prometheus_gateway_client::PrometheusGatewayClient;
 use api::v1::region::region_client::RegionClient as PbRegionClient;
 use api::v1::HealthCheckRequest;
 use arrow_flight::flight_service_client::FlightServiceClient;
-use common_grpc::channel_manager::ChannelManager;
+use common_grpc::channel_manager::{ChannelConfig, ChannelManager, ClientTlsOption};
 use parking_lot::RwLock;
 use snafu::{OptionExt, ResultExt};
+use tonic::codec::CompressionEncoding;
 use tonic::transport::Channel;
 
 use crate::load_balance::{LoadBalance, Loadbalancer};
@@ -86,6 +87,17 @@ impl Client {
         Self::with_manager_and_urls(ChannelManager::new(), urls)
    }
 
+    pub fn with_tls_and_urls<U, A>(urls: A, client_tls: ClientTlsOption) -> Result<Self>
+    where
+        U: AsRef<str>,
+        A: AsRef<[U]>,
+    {
+        let channel_config = ChannelConfig::default().client_tls_config(client_tls);
+        let channel_manager = ChannelManager::with_tls_config(channel_config)
+            .context(error::CreateTlsChannelSnafu)?;
+        Ok(Self::with_manager_and_urls(channel_manager, urls))
+    }
+
     pub fn with_manager_and_urls<U, A>(channel_manager: ChannelManager, urls: A) -> Self
     where
         U: AsRef<str>,
@@ -151,24 +163,34 @@ impl Client {
 
     pub fn make_flight_client(&self) -> Result<FlightClient> {
         let (addr, channel) = self.find_channel()?;
-        Ok(FlightClient {
-            addr,
-            client: FlightServiceClient::new(channel)
-                .max_decoding_message_size(self.max_grpc_recv_message_size())
-                .max_encoding_message_size(self.max_grpc_send_message_size()),
-        })
+        let client = FlightServiceClient::new(channel)
+            .max_decoding_message_size(self.max_grpc_recv_message_size())
+            .max_encoding_message_size(self.max_grpc_send_message_size())
+            .accept_compressed(CompressionEncoding::Zstd)
+            .send_compressed(CompressionEncoding::Zstd);
+
+        Ok(FlightClient { addr, client })
    }
 
-    pub(crate) fn raw_region_client(&self) -> Result<PbRegionClient<Channel>> {
-        let (_, channel) = self.find_channel()?;
-        Ok(PbRegionClient::new(channel)
-            .max_decoding_message_size(self.max_grpc_recv_message_size())
-            .max_encoding_message_size(self.max_grpc_send_message_size()))
+    pub(crate) fn raw_region_client(&self) -> Result<(String, PbRegionClient<Channel>)> {
+        let (addr, channel) = self.find_channel()?;
+        let client = PbRegionClient::new(channel)
+            .max_decoding_message_size(self.max_grpc_recv_message_size())
+            .max_encoding_message_size(self.max_grpc_send_message_size())
+            .accept_compressed(CompressionEncoding::Zstd)
+            .send_compressed(CompressionEncoding::Zstd);
+        Ok((addr, client))
    }
 
     pub fn make_prometheus_gateway_client(&self) -> Result<PrometheusGatewayClient<Channel>> {
         let (_, channel) = self.find_channel()?;
-        Ok(PrometheusGatewayClient::new(channel))
+        let client = PrometheusGatewayClient::new(channel)
+            .accept_compressed(CompressionEncoding::Gzip)
+            .accept_compressed(CompressionEncoding::Zstd)
+            .send_compressed(CompressionEncoding::Gzip)
+            .send_compressed(CompressionEncoding::Zstd);
+        Ok(client)
    }
 
     pub async fn health_check(&self) -> Result<()> {
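Note: a usage sketch for the new TLS constructor above. `ClientTlsOption` comes from `common_grpc::channel_manager`, but how it is populated (certificate paths and so on) is not part of this diff, so the helper below simply takes it as a parameter; the address is illustrative:

    use client::Client;
    use common_grpc::channel_manager::ClientTlsOption;

    fn secure_client(tls: ClientTlsOption) -> client::Result<Client> {
        // Builds a ChannelManager with the TLS config under the hood and then
        // reuses the ordinary URL-based constructor.
        Client::with_tls_and_urls(["127.0.0.1:4001"], tls)
    }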
@@ -23,8 +23,6 @@ use api::v1::{
 };
 use arrow_flight::Ticket;
 use async_stream::stream;
-use client::error::{ConvertFlightDataSnafu, Error, IllegalFlightMessagesSnafu, ServerSnafu};
-use client::{from_grpc_response, Client, Result};
 use common_error::ext::{BoxedError, ErrorExt};
 use common_grpc::flight::{FlightDecoder, FlightMessage};
 use common_query::Output;
@@ -37,7 +35,8 @@ use prost::Message;
 use snafu::{ensure, ResultExt};
 use tonic::transport::Channel;
 
-pub const DEFAULT_LOOKBACK_STRING: &str = "5m";
+use crate::error::{ConvertFlightDataSnafu, Error, IllegalFlightMessagesSnafu, ServerSnafu};
+use crate::{from_grpc_response, Client, Result};
 
 #[derive(Clone, Debug, Default)]
 pub struct Database {
@@ -105,10 +104,18 @@ impl Database {
         self.catalog = catalog.into();
    }
 
+    pub fn catalog(&self) -> &String {
+        &self.catalog
+    }
+
     pub fn set_schema(&mut self, schema: impl Into<String>) {
         self.schema = schema.into();
    }
 
+    pub fn schema(&self) -> &String {
+        &self.schema
+    }
+
     pub fn set_timezone(&mut self, timezone: impl Into<String>) {
         self.timezone = timezone.into();
    }
@@ -156,6 +163,13 @@ impl Database {
         .await
    }
 
+    pub async fn logical_plan(&self, logical_plan: Vec<u8>) -> Result<Output> {
+        self.do_get(Request::Query(QueryRequest {
+            query: Some(Query::LogicalPlan(logical_plan)),
+        }))
+        .await
+    }
+
     pub async fn create(&self, expr: CreateTableExpr) -> Result<Output> {
         self.do_get(Request::Ddl(DdlRequest {
             expr: Some(DdlExpr::CreateTable(expr)),
@@ -269,16 +283,12 @@ struct FlightContext {
 
 #[cfg(test)]
 mod tests {
+    use std::assert_matches::assert_matches;
+
     use api::v1::auth_header::AuthScheme;
     use api::v1::{AuthHeader, Basic};
-    use clap::Parser;
-    use client::Client;
-    use cmd::error::Result as CmdResult;
-    use cmd::options::{GlobalOptions, Options};
-    use cmd::{cli, standalone, App};
-    use common_catalog::consts::{DEFAULT_CATALOG_NAME, DEFAULT_SCHEMA_NAME};
 
-    use super::{Database, FlightContext};
+    use super::*;
 
     #[test]
     fn test_flight_ctx() {
@@ -294,79 +304,11 @@ mod tests {
             auth_scheme: Some(basic),
         });
 
-        assert!(matches!(
+        assert_matches!(
             ctx.auth_header,
             Some(AuthHeader {
                 auth_scheme: Some(AuthScheme::Basic(_)),
             })
-        ))
-    }
-
-    #[tokio::test(flavor = "multi_thread")]
-    async fn test_export_create_table_with_quoted_names() -> CmdResult<()> {
-        let output_dir = tempfile::tempdir().unwrap();
-
-        let standalone = standalone::Command::parse_from([
-            "standalone",
-            "start",
-            "--data-home",
-            &*output_dir.path().to_string_lossy(),
-        ]);
-        let Options::Standalone(standalone_opts) =
-            standalone.load_options(&GlobalOptions::default())?
-        else {
-            unreachable!()
-        };
-        let mut instance = standalone.build(*standalone_opts).await?;
-        instance.start().await?;
-
-        let client = Client::with_urls(["127.0.0.1:4001"]);
-        let database = Database::new(DEFAULT_CATALOG_NAME, DEFAULT_SCHEMA_NAME, client);
-        database
-            .sql(r#"CREATE DATABASE "cli.export.create_table";"#)
-            .await
-            .unwrap();
-        database
-            .sql(
-                r#"CREATE TABLE "cli.export.create_table"."a.b.c"(
-                    ts TIMESTAMP,
-                    TIME INDEX (ts)
-                ) engine=mito;
-                "#,
-            )
-            .await
-            .unwrap();
-
-        let output_dir = tempfile::tempdir().unwrap();
-        let cli = cli::Command::parse_from([
-            "cli",
-            "export",
-            "--addr",
-            "127.0.0.1:4000",
-            "--output-dir",
-            &*output_dir.path().to_string_lossy(),
-            "--target",
-            "create-table",
-        ]);
-        let mut cli_app = cli.build().await?;
-        cli_app.start().await?;
-
-        instance.stop().await?;
-
-        let output_file = output_dir
-            .path()
-            .join("greptime-cli.export.create_table.sql");
-        let res = std::fs::read_to_string(output_file).unwrap();
-        let expect = r#"CREATE TABLE IF NOT EXISTS "a.b.c" (
-  "ts" TIMESTAMP(3) NOT NULL,
-  TIME INDEX ("ts")
-)
-
-ENGINE=mito
-;
-"#;
-        assert_eq!(res.trim(), expect.trim());
-
-        Ok(())
+        )
    }
 }
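Note: putting the relocated `Database` (now behind the client crate's `testing` feature) and its new accessors together. The address and SQL are illustrative; the constructor and `sql` call follow the usage visible in the removed integration test above:

    use client::{Client, Database};
    use common_catalog::consts::{DEFAULT_CATALOG_NAME, DEFAULT_SCHEMA_NAME};

    async fn run_query() -> client::Result<()> {
        let client = Client::with_urls(["127.0.0.1:4001"]);
        let database = Database::new(DEFAULT_CATALOG_NAME, DEFAULT_SCHEMA_NAME, client);
        // The new read-only accessors expose what set_catalog/set_schema configured.
        assert_eq!(database.catalog(), DEFAULT_CATALOG_NAME);
        assert_eq!(database.schema(), DEFAULT_SCHEMA_NAME);
        let _output = database.sql("SELECT 1").await?;
        Ok(())
    }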
@@ -18,7 +18,7 @@ use common_error::ext::{BoxedError, ErrorExt};
 use common_error::status_code::StatusCode;
 use common_error::{GREPTIME_DB_HEADER_ERROR_CODE, GREPTIME_DB_HEADER_ERROR_MSG};
 use common_macro::stack_trace_debug;
-use snafu::{Location, Snafu};
+use snafu::{location, Location, Snafu};
 use tonic::{Code, Status};
 
 #[derive(Snafu)]
@@ -26,7 +26,11 @@ use tonic::{Code, Status};
 #[stack_trace_debug]
 pub enum Error {
     #[snafu(display("Illegal Flight messages, reason: {}", reason))]
-    IllegalFlightMessages { reason: String, location: Location },
+    IllegalFlightMessages {
+        reason: String,
+        #[snafu(implicit)]
+        location: Location,
+    },
 
     #[snafu(display("Failed to do Flight get, code: {}", tonic_code))]
     FlightGet {
@@ -37,47 +41,85 @@ pub enum Error {
 
     #[snafu(display("Failure occurs during handling request"))]
     HandleRequest {
+        #[snafu(implicit)]
         location: Location,
         source: BoxedError,
     },
 
     #[snafu(display("Failed to convert FlightData"))]
     ConvertFlightData {
+        #[snafu(implicit)]
         location: Location,
         source: common_grpc::Error,
     },
 
     #[snafu(display("Column datatype error"))]
     ColumnDataType {
+        #[snafu(implicit)]
         location: Location,
         source: api::error::Error,
     },
 
     #[snafu(display("Illegal GRPC client state: {}", err_msg))]
-    IllegalGrpcClientState { err_msg: String, location: Location },
+    IllegalGrpcClientState {
+        err_msg: String,
+        #[snafu(implicit)]
+        location: Location,
+    },
 
     #[snafu(display("Missing required field in protobuf, field: {}", field))]
-    MissingField { field: String, location: Location },
+    MissingField {
+        field: String,
+        #[snafu(implicit)]
+        location: Location,
+    },
 
     #[snafu(display("Failed to create gRPC channel, peer address: {}", addr))]
     CreateChannel {
         addr: String,
+        #[snafu(implicit)]
         location: Location,
         source: common_grpc::error::Error,
     },
 
-    #[snafu(display("Failed to request RegionServer, code: {}", code))]
-    RegionServer { code: Code, source: BoxedError },
+    #[snafu(display("Failed to create Tls channel manager"))]
+    CreateTlsChannel {
+        #[snafu(implicit)]
+        location: Location,
+        source: common_grpc::error::Error,
+    },
+
+    #[snafu(display("Failed to request RegionServer {}, code: {}", addr, code))]
+    RegionServer {
+        addr: String,
+        code: Code,
+        source: BoxedError,
+        #[snafu(implicit)]
+        location: Location,
+    },
 
     // Server error carried in Tonic Status's metadata.
     #[snafu(display("{}", msg))]
-    Server { code: StatusCode, msg: String },
+    Server {
+        code: StatusCode,
+        msg: String,
+        #[snafu(implicit)]
+        location: Location,
+    },
 
     #[snafu(display("Illegal Database response: {err_msg}"))]
-    IllegalDatabaseResponse { err_msg: String },
+    IllegalDatabaseResponse {
+        err_msg: String,
+        #[snafu(implicit)]
+        location: Location,
+    },
 
     #[snafu(display("Failed to send request with streaming: {}", err_msg))]
-    ClientStreaming { err_msg: String, location: Location },
+    ClientStreaming {
+        err_msg: String,
+        #[snafu(implicit)]
+        location: Location,
+    },
 }
 
 pub type Result<T> = std::result::Result<T, Error>;
@@ -95,9 +137,9 @@ impl ErrorExt for Error {
             Error::FlightGet { source, .. }
             | Error::HandleRequest { source, .. }
             | Error::RegionServer { source, .. } => source.status_code(),
-            Error::CreateChannel { source, .. } | Error::ConvertFlightData { source, .. } => {
-                source.status_code()
-            }
+            Error::CreateChannel { source, .. }
+            | Error::ConvertFlightData { source, .. }
+            | Error::CreateTlsChannel { source, .. } => source.status_code(),
             Error::IllegalGrpcClientState { .. } => StatusCode::Unexpected,
        }
    }
@@ -128,7 +170,11 @@ impl From<Status> for Error {
         let msg = get_metadata_value(&e, GREPTIME_DB_HEADER_ERROR_MSG)
             .unwrap_or_else(|| e.message().to_string());
 
-        Self::Server { code, msg }
+        Self::Server {
+            code,
+            msg,
+            location: location!(),
+        }
    }
 }
 
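Note: the migration pattern in this hunk is mechanical — every `location: Location` field gains `#[snafu(implicit)]` so that context selectors fill it in automatically, and manual construction uses snafu's `location!()` macro. A self-contained illustration (the error type and message are made up, only the attribute/macro usage mirrors the diff):

    use snafu::{location, Location, Snafu};

    #[derive(Debug, Snafu)]
    #[snafu(display("Something went wrong: {reason}"))]
    struct DemoError {
        reason: String,
        #[snafu(implicit)]
        location: Location,
    }

    fn manual() -> DemoError {
        // When not going through a generated context selector,
        // capture the caller location explicitly.
        DemoError {
            reason: "demo".to_string(),
            location: location!(),
        }
    }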
@@ -12,8 +12,12 @@
 // See the License for the specific language governing permissions and
 // limitations under the License.
 
+#![feature(assert_matches)]
+
 mod client;
 pub mod client_manager;
+#[cfg(feature = "testing")]
+mod database;
 pub mod error;
 pub mod load_balance;
 mod metrics;
@@ -29,6 +33,8 @@ pub use common_recordbatch::{RecordBatches, SendableRecordBatchStream};
 use snafu::OptionExt;
 
 pub use self::client::Client;
+#[cfg(feature = "testing")]
+pub use self::database::Database;
 pub use self::error::{Error, Result};
 use crate::error::{IllegalDatabaseResponseSnafu, ServerSnafu};
 
@@ -177,7 +177,7 @@ impl RegionRequester {
             .with_label_values(&[request_type.as_str()])
             .start_timer();
 
-        let mut client = self.client.raw_region_client()?;
+        let (addr, mut client) = self.client.raw_region_client()?;
 
         let response = client
             .handle(request)
@@ -187,8 +187,10 @@ impl RegionRequester {
                 let err: error::Error = e.into();
                 // Uses `Error::RegionServer` instead of `Error::Server`
                 error::Error::RegionServer {
+                    addr,
                     code,
                     source: BoxedError::new(err),
+                    location: location!(),
                }
             })?
             .into_inner();
@@ -272,7 +274,7 @@ mod test {
                 err_msg: "blabla".to_string(),
             }),
         }));
-        let Server { code, msg } = result.unwrap_err() else {
+        let Server { code, msg, .. } = result.unwrap_err() else {
             unreachable!()
         };
         assert_eq!(code, StatusCode::Internal);
@@ -19,6 +19,7 @@ workspace = true
 async-trait.workspace = true
 auth.workspace = true
 base64.workspace = true
+cache.workspace = true
 catalog.workspace = true
 chrono.workspace = true
 clap.workspace = true
@@ -27,6 +28,7 @@ common-base.workspace = true
 common-catalog.workspace = true
 common-config.workspace = true
 common-error.workspace = true
+common-grpc.workspace = true
 common-macro.workspace = true
 common-meta.workspace = true
 common-procedure.workspace = true
@@ -39,12 +41,12 @@ common-telemetry = { workspace = true, features = [
 common-time.workspace = true
 common-version.workspace = true
 common-wal.workspace = true
-config = "0.13"
 datanode.workspace = true
 datatypes.workspace = true
 either = "1.8"
 etcd-client.workspace = true
 file-engine.workspace = true
+flow.workspace = true
 frontend.workspace = true
 futures.workspace = true
 human-panic = "1.2.2"
@@ -52,6 +54,7 @@ lazy_static.workspace = true
 meta-client.workspace = true
 meta-srv.workspace = true
 mito2.workspace = true
+moka.workspace = true
 nu-ansi-term = "0.46"
 plugins.workspace = true
 prometheus.workspace = true
@@ -71,11 +74,13 @@ substrait.workspace = true
 table.workspace = true
 tokio.workspace = true
 toml.workspace = true
+tracing-appender = "0.2"
 
 [target.'cfg(not(windows))'.dependencies]
 tikv-jemallocator = "0.5"
 
 [dev-dependencies]
+client = { workspace = true, features = ["testing"] }
 common-test-util.workspace = true
 serde.workspace = true
 temp-env = "0.3"
@@ -14,13 +14,11 @@
 
 #![doc = include_str!("../../../../README.md")]
 
-use std::fmt;
-
 use clap::{Parser, Subcommand};
 use cmd::error::Result;
-use cmd::options::{GlobalOptions, Options};
-use cmd::{cli, datanode, frontend, log_versions, metasrv, standalone, start_app, App};
-use common_version::{short_version, version};
+use cmd::options::GlobalOptions;
+use cmd::{cli, datanode, frontend, metasrv, standalone, App};
+use common_version::version;
 
 #[derive(Parser)]
 #[command(name = "greptime", author, version, long_version = version!(), about)]
@@ -56,58 +54,6 @@ enum SubCommand {
     Cli(cli::Command),
 }
 
-impl SubCommand {
-    async fn build(self, opts: Options) -> Result<Box<dyn App>> {
-        let app: Box<dyn App> = match (self, opts) {
-            (SubCommand::Datanode(cmd), Options::Datanode(dn_opts)) => {
-                let app = cmd.build(*dn_opts).await?;
-                Box::new(app) as _
-            }
-            (SubCommand::Frontend(cmd), Options::Frontend(fe_opts)) => {
-                let app = cmd.build(*fe_opts).await?;
-                Box::new(app) as _
-            }
-            (SubCommand::Metasrv(cmd), Options::Metasrv(meta_opts)) => {
-                let app = cmd.build(*meta_opts).await?;
-                Box::new(app) as _
-            }
-            (SubCommand::Standalone(cmd), Options::Standalone(opts)) => {
-                let app = cmd.build(*opts).await?;
-                Box::new(app) as _
-            }
-            (SubCommand::Cli(cmd), Options::Cli(_)) => {
-                let app = cmd.build().await?;
-                Box::new(app) as _
-            }
-
-            _ => unreachable!(),
-        };
-        Ok(app)
-    }
-
-    fn load_options(&self, global_options: &GlobalOptions) -> Result<Options> {
-        match self {
-            SubCommand::Datanode(cmd) => cmd.load_options(global_options),
-            SubCommand::Frontend(cmd) => cmd.load_options(global_options),
-            SubCommand::Metasrv(cmd) => cmd.load_options(global_options),
-            SubCommand::Standalone(cmd) => cmd.load_options(global_options),
-            SubCommand::Cli(cmd) => cmd.load_options(global_options),
-        }
-    }
-}
-
-impl fmt::Display for SubCommand {
-    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
-        match self {
-            SubCommand::Datanode(..) => write!(f, "greptime-datanode"),
-            SubCommand::Frontend(..) => write!(f, "greptime-frontend"),
-            SubCommand::Metasrv(..) => write!(f, "greptime-metasrv"),
-            SubCommand::Standalone(..) => write!(f, "greptime-standalone"),
-            SubCommand::Cli(_) => write!(f, "greptime-cli"),
-        }
-    }
-}
-
 #[cfg(not(windows))]
 #[global_allocator]
 static ALLOC: tikv_jemallocator::Jemalloc = tikv_jemallocator::Jemalloc;
@@ -119,24 +65,38 @@ async fn main() -> Result<()> {
 }
 
 async fn start(cli: Command) -> Result<()> {
-    let subcmd = cli.subcmd;
-
-    let app_name = subcmd.to_string();
-
-    let opts = subcmd.load_options(&cli.global_options)?;
-
-    let _guard = common_telemetry::init_global_logging(
-        &app_name,
-        opts.logging_options(),
-        cli.global_options.tracing_options(),
-        opts.node_id(),
-    );
-
-    log_versions(version!(), short_version!());
-
-    let app = subcmd.build(opts).await?;
-
-    start_app(app).await
+    match cli.subcmd {
+        SubCommand::Datanode(cmd) => {
+            cmd.build(cmd.load_options(&cli.global_options)?)
+                .await?
+                .run()
+                .await
+        }
+        SubCommand::Frontend(cmd) => {
+            cmd.build(cmd.load_options(&cli.global_options)?)
+                .await?
+                .run()
+                .await
+        }
+        SubCommand::Metasrv(cmd) => {
+            cmd.build(cmd.load_options(&cli.global_options)?)
+                .await?
+                .run()
+                .await
+        }
+        SubCommand::Standalone(cmd) => {
+            cmd.build(cmd.load_options(&cli.global_options)?)
+                .await?
+                .run()
+                .await
+        }
+        SubCommand::Cli(cmd) => {
+            cmd.build(cmd.load_options(&cli.global_options)?)
+                .await?
+                .run()
+                .await
+        }
+    }
 }
 
 fn setup_human_panic() {
@@ -22,23 +22,22 @@ mod helper;
 
 // Wait for https://github.com/GreptimeTeam/greptimedb/issues/2373
 #[allow(unused)]
-// mod repl;
-// TODO(weny): Removes it
-#[allow(deprecated)]
-mod upgrade;
+mod repl;
 
 use async_trait::async_trait;
 use bench::BenchTableMetadataCommand;
 use clap::Parser;
-use common_telemetry::logging::LoggingOptions;
-// pub use repl::Repl;
-use upgrade::UpgradeCommand;
+use common_telemetry::logging::{LoggingOptions, TracingOptions};
+pub use repl::Repl;
+use tracing_appender::non_blocking::WorkerGuard;
 
 use self::export::ExportCommand;
 use crate::error::Result;
-use crate::options::{GlobalOptions, Options};
+use crate::options::GlobalOptions;
 use crate::App;
 
+pub const APP_NAME: &str = "greptime-cli";
+
 #[async_trait]
 pub trait Tool: Send + Sync {
     async fn do_work(&self) -> Result<()>;
@@ -46,24 +45,34 @@ pub trait Tool: Send + Sync {
 
 pub struct Instance {
     tool: Box<dyn Tool>,
+
+    // Keep the logging guard to prevent the worker from being dropped.
+    _guard: Vec<WorkerGuard>,
 }
 
 impl Instance {
-    fn new(tool: Box<dyn Tool>) -> Self {
-        Self { tool }
+    fn new(tool: Box<dyn Tool>, guard: Vec<WorkerGuard>) -> Self {
+        Self {
+            tool,
+            _guard: guard,
+        }
    }
 }
 
 #[async_trait]
 impl App for Instance {
     fn name(&self) -> &str {
-        "greptime-cli"
+        APP_NAME
    }
 
     async fn start(&mut self) -> Result<()> {
         self.tool.do_work().await
    }
 
+    fn wait_signal(&self) -> bool {
+        false
+    }
+
     async fn stop(&self) -> Result<()> {
         Ok(())
    }
@@ -76,11 +85,18 @@ pub struct Command {
 }
 
 impl Command {
-    pub async fn build(self) -> Result<Instance> {
-        self.cmd.build().await
+    pub async fn build(&self, opts: LoggingOptions) -> Result<Instance> {
+        let guard = common_telemetry::init_global_logging(
+            APP_NAME,
+            &opts,
+            &TracingOptions::default(),
+            None,
+        );
+
+        self.cmd.build(guard).await
    }
 
-    pub fn load_options(&self, global_options: &GlobalOptions) -> Result<Options> {
+    pub fn load_options(&self, global_options: &GlobalOptions) -> Result<LoggingOptions> {
         let mut logging_opts = LoggingOptions::default();
 
         if let Some(dir) = &global_options.log_dir {
@@ -89,25 +105,23 @@ impl Command {
 
         logging_opts.level.clone_from(&global_options.log_level);
 
-        Ok(Options::Cli(Box::new(logging_opts)))
+        Ok(logging_opts)
    }
 }
 
 #[derive(Parser)]
 enum SubCommand {
     // Attach(AttachCommand),
-    Upgrade(UpgradeCommand),
     Bench(BenchTableMetadataCommand),
     Export(ExportCommand),
 }
 
 impl SubCommand {
-    async fn build(self) -> Result<Instance> {
+    async fn build(&self, guard: Vec<WorkerGuard>) -> Result<Instance> {
         match self {
             // SubCommand::Attach(cmd) => cmd.build().await,
-            SubCommand::Upgrade(cmd) => cmd.build().await,
-            SubCommand::Bench(cmd) => cmd.build().await,
-            SubCommand::Export(cmd) => cmd.build().await,
+            SubCommand::Bench(cmd) => cmd.build(guard).await,
+            SubCommand::Export(cmd) => cmd.build(guard).await,
        }
    }
 }
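Note: how the reworked CLI command is now driven by the binary's dispatch shown earlier in this diff: options are loaded first (now plain `LoggingOptions`), then `build` initializes logging itself and hands the worker guards to the `Instance`. A condensed sketch of that call sequence; the wrapper function is hypothetical, the calls are the ones from this diff:

    async fn run_cli(cmd: cli::Command, global_options: &GlobalOptions) -> Result<()> {
        // load_options -> LoggingOptions; build -> Instance that keeps the logging guard.
        cmd.build(cmd.load_options(global_options)?).await?.run().await
    }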
@@ -23,13 +23,14 @@ use common_meta::key::{TableMetadataManager, TableMetadataManagerRef};
 use common_meta::kv_backend::etcd::EtcdStore;
 use common_meta::peer::Peer;
 use common_meta::rpc::router::{Region, RegionRoute};
-use common_meta::table_name::TableName;
 use common_telemetry::info;
 use datatypes::data_type::ConcreteDataType;
 use datatypes::schema::{ColumnSchema, RawSchema};
 use rand::Rng;
 use store_api::storage::RegionNumber;
 use table::metadata::{RawTableInfo, RawTableMeta, TableId, TableIdent, TableType};
+use table::table_name::TableName;
+use tracing_appender::non_blocking::WorkerGuard;
 
 use self::metadata::TableMetadataBencher;
 use crate::cli::{Instance, Tool};
@@ -61,7 +62,7 @@ pub struct BenchTableMetadataCommand {
 }
 
 impl BenchTableMetadataCommand {
-    pub async fn build(&self) -> Result<Instance> {
+    pub async fn build(&self, guard: Vec<WorkerGuard>) -> Result<Instance> {
         let etcd_store = EtcdStore::with_endpoints([&self.etcd_addr], 128)
             .await
             .unwrap();
@@ -72,7 +73,7 @@ impl BenchTableMetadataCommand {
             table_metadata_manager,
             count: self.count,
         };
-        Ok(Instance::new(Box::new(tool)))
+        Ok(Instance::new(Box::new(tool), guard))
    }
 }
 
@@ -16,7 +16,7 @@ use std::time::Instant;
|
|||||||
|
|
||||||
use common_meta::key::table_route::TableRouteValue;
|
use common_meta::key::table_route::TableRouteValue;
|
||||||
use common_meta::key::TableMetadataManagerRef;
|
use common_meta::key::TableMetadataManagerRef;
|
||||||
use common_meta::table_name::TableName;
|
use table::table_name::TableName;
|
||||||
|
|
||||||
use crate::cli::bench::{
|
use crate::cli::bench::{
|
||||||
bench_self_recorded, create_region_routes, create_region_wal_options, create_table_info,
|
bench_self_recorded, create_region_routes, create_region_wal_options, create_table_info,
|
||||||
|
|||||||
@@ -12,6 +12,7 @@
|
|||||||
// See the License for the specific language governing permissions and
|
// See the License for the specific language governing permissions and
|
||||||
// limitations under the License.
|
// limitations under the License.
|
||||||
|
|
||||||
|
use std::collections::HashSet;
|
||||||
use std::path::Path;
|
use std::path::Path;
|
||||||
use std::sync::Arc;
|
use std::sync::Arc;
|
||||||
|
|
||||||
@@ -28,6 +29,8 @@ use snafu::{OptionExt, ResultExt};
|
|||||||
use tokio::fs::File;
|
use tokio::fs::File;
|
||||||
use tokio::io::{AsyncWriteExt, BufWriter};
|
use tokio::io::{AsyncWriteExt, BufWriter};
|
||||||
use tokio::sync::Semaphore;
|
use tokio::sync::Semaphore;
|
||||||
|
use tokio::time::Instant;
|
||||||
|
use tracing_appender::non_blocking::WorkerGuard;
|
||||||
|
|
||||||
use crate::cli::{Instance, Tool};
|
use crate::cli::{Instance, Tool};
|
||||||
use crate::error::{
|
use crate::error::{
|
||||||
@@ -78,7 +81,7 @@ pub struct ExportCommand {
|
|||||||
}
|
}
|
||||||
|
|
||||||
impl ExportCommand {
|
impl ExportCommand {
|
||||||
pub async fn build(&self) -> Result<Instance> {
|
pub async fn build(&self, guard: Vec<WorkerGuard>) -> Result<Instance> {
|
||||||
let (catalog, schema) = split_database(&self.database)?;
|
let (catalog, schema) = split_database(&self.database)?;
|
||||||
|
|
||||||
let auth_header = if let Some(basic) = &self.auth_basic {
|
let auth_header = if let Some(basic) = &self.auth_basic {
|
||||||
@@ -88,15 +91,18 @@ impl ExportCommand {
|
|||||||
None
|
None
|
||||||
};
|
};
|
||||||
|
|
||||||
Ok(Instance::new(Box::new(Export {
|
Ok(Instance::new(
|
||||||
addr: self.addr.clone(),
|
Box::new(Export {
|
||||||
catalog,
|
addr: self.addr.clone(),
|
||||||
schema,
|
catalog,
|
||||||
output_dir: self.output_dir.clone(),
|
schema,
|
||||||
parallelism: self.export_jobs,
|
output_dir: self.output_dir.clone(),
|
||||||
target: self.target.clone(),
|
parallelism: self.export_jobs,
|
||||||
auth_header,
|
target: self.target.clone(),
|
||||||
})))
|
auth_header,
|
||||||
|
}),
|
||||||
|
guard,
|
||||||
|
))
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -174,8 +180,34 @@ impl Export {
|
|||||||
}
|
}
|
||||||
|
|
||||||
/// Return a list of [`TableReference`] to be exported.
|
/// Return a list of [`TableReference`] to be exported.
|
||||||
/// Includes all tables under the given `catalog` and `schema`
|
/// Includes all tables under the given `catalog` and `schema`, with metric physical tables listed first.
|
||||||
async fn get_table_list(&self, catalog: &str, schema: &str) -> Result<Vec<TableReference>> {
|
async fn get_table_list(
|
||||||
|
&self,
|
||||||
|
catalog: &str,
|
||||||
|
schema: &str,
|
||||||
|
) -> Result<(Vec<TableReference>, Vec<TableReference>)> {
|
||||||
|
// Puts all metric physical tables first
|
||||||
|
let sql = format!(
|
||||||
|
"select table_catalog, table_schema, table_name from \
|
||||||
|
information_schema.columns where column_name = '__tsid' \
|
||||||
|
and table_catalog = \'{catalog}\' and table_schema = \'{schema}\'"
|
||||||
|
);
|
||||||
|
let result = self.sql(&sql).await?;
|
||||||
|
let Some(records) = result else {
|
||||||
|
EmptyResultSnafu.fail()?
|
||||||
|
};
|
||||||
|
let mut metric_physical_tables = HashSet::with_capacity(records.len());
|
||||||
|
for value in records {
|
||||||
|
let mut t = Vec::with_capacity(3);
|
||||||
|
for v in &value {
|
||||||
|
let serde_json::Value::String(value) = v else {
|
||||||
|
unreachable!()
|
||||||
|
};
|
||||||
|
t.push(value);
|
||||||
|
}
|
||||||
|
metric_physical_tables.insert((t[0].clone(), t[1].clone(), t[2].clone()));
|
||||||
|
}
|
||||||
|
|
||||||
// TODO: SQL injection hurts
|
// TODO: SQL injection hurts
|
||||||
let sql = format!(
|
let sql = format!(
|
||||||
"select table_catalog, table_schema, table_name from \
|
"select table_catalog, table_schema, table_name from \
|
||||||
@@ -190,10 +222,10 @@ impl Export {
|
|||||||
debug!("Fetched table list: {:?}", records);
|
debug!("Fetched table list: {:?}", records);
|
||||||
|
|
||||||
if records.is_empty() {
|
if records.is_empty() {
|
||||||
return Ok(vec![]);
|
return Ok((vec![], vec![]));
|
||||||
}
|
}
|
||||||
|
|
||||||
let mut result = Vec::with_capacity(records.len());
|
let mut remaining_tables = Vec::with_capacity(records.len());
|
||||||
for value in records {
|
for value in records {
|
||||||
let mut t = Vec::with_capacity(3);
|
let mut t = Vec::with_capacity(3);
|
||||||
for v in &value {
|
for v in &value {
|
||||||
@@ -202,10 +234,17 @@ impl Export {
|
|||||||
};
|
};
|
||||||
t.push(value);
|
t.push(value);
|
||||||
}
|
}
|
||||||
result.push((t[0].clone(), t[1].clone(), t[2].clone()));
|
let table = (t[0].clone(), t[1].clone(), t[2].clone());
|
||||||
|
// Ignores metric physical tables
|
||||||
|
if !metric_physical_tables.contains(&table) {
|
||||||
|
remaining_tables.push(table);
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
Ok(result)
|
Ok((
|
||||||
|
metric_physical_tables.into_iter().collect(),
|
||||||
|
remaining_tables,
|
||||||
|
))
|
||||||
}
|
}
|
||||||
|
|
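The table listing now runs two queries: one against `information_schema.columns` to find tables that expose the metric engine's internal `__tsid` column (the physical tables), and one for the full table list, with the physical tables filtered out of the second result so they are not exported twice. A condensed sketch of the selection step, assuming the result rows have already been decoded into `(catalog, schema, table)` strings (the JSON decoding in the diff is elided):

```rust
use std::collections::HashSet;

type TableRef = (String, String, String); // (catalog, schema, table)

/// Split a schema's tables into metric-engine physical tables and the rest.
/// `physical` holds the tables that expose the internal `__tsid` column.
fn split_tables(
    all_tables: Vec<TableRef>,
    physical: HashSet<TableRef>,
) -> (Vec<TableRef>, Vec<TableRef>) {
    let mut remaining = Vec::with_capacity(all_tables.len());
    for t in all_tables {
        // Logical tables are backed by a physical table; exporting the physical
        // table's schema first lets the logical CREATEs succeed on import.
        if !physical.contains(&t) {
            remaining.push(t);
        }
    }
    (physical.into_iter().collect(), remaining)
}
```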
||||||
async fn show_create_table(&self, catalog: &str, schema: &str, table: &str) -> Result<String> {
|
async fn show_create_table(&self, catalog: &str, schema: &str, table: &str) -> Result<String> {
|
||||||
@@ -225,6 +264,7 @@ impl Export {
|
|||||||
}
|
}
|
||||||
|
|
||||||
async fn export_create_table(&self) -> Result<()> {
|
async fn export_create_table(&self) -> Result<()> {
|
||||||
|
let timer = Instant::now();
|
||||||
let semaphore = Arc::new(Semaphore::new(self.parallelism));
|
let semaphore = Arc::new(Semaphore::new(self.parallelism));
|
||||||
let db_names = self.iter_db_names().await?;
|
let db_names = self.iter_db_names().await?;
|
||||||
let db_count = db_names.len();
|
let db_count = db_names.len();
|
||||||
@@ -233,15 +273,16 @@ impl Export {
|
|||||||
let semaphore_moved = semaphore.clone();
|
let semaphore_moved = semaphore.clone();
|
||||||
tasks.push(async move {
|
tasks.push(async move {
|
||||||
let _permit = semaphore_moved.acquire().await.unwrap();
|
let _permit = semaphore_moved.acquire().await.unwrap();
|
||||||
let table_list = self.get_table_list(&catalog, &schema).await?;
|
let (metric_physical_tables, remaining_tables) =
|
||||||
let table_count = table_list.len();
|
self.get_table_list(&catalog, &schema).await?;
|
||||||
|
let table_count = metric_physical_tables.len() + remaining_tables.len();
|
||||||
tokio::fs::create_dir_all(&self.output_dir)
|
tokio::fs::create_dir_all(&self.output_dir)
|
||||||
.await
|
.await
|
||||||
.context(FileIoSnafu)?;
|
.context(FileIoSnafu)?;
|
||||||
let output_file =
|
let output_file =
|
||||||
Path::new(&self.output_dir).join(format!("{catalog}-{schema}.sql"));
|
Path::new(&self.output_dir).join(format!("{catalog}-{schema}.sql"));
|
||||||
let mut file = File::create(output_file).await.context(FileIoSnafu)?;
|
let mut file = File::create(output_file).await.context(FileIoSnafu)?;
|
||||||
for (c, s, t) in table_list {
|
for (c, s, t) in metric_physical_tables.into_iter().chain(remaining_tables) {
|
||||||
match self.show_create_table(&c, &s, &t).await {
|
match self.show_create_table(&c, &s, &t).await {
|
||||||
Err(e) => {
|
Err(e) => {
|
||||||
error!(e; r#"Failed to export table "{}"."{}"."{}""#, c, s, t)
|
error!(e; r#"Failed to export table "{}"."{}"."{}""#, c, s, t)
|
||||||
@@ -270,12 +311,14 @@ impl Export {
|
|||||||
})
|
})
|
||||||
.count();
|
.count();
|
||||||
|
|
||||||
info!("success {success}/{db_count} jobs");
|
let elapsed = timer.elapsed();
|
||||||
|
info!("Success {success}/{db_count} jobs, cost: {:?}", elapsed);
|
||||||
|
|
||||||
Ok(())
|
Ok(())
|
||||||
}
|
}
|
||||||
|
|
||||||
async fn export_table_data(&self) -> Result<()> {
|
async fn export_table_data(&self) -> Result<()> {
|
||||||
|
let timer = Instant::now();
|
||||||
let semaphore = Arc::new(Semaphore::new(self.parallelism));
|
let semaphore = Arc::new(Semaphore::new(self.parallelism));
|
||||||
let db_names = self.iter_db_names().await?;
|
let db_names = self.iter_db_names().await?;
|
||||||
let db_count = db_names.len();
|
let db_count = db_names.len();
|
||||||
@@ -288,15 +331,25 @@ impl Export {
|
|||||||
.await
|
.await
|
||||||
.context(FileIoSnafu)?;
|
.context(FileIoSnafu)?;
|
||||||
let output_dir = Path::new(&self.output_dir).join(format!("{catalog}-{schema}/"));
|
let output_dir = Path::new(&self.output_dir).join(format!("{catalog}-{schema}/"));
|
||||||
|
// Ignores metric physical tables
|
||||||
// copy database to
|
let (metrics_tables, table_list) = self.get_table_list(&catalog, &schema).await?;
|
||||||
let sql = format!(
|
for (_, _, table_name) in metrics_tables {
|
||||||
"copy database {} to '{}' with (format='parquet');",
|
warn!("Ignores metric physical table: {table_name}");
|
||||||
schema,
|
}
|
||||||
output_dir.to_str().unwrap()
|
for (catalog_name, schema_name, table_name) in table_list {
|
||||||
);
|
// copy table to
|
||||||
self.sql(&sql).await?;
|
let sql = format!(
|
||||||
info!("finished exporting {catalog}.{schema} data");
|
r#"Copy "{}"."{}"."{}" TO '{}{}.parquet' WITH (format='parquet');"#,
|
||||||
|
catalog_name,
|
||||||
|
schema_name,
|
||||||
|
table_name,
|
||||||
|
output_dir.to_str().unwrap(),
|
||||||
|
table_name,
|
||||||
|
);
|
||||||
|
info!("Executing sql: {sql}");
|
||||||
|
self.sql(&sql).await?;
|
||||||
|
}
|
||||||
|
info!("Finished exporting {catalog}.{schema} data");
|
||||||
|
|
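Instead of a single `COPY DATABASE`, the export loop now issues one `COPY <table> TO '<dir><table>.parquet' WITH (format='parquet')` per table, which is what makes it possible to skip the metric physical tables and also yields one parquet file per table. A small sketch of the statement construction (the helper name is made up; the quoting mirrors the diff and keeps names containing dots, e.g. `"a.b.c"`, intact):

```rust
/// Build a per-table COPY statement like the one issued in the export loop.
fn copy_table_sql(catalog: &str, schema: &str, table: &str, output_dir: &str) -> String {
    format!(
        r#"COPY "{}"."{}"."{}" TO '{}{}.parquet' WITH (format='parquet');"#,
        catalog, schema, table, output_dir, table
    )
}
```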
||||||
// export copy from sql
|
// export copy from sql
|
||||||
let dir_filenames = match output_dir.read_dir() {
|
let dir_filenames = match output_dir.read_dir() {
|
||||||
@@ -351,8 +404,8 @@ impl Export {
|
|||||||
}
|
}
|
||||||
})
|
})
|
||||||
.count();
|
.count();
|
||||||
|
let elapsed = timer.elapsed();
|
||||||
info!("success {success}/{db_count} jobs");
|
info!("Success {success}/{db_count} jobs, costs: {:?}", elapsed);
|
||||||
|
|
||||||
Ok(())
|
Ok(())
|
||||||
}
|
}
|
||||||
@@ -381,3 +434,80 @@ fn split_database(database: &str) -> Result<(String, Option<String>)> {
|
|||||||
Ok((catalog.to_string(), Some(schema.to_string())))
|
Ok((catalog.to_string(), Some(schema.to_string())))
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
#[cfg(test)]
|
||||||
|
mod tests {
|
||||||
|
use clap::Parser;
|
||||||
|
use client::{Client, Database};
|
||||||
|
use common_catalog::consts::{DEFAULT_CATALOG_NAME, DEFAULT_SCHEMA_NAME};
|
||||||
|
use common_telemetry::logging::LoggingOptions;
|
||||||
|
|
||||||
|
use crate::error::Result as CmdResult;
|
||||||
|
use crate::options::GlobalOptions;
|
||||||
|
use crate::{cli, standalone, App};
|
||||||
|
|
||||||
|
#[tokio::test(flavor = "multi_thread")]
|
||||||
|
async fn test_export_create_table_with_quoted_names() -> CmdResult<()> {
|
||||||
|
let output_dir = tempfile::tempdir().unwrap();
|
||||||
|
|
||||||
|
let standalone = standalone::Command::parse_from([
|
||||||
|
"standalone",
|
||||||
|
"start",
|
||||||
|
"--data-home",
|
||||||
|
&*output_dir.path().to_string_lossy(),
|
||||||
|
]);
|
||||||
|
|
||||||
|
let standalone_opts = standalone.load_options(&GlobalOptions::default()).unwrap();
|
||||||
|
let mut instance = standalone.build(standalone_opts).await?;
|
||||||
|
instance.start().await?;
|
||||||
|
|
||||||
|
let client = Client::with_urls(["127.0.0.1:4001"]);
|
||||||
|
let database = Database::new(DEFAULT_CATALOG_NAME, DEFAULT_SCHEMA_NAME, client);
|
||||||
|
database
|
||||||
|
.sql(r#"CREATE DATABASE "cli.export.create_table";"#)
|
||||||
|
.await
|
||||||
|
.unwrap();
|
||||||
|
database
|
||||||
|
.sql(
|
||||||
|
r#"CREATE TABLE "cli.export.create_table"."a.b.c"(
|
||||||
|
ts TIMESTAMP,
|
||||||
|
TIME INDEX (ts)
|
||||||
|
) engine=mito;
|
||||||
|
"#,
|
||||||
|
)
|
||||||
|
.await
|
||||||
|
.unwrap();
|
||||||
|
|
||||||
|
let output_dir = tempfile::tempdir().unwrap();
|
||||||
|
let cli = cli::Command::parse_from([
|
||||||
|
"cli",
|
||||||
|
"export",
|
||||||
|
"--addr",
|
||||||
|
"127.0.0.1:4000",
|
||||||
|
"--output-dir",
|
||||||
|
&*output_dir.path().to_string_lossy(),
|
||||||
|
"--target",
|
||||||
|
"create-table",
|
||||||
|
]);
|
||||||
|
let mut cli_app = cli.build(LoggingOptions::default()).await?;
|
||||||
|
cli_app.start().await?;
|
||||||
|
|
||||||
|
instance.stop().await?;
|
||||||
|
|
||||||
|
let output_file = output_dir
|
||||||
|
.path()
|
||||||
|
.join("greptime-cli.export.create_table.sql");
|
||||||
|
let res = std::fs::read_to_string(output_file).unwrap();
|
||||||
|
let expect = r#"CREATE TABLE IF NOT EXISTS "a.b.c" (
|
||||||
|
"ts" TIMESTAMP(3) NOT NULL,
|
||||||
|
TIME INDEX ("ts")
|
||||||
|
)
|
||||||
|
|
||||||
|
ENGINE=mito
|
||||||
|
;
|
||||||
|
"#;
|
||||||
|
assert_eq!(res.trim(), expect.trim());
|
||||||
|
|
||||||
|
Ok(())
|
||||||
|
}
|
||||||
|
}
|
||||||
|
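The new test drives both the standalone server and the export subcommand through `clap::Parser::parse_from`, so it exercises the same argument parsing as a real command line before asserting on the generated `CREATE TABLE` file. A self-contained illustration of that pattern with a hypothetical argument struct (not the actual greptime CLI types):

```rust
use clap::Parser;

/// Hypothetical command used only to illustrate parse_from-driven tests.
#[derive(Parser, Debug)]
struct ExportArgs {
    #[clap(long)]
    addr: String,
    #[clap(long)]
    output_dir: String,
    #[clap(long)]
    target: String,
}

#[test]
fn parse_export_args_like_the_cli_test() {
    let args = ExportArgs::parse_from([
        "export",
        "--addr", "127.0.0.1:4000",
        "--output-dir", "/tmp/out",
        "--target", "create-table",
    ]);
    assert_eq!(args.target, "create-table");
}
```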
|||||||
@@ -16,14 +16,18 @@ use std::path::PathBuf;
|
|||||||
use std::sync::Arc;
|
use std::sync::Arc;
|
||||||
use std::time::Instant;
|
use std::time::Instant;
|
||||||
|
|
||||||
use catalog::kvbackend::{
|
use cache::{
|
||||||
CachedMetaKvBackend, CachedMetaKvBackendBuilder, KvBackendCatalogManager,
|
build_fundamental_cache_registry, with_default_composite_cache_registry, TABLE_CACHE_NAME,
|
||||||
|
TABLE_ROUTE_CACHE_NAME,
|
||||||
};
|
};
|
||||||
use client::{Client, OutputData, DEFAULT_CATALOG_NAME, DEFAULT_SCHEMA_NAME};
|
use catalog::kvbackend::{
|
||||||
|
CachedMetaKvBackend, CachedMetaKvBackendBuilder, KvBackendCatalogManager, MetaKvBackend,
|
||||||
|
};
|
||||||
|
use client::{Client, Database, OutputData, DEFAULT_CATALOG_NAME, DEFAULT_SCHEMA_NAME};
|
||||||
use common_base::Plugins;
|
use common_base::Plugins;
|
||||||
use common_config::Mode;
|
use common_config::Mode;
|
||||||
use common_error::ext::ErrorExt;
|
use common_error::ext::ErrorExt;
|
||||||
use common_meta::cache_invalidator::MultiCacheInvalidator;
|
use common_meta::cache::{CacheRegistryBuilder, LayeredCacheRegistryBuilder};
|
||||||
use common_query::Output;
|
use common_query::Output;
|
||||||
use common_recordbatch::RecordBatches;
|
use common_recordbatch::RecordBatches;
|
||||||
use common_telemetry::debug;
|
use common_telemetry::debug;
|
||||||
@@ -33,17 +37,18 @@ use query::datafusion::DatafusionQueryEngine;
|
|||||||
use query::logical_optimizer::LogicalOptimizer;
|
use query::logical_optimizer::LogicalOptimizer;
|
||||||
use query::parser::QueryLanguageParser;
|
use query::parser::QueryLanguageParser;
|
||||||
use query::plan::LogicalPlan;
|
use query::plan::LogicalPlan;
|
||||||
use query::query_engine::QueryEngineState;
|
use query::query_engine::{DefaultSerializer, QueryEngineState};
|
||||||
use query::QueryEngine;
|
use query::QueryEngine;
|
||||||
use rustyline::error::ReadlineError;
|
use rustyline::error::ReadlineError;
|
||||||
use rustyline::Editor;
|
use rustyline::Editor;
|
||||||
use session::context::QueryContext;
|
use session::context::QueryContext;
|
||||||
use snafu::ResultExt;
|
use snafu::{OptionExt, ResultExt};
|
||||||
use substrait::{DFLogicalSubstraitConvertor, SubstraitPlan};
|
use substrait::{DFLogicalSubstraitConvertor, SubstraitPlan};
|
||||||
|
|
||||||
use crate::cli::cmd::ReplCommand;
|
use crate::cli::cmd::ReplCommand;
|
||||||
use crate::cli::helper::RustylineHelper;
|
use crate::cli::helper::RustylineHelper;
|
||||||
use crate::cli::AttachCommand;
|
use crate::cli::AttachCommand;
|
||||||
|
use crate::error;
|
||||||
use crate::error::{
|
use crate::error::{
|
||||||
CollectRecordBatchesSnafu, ParseSqlSnafu, PlanStatementSnafu, PrettyPrintRecordBatchesSnafu,
|
CollectRecordBatchesSnafu, ParseSqlSnafu, PlanStatementSnafu, PrettyPrintRecordBatchesSnafu,
|
||||||
ReadlineSnafu, ReplCreationSnafu, RequestDatabaseSnafu, Result, StartMetaClientSnafu,
|
ReadlineSnafu, ReplCreationSnafu, RequestDatabaseSnafu, Result, StartMetaClientSnafu,
|
||||||
@@ -180,7 +185,7 @@ impl Repl {
|
|||||||
.context(PlanStatementSnafu)?;
|
.context(PlanStatementSnafu)?;
|
||||||
|
|
||||||
let plan = DFLogicalSubstraitConvertor {}
|
let plan = DFLogicalSubstraitConvertor {}
|
||||||
.encode(&plan)
|
.encode(&plan, DefaultSerializer)
|
||||||
.context(SubstraitEncodeLogicalPlanSnafu)?;
|
.context(SubstraitEncodeLogicalPlanSnafu)?;
|
||||||
|
|
||||||
self.database.logical_plan(plan.to_vec()).await
|
self.database.logical_plan(plan.to_vec()).await
|
||||||
@@ -257,19 +262,30 @@ async fn create_query_engine(meta_addr: &str) -> Result<DatafusionQueryEngine> {
|
|||||||
|
|
||||||
let cached_meta_backend =
|
let cached_meta_backend =
|
||||||
Arc::new(CachedMetaKvBackendBuilder::new(meta_client.clone()).build());
|
Arc::new(CachedMetaKvBackendBuilder::new(meta_client.clone()).build());
|
||||||
let multi_cache_invalidator = Arc::new(MultiCacheInvalidator::with_invalidators(vec![
|
let layered_cache_builder = LayeredCacheRegistryBuilder::default().add_cache_registry(
|
||||||
cached_meta_backend.clone(),
|
CacheRegistryBuilder::default()
|
||||||
]));
|
.add_cache(cached_meta_backend.clone())
|
||||||
let catalog_list = KvBackendCatalogManager::new(
|
.build(),
|
||||||
|
);
|
||||||
|
let fundamental_cache_registry =
|
||||||
|
build_fundamental_cache_registry(Arc::new(MetaKvBackend::new(meta_client.clone())));
|
||||||
|
let layered_cache_registry = Arc::new(
|
||||||
|
with_default_composite_cache_registry(
|
||||||
|
layered_cache_builder.add_cache_registry(fundamental_cache_registry),
|
||||||
|
)
|
||||||
|
.context(error::BuildCacheRegistrySnafu)?
|
||||||
|
.build(),
|
||||||
|
);
|
||||||
|
|
||||||
|
let catalog_manager = KvBackendCatalogManager::new(
|
||||||
Mode::Distributed,
|
Mode::Distributed,
|
||||||
Some(meta_client.clone()),
|
Some(meta_client.clone()),
|
||||||
cached_meta_backend.clone(),
|
cached_meta_backend.clone(),
|
||||||
multi_cache_invalidator,
|
layered_cache_registry,
|
||||||
)
|
);
|
||||||
.await;
|
|
||||||
let plugins: Plugins = Default::default();
|
let plugins: Plugins = Default::default();
|
||||||
let state = Arc::new(QueryEngineState::new(
|
let state = Arc::new(QueryEngineState::new(
|
||||||
catalog_list,
|
catalog_manager,
|
||||||
None,
|
None,
|
||||||
None,
|
None,
|
||||||
None,
|
None,
|
||||||
|
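The REPL's catalog manager is now fed a layered cache registry: a base registry holding the cached meta kv-backend, with the fundamental caches built over `MetaKvBackend` layered on top before the composite registry is handed to `KvBackendCatalogManager::new`. The real builders come from the `cache` and `common_meta` crates shown in the hunk; the sketch below is a hypothetical stand-in that only illustrates the lookup-through-layers idea, not the actual API:

```rust
use std::any::{Any, TypeId};
use std::collections::HashMap;
use std::sync::Arc;

/// Hypothetical stand-in for a cache registry: a bag of caches keyed by type.
#[derive(Default)]
struct Registry {
    caches: HashMap<TypeId, Arc<dyn Any + Send + Sync>>,
}

impl Registry {
    fn add<T: Send + Sync + 'static>(&mut self, cache: Arc<T>) {
        self.caches.insert(TypeId::of::<T>(), cache);
    }
    fn get<T: Send + Sync + 'static>(&self) -> Option<Arc<T>> {
        self.caches
            .get(&TypeId::of::<T>())
            .and_then(|c| c.clone().downcast::<T>().ok())
    }
}

/// Layers are searched in insertion order; the first layer that holds a cache
/// of the requested type wins, which lets a "fundamental" layer sit beneath
/// more specific ones.
#[derive(Default)]
struct LayeredRegistry {
    layers: Vec<Registry>,
}

impl LayeredRegistry {
    fn add_layer(mut self, layer: Registry) -> Self {
        self.layers.push(layer);
        self
    }
    fn get<T: Send + Sync + 'static>(&self) -> Option<Arc<T>> {
        self.layers.iter().find_map(|l| l.get::<T>())
    }
}
```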
|||||||
@@ -1,578 +0,0 @@
|
|||||||
// Copyright 2023 Greptime Team
|
|
||||||
//
|
|
||||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
|
||||||
// you may not use this file except in compliance with the License.
|
|
||||||
// You may obtain a copy of the License at
|
|
||||||
//
|
|
||||||
// http://www.apache.org/licenses/LICENSE-2.0
|
|
||||||
//
|
|
||||||
// Unless required by applicable law or agreed to in writing, software
|
|
||||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
|
||||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
||||||
// See the License for the specific language governing permissions and
|
|
||||||
// limitations under the License.
|
|
||||||
|
|
||||||
use std::collections::HashMap;
|
|
||||||
use std::sync::Arc;
|
|
||||||
|
|
||||||
use async_trait::async_trait;
|
|
||||||
use clap::Parser;
|
|
||||||
use client::api::v1::meta::TableRouteValue;
|
|
||||||
use common_meta::ddl::utils::region_storage_path;
|
|
||||||
use common_meta::error as MetaError;
|
|
||||||
use common_meta::key::catalog_name::{CatalogNameKey, CatalogNameValue};
|
|
||||||
use common_meta::key::datanode_table::{DatanodeTableKey, DatanodeTableValue, RegionInfo};
|
|
||||||
use common_meta::key::schema_name::{SchemaNameKey, SchemaNameValue};
|
|
||||||
use common_meta::key::table_info::{TableInfoKey, TableInfoValue};
|
|
||||||
use common_meta::key::table_name::{TableNameKey, TableNameValue};
|
|
||||||
use common_meta::key::table_region::{TableRegionKey, TableRegionValue};
|
|
||||||
use common_meta::key::table_route::{TableRouteKey, TableRouteValue as NextTableRouteValue};
|
|
||||||
use common_meta::key::{MetaKey, RegionDistribution, TableMetaValue};
|
|
||||||
use common_meta::kv_backend::etcd::EtcdStore;
|
|
||||||
use common_meta::kv_backend::KvBackendRef;
|
|
||||||
use common_meta::range_stream::PaginationStream;
|
|
||||||
use common_meta::rpc::router::TableRoute;
|
|
||||||
use common_meta::rpc::store::{BatchDeleteRequest, BatchPutRequest, PutRequest, RangeRequest};
|
|
||||||
use common_meta::rpc::KeyValue;
|
|
||||||
use common_meta::util::get_prefix_end_key;
|
|
||||||
use common_telemetry::info;
|
|
||||||
use etcd_client::Client;
|
|
||||||
use futures::TryStreamExt;
|
|
||||||
use prost::Message;
|
|
||||||
use snafu::ResultExt;
|
|
||||||
use v1_helper::{CatalogKey as v1CatalogKey, SchemaKey as v1SchemaKey, TableGlobalValue};
|
|
||||||
|
|
||||||
use crate::cli::{Instance, Tool};
|
|
||||||
use crate::error::{self, ConnectEtcdSnafu, Result};
|
|
||||||
|
|
||||||
#[derive(Debug, Default, Parser)]
|
|
||||||
pub struct UpgradeCommand {
|
|
||||||
#[clap(long)]
|
|
||||||
etcd_addr: String,
|
|
||||||
#[clap(long)]
|
|
||||||
dryrun: bool,
|
|
||||||
|
|
||||||
#[clap(long)]
|
|
||||||
skip_table_global_keys: bool,
|
|
||||||
#[clap(long)]
|
|
||||||
skip_catalog_keys: bool,
|
|
||||||
#[clap(long)]
|
|
||||||
skip_schema_keys: bool,
|
|
||||||
#[clap(long)]
|
|
||||||
skip_table_route_keys: bool,
|
|
||||||
}
|
|
||||||
|
|
||||||
impl UpgradeCommand {
|
|
||||||
pub async fn build(&self) -> Result<Instance> {
|
|
||||||
let client = Client::connect([&self.etcd_addr], None)
|
|
||||||
.await
|
|
||||||
.context(ConnectEtcdSnafu {
|
|
||||||
etcd_addr: &self.etcd_addr,
|
|
||||||
})?;
|
|
||||||
let tool = MigrateTableMetadata {
|
|
||||||
etcd_store: EtcdStore::with_etcd_client(client, 128),
|
|
||||||
dryrun: self.dryrun,
|
|
||||||
skip_catalog_keys: self.skip_catalog_keys,
|
|
||||||
skip_table_global_keys: self.skip_table_global_keys,
|
|
||||||
skip_schema_keys: self.skip_schema_keys,
|
|
||||||
skip_table_route_keys: self.skip_table_route_keys,
|
|
||||||
};
|
|
||||||
Ok(Instance::new(Box::new(tool)))
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
struct MigrateTableMetadata {
|
|
||||||
etcd_store: KvBackendRef,
|
|
||||||
dryrun: bool,
|
|
||||||
|
|
||||||
skip_table_global_keys: bool,
|
|
||||||
|
|
||||||
skip_catalog_keys: bool,
|
|
||||||
|
|
||||||
skip_schema_keys: bool,
|
|
||||||
|
|
||||||
skip_table_route_keys: bool,
|
|
||||||
}
|
|
||||||
|
|
||||||
#[async_trait]
|
|
||||||
impl Tool for MigrateTableMetadata {
|
|
||||||
// migrates database's metadata from 0.3 to 0.4.
|
|
||||||
async fn do_work(&self) -> Result<()> {
|
|
||||||
if !self.skip_table_global_keys {
|
|
||||||
self.migrate_table_global_values().await?;
|
|
||||||
}
|
|
||||||
if !self.skip_catalog_keys {
|
|
||||||
self.migrate_catalog_keys().await?;
|
|
||||||
}
|
|
||||||
if !self.skip_schema_keys {
|
|
||||||
self.migrate_schema_keys().await?;
|
|
||||||
}
|
|
||||||
if !self.skip_table_route_keys {
|
|
||||||
self.migrate_table_route_keys().await?;
|
|
||||||
}
|
|
||||||
Ok(())
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
const PAGE_SIZE: usize = 1000;
|
|
||||||
|
|
||||||
impl MigrateTableMetadata {
|
|
||||||
async fn migrate_table_route_keys(&self) -> Result<()> {
|
|
||||||
let key = b"__meta_table_route".to_vec();
|
|
||||||
let range_end = get_prefix_end_key(&key);
|
|
||||||
let mut keys = Vec::new();
|
|
||||||
info!("Start scanning key from: {}", String::from_utf8_lossy(&key));
|
|
||||||
|
|
||||||
let mut stream = PaginationStream::new(
|
|
||||||
self.etcd_store.clone(),
|
|
||||||
RangeRequest::new().with_range(key, range_end),
|
|
||||||
PAGE_SIZE,
|
|
||||||
Arc::new(|kv: KeyValue| {
|
|
||||||
let value =
|
|
||||||
TableRouteValue::decode(&kv.value[..]).context(MetaError::DecodeProtoSnafu)?;
|
|
||||||
Ok((kv.key, value))
|
|
||||||
}),
|
|
||||||
);
|
|
||||||
|
|
||||||
while let Some((key, value)) = stream.try_next().await.context(error::IterStreamSnafu)? {
|
|
||||||
let table_id = self.migrate_table_route_key(value).await?;
|
|
||||||
keys.push(key);
|
|
||||||
keys.push(TableRegionKey::new(table_id).to_bytes())
|
|
||||||
}
|
|
||||||
|
|
||||||
info!("Total migrated TableRouteKeys: {}", keys.len() / 2);
|
|
||||||
self.delete_migrated_keys(keys).await;
|
|
||||||
|
|
||||||
Ok(())
|
|
||||||
}
|
|
||||||
|
|
||||||
async fn migrate_table_route_key(&self, value: TableRouteValue) -> Result<u32> {
|
|
||||||
let table_route = TableRoute::try_from_raw(
|
|
||||||
&value.peers,
|
|
||||||
value.table_route.expect("expected table_route"),
|
|
||||||
)
|
|
||||||
.unwrap();
|
|
||||||
|
|
||||||
let new_table_value = NextTableRouteValue::physical(table_route.region_routes);
|
|
||||||
|
|
||||||
let table_id = table_route.table.id as u32;
|
|
||||||
let new_key = TableRouteKey::new(table_id);
|
|
||||||
info!("Creating '{new_key}'");
|
|
||||||
|
|
||||||
if self.dryrun {
|
|
||||||
info!("Dryrun: do nothing");
|
|
||||||
} else {
|
|
||||||
self.etcd_store
|
|
||||||
.put(
|
|
||||||
PutRequest::new()
|
|
||||||
.with_key(new_key.to_bytes())
|
|
||||||
.with_value(new_table_value.try_as_raw_value().unwrap()),
|
|
||||||
)
|
|
||||||
.await
|
|
||||||
.unwrap();
|
|
||||||
}
|
|
||||||
|
|
||||||
Ok(table_id)
|
|
||||||
}
|
|
||||||
|
|
||||||
async fn migrate_schema_keys(&self) -> Result<()> {
|
|
||||||
// The schema key prefix.
|
|
||||||
let key = b"__s".to_vec();
|
|
||||||
let range_end = get_prefix_end_key(&key);
|
|
||||||
|
|
||||||
let mut keys = Vec::new();
|
|
||||||
info!("Start scanning key from: {}", String::from_utf8_lossy(&key));
|
|
||||||
let mut stream = PaginationStream::new(
|
|
||||||
self.etcd_store.clone(),
|
|
||||||
RangeRequest::new().with_range(key, range_end),
|
|
||||||
PAGE_SIZE,
|
|
||||||
Arc::new(|kv: KeyValue| {
|
|
||||||
let key_str =
|
|
||||||
std::str::from_utf8(&kv.key).context(MetaError::ConvertRawKeySnafu)?;
|
|
||||||
let key = v1SchemaKey::parse(key_str)
|
|
||||||
.unwrap_or_else(|e| panic!("schema key is corrupted: {e}, key: {key_str}"));
|
|
||||||
|
|
||||||
Ok(key)
|
|
||||||
}),
|
|
||||||
);
|
|
||||||
while let Some(key) = stream.try_next().await.context(error::IterStreamSnafu)? {
|
|
||||||
let _ = self.migrate_schema_key(&key).await;
|
|
||||||
keys.push(key.to_string().as_bytes().to_vec());
|
|
||||||
}
|
|
||||||
info!("Total migrated SchemaKeys: {}", keys.len());
|
|
||||||
self.delete_migrated_keys(keys).await;
|
|
||||||
|
|
||||||
Ok(())
|
|
||||||
}
|
|
||||||
|
|
||||||
async fn migrate_schema_key(&self, key: &v1SchemaKey) -> Result<()> {
|
|
||||||
let new_key = SchemaNameKey::new(&key.catalog_name, &key.schema_name);
|
|
||||||
let schema_name_value = SchemaNameValue::default();
|
|
||||||
|
|
||||||
info!("Creating '{new_key}'");
|
|
||||||
|
|
||||||
if self.dryrun {
|
|
||||||
info!("Dryrun: do nothing");
|
|
||||||
} else {
|
|
||||||
self.etcd_store
|
|
||||||
.put(
|
|
||||||
PutRequest::new()
|
|
||||||
.with_key(new_key.to_bytes())
|
|
||||||
.with_value(schema_name_value.try_as_raw_value().unwrap()),
|
|
||||||
)
|
|
||||||
.await
|
|
||||||
.unwrap();
|
|
||||||
}
|
|
||||||
|
|
||||||
Ok(())
|
|
||||||
}
|
|
||||||
|
|
||||||
async fn migrate_catalog_keys(&self) -> Result<()> {
|
|
||||||
// The catalog key prefix.
|
|
||||||
let key = b"__c".to_vec();
|
|
||||||
let range_end = get_prefix_end_key(&key);
|
|
||||||
|
|
||||||
let mut keys = Vec::new();
|
|
||||||
info!("Start scanning key from: {}", String::from_utf8_lossy(&key));
|
|
||||||
let mut stream = PaginationStream::new(
|
|
||||||
self.etcd_store.clone(),
|
|
||||||
RangeRequest::new().with_range(key, range_end),
|
|
||||||
PAGE_SIZE,
|
|
||||||
Arc::new(|kv: KeyValue| {
|
|
||||||
let key_str =
|
|
||||||
std::str::from_utf8(&kv.key).context(MetaError::ConvertRawKeySnafu)?;
|
|
||||||
let key = v1CatalogKey::parse(key_str)
|
|
||||||
.unwrap_or_else(|e| panic!("catalog key is corrupted: {e}, key: {key_str}"));
|
|
||||||
|
|
||||||
Ok(key)
|
|
||||||
}),
|
|
||||||
);
|
|
||||||
while let Some(key) = stream.try_next().await.context(error::IterStreamSnafu)? {
|
|
||||||
let _ = self.migrate_catalog_key(&key).await;
|
|
||||||
keys.push(key.to_string().as_bytes().to_vec());
|
|
||||||
}
|
|
||||||
info!("Total migrated CatalogKeys: {}", keys.len());
|
|
||||||
self.delete_migrated_keys(keys).await;
|
|
||||||
|
|
||||||
Ok(())
|
|
||||||
}
|
|
||||||
|
|
||||||
async fn migrate_catalog_key(&self, key: &v1CatalogKey) {
|
|
||||||
let new_key = CatalogNameKey::new(&key.catalog_name);
|
|
||||||
let catalog_name_value = CatalogNameValue;
|
|
||||||
|
|
||||||
info!("Creating '{new_key}'");
|
|
||||||
|
|
||||||
if self.dryrun {
|
|
||||||
info!("Dryrun: do nothing");
|
|
||||||
} else {
|
|
||||||
self.etcd_store
|
|
||||||
.put(
|
|
||||||
PutRequest::new()
|
|
||||||
.with_key(new_key.to_bytes())
|
|
||||||
.with_value(catalog_name_value.try_as_raw_value().unwrap()),
|
|
||||||
)
|
|
||||||
.await
|
|
||||||
.unwrap();
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
async fn migrate_table_global_values(&self) -> Result<()> {
|
|
||||||
let key = b"__tg".to_vec();
|
|
||||||
let range_end = get_prefix_end_key(&key);
|
|
||||||
|
|
||||||
let mut keys = Vec::new();
|
|
||||||
|
|
||||||
info!("Start scanning key from: {}", String::from_utf8_lossy(&key));
|
|
||||||
let mut stream = PaginationStream::new(
|
|
||||||
self.etcd_store.clone(),
|
|
||||||
RangeRequest::new().with_range(key, range_end.clone()),
|
|
||||||
PAGE_SIZE,
|
|
||||||
Arc::new(|kv: KeyValue| {
|
|
||||||
let key = String::from_utf8_lossy(kv.key()).to_string();
|
|
||||||
let value = TableGlobalValue::from_bytes(kv.value())
|
|
||||||
.unwrap_or_else(|e| panic!("table global value is corrupted: {e}, key: {key}"));
|
|
||||||
|
|
||||||
Ok((key, value))
|
|
||||||
}),
|
|
||||||
);
|
|
||||||
while let Some((key, value)) = stream.try_next().await.context(error::IterStreamSnafu)? {
|
|
||||||
self.create_table_name_key(&value).await;
|
|
||||||
|
|
||||||
self.create_datanode_table_keys(&value).await;
|
|
||||||
|
|
||||||
self.split_table_global_value(&key, value).await;
|
|
||||||
|
|
||||||
keys.push(key.as_bytes().to_vec());
|
|
||||||
}
|
|
||||||
|
|
||||||
info!("Total migrated TableGlobalKeys: {}", keys.len());
|
|
||||||
self.delete_migrated_keys(keys).await;
|
|
||||||
|
|
||||||
Ok(())
|
|
||||||
}
|
|
||||||
|
|
||||||
async fn delete_migrated_keys(&self, keys: Vec<Vec<u8>>) {
|
|
||||||
for keys in keys.chunks(PAGE_SIZE) {
|
|
||||||
info!("Deleting {} keys", keys.len());
|
|
||||||
let req = BatchDeleteRequest {
|
|
||||||
keys: keys.to_vec(),
|
|
||||||
prev_kv: false,
|
|
||||||
};
|
|
||||||
if self.dryrun {
|
|
||||||
info!("Dryrun: do nothing");
|
|
||||||
} else {
|
|
||||||
self.etcd_store.batch_delete(req).await.unwrap();
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
async fn split_table_global_value(&self, key: &str, value: TableGlobalValue) {
|
|
||||||
let table_id = value.table_id();
|
|
||||||
let region_distribution: RegionDistribution = value.regions_id_map.into_iter().collect();
|
|
||||||
|
|
||||||
let table_info_key = TableInfoKey::new(table_id);
|
|
||||||
let table_info_value = TableInfoValue::new(value.table_info);
|
|
||||||
|
|
||||||
let table_region_key = TableRegionKey::new(table_id);
|
|
||||||
let table_region_value = TableRegionValue::new(region_distribution);
|
|
||||||
|
|
||||||
info!("Splitting TableGlobalKey '{key}' into '{table_info_key}' and '{table_region_key}'");
|
|
||||||
|
|
||||||
if self.dryrun {
|
|
||||||
info!("Dryrun: do nothing");
|
|
||||||
} else {
|
|
||||||
self.etcd_store
|
|
||||||
.batch_put(
|
|
||||||
BatchPutRequest::new()
|
|
||||||
.add_kv(
|
|
||||||
table_info_key.to_bytes(),
|
|
||||||
table_info_value.try_as_raw_value().unwrap(),
|
|
||||||
)
|
|
||||||
.add_kv(
|
|
||||||
table_region_key.to_bytes(),
|
|
||||||
table_region_value.try_as_raw_value().unwrap(),
|
|
||||||
),
|
|
||||||
)
|
|
||||||
.await
|
|
||||||
.unwrap();
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
async fn create_table_name_key(&self, value: &TableGlobalValue) {
|
|
||||||
let table_info = &value.table_info;
|
|
||||||
let table_id = value.table_id();
|
|
||||||
|
|
||||||
let table_name_key = TableNameKey::new(
|
|
||||||
&table_info.catalog_name,
|
|
||||||
&table_info.schema_name,
|
|
||||||
&table_info.name,
|
|
||||||
);
|
|
||||||
let table_name_value = TableNameValue::new(table_id);
|
|
||||||
|
|
||||||
info!("Creating '{table_name_key}' => {table_id}");
|
|
||||||
|
|
||||||
if self.dryrun {
|
|
||||||
info!("Dryrun: do nothing");
|
|
||||||
} else {
|
|
||||||
self.etcd_store
|
|
||||||
.put(
|
|
||||||
PutRequest::new()
|
|
||||||
.with_key(table_name_key.to_bytes())
|
|
||||||
.with_value(table_name_value.try_as_raw_value().unwrap()),
|
|
||||||
)
|
|
||||||
.await
|
|
||||||
.unwrap();
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
async fn create_datanode_table_keys(&self, value: &TableGlobalValue) {
|
|
||||||
let table_id = value.table_id();
|
|
||||||
let engine = value.table_info.meta.engine.as_str();
|
|
||||||
let region_storage_path = region_storage_path(
|
|
||||||
&value.table_info.catalog_name,
|
|
||||||
&value.table_info.schema_name,
|
|
||||||
);
|
|
||||||
let region_distribution: RegionDistribution =
|
|
||||||
value.regions_id_map.clone().into_iter().collect();
|
|
||||||
|
|
||||||
// TODO(niebayes): properly fetch or construct wal options.
|
|
||||||
let region_wal_options = HashMap::default();
|
|
||||||
|
|
||||||
let datanode_table_kvs = region_distribution
|
|
||||||
.into_iter()
|
|
||||||
.map(|(datanode_id, regions)| {
|
|
||||||
let k = DatanodeTableKey::new(datanode_id, table_id);
|
|
||||||
info!("Creating DatanodeTableKey '{k}' => {regions:?}");
|
|
||||||
(
|
|
||||||
k,
|
|
||||||
DatanodeTableValue::new(
|
|
||||||
table_id,
|
|
||||||
regions,
|
|
||||||
RegionInfo {
|
|
||||||
engine: engine.to_string(),
|
|
||||||
region_storage_path: region_storage_path.clone(),
|
|
||||||
region_options: (&value.table_info.meta.options).into(),
|
|
||||||
region_wal_options: region_wal_options.clone(),
|
|
||||||
},
|
|
||||||
),
|
|
||||||
)
|
|
||||||
})
|
|
||||||
.collect::<Vec<_>>();
|
|
||||||
|
|
||||||
if self.dryrun {
|
|
||||||
info!("Dryrun: do nothing");
|
|
||||||
} else {
|
|
||||||
let mut req = BatchPutRequest::new();
|
|
||||||
for (key, value) in datanode_table_kvs {
|
|
||||||
req = req.add_kv(key.to_bytes(), value.try_as_raw_value().unwrap());
|
|
||||||
}
|
|
||||||
self.etcd_store.batch_put(req).await.unwrap();
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
#[deprecated(since = "0.4.0", note = "Used for migrate old version(v0.3) metadata")]
|
|
||||||
mod v1_helper {
|
|
||||||
use std::collections::HashMap;
|
|
||||||
use std::fmt::{Display, Formatter};
|
|
||||||
|
|
||||||
use err::{DeserializeCatalogEntryValueSnafu, Error, InvalidCatalogSnafu};
|
|
||||||
use lazy_static::lazy_static;
|
|
||||||
use regex::Regex;
|
|
||||||
use serde::{Deserialize, Serialize};
|
|
||||||
use snafu::{ensure, OptionExt, ResultExt};
|
|
||||||
use table::metadata::{RawTableInfo, TableId};
|
|
||||||
|
|
||||||
pub const CATALOG_KEY_PREFIX: &str = "__c";
|
|
||||||
pub const SCHEMA_KEY_PREFIX: &str = "__s";
|
|
||||||
|
|
||||||
/// The pattern of a valid catalog, schema or table name.
|
|
||||||
const NAME_PATTERN: &str = "[a-zA-Z_:][a-zA-Z0-9_:]*";
|
|
||||||
|
|
||||||
lazy_static! {
|
|
||||||
static ref CATALOG_KEY_PATTERN: Regex =
|
|
||||||
Regex::new(&format!("^{CATALOG_KEY_PREFIX}-({NAME_PATTERN})$")).unwrap();
|
|
||||||
}
|
|
||||||
|
|
||||||
lazy_static! {
|
|
||||||
static ref SCHEMA_KEY_PATTERN: Regex = Regex::new(&format!(
|
|
||||||
"^{SCHEMA_KEY_PREFIX}-({NAME_PATTERN})-({NAME_PATTERN})$"
|
|
||||||
))
|
|
||||||
.unwrap();
|
|
||||||
}
|
|
||||||
|
|
||||||
/// Table global info contains necessary info for a datanode to create table regions, including
|
|
||||||
/// table id, table meta(schema...), region id allocation across datanodes.
|
|
||||||
#[derive(Debug, Clone, Serialize, Deserialize, PartialEq, Eq)]
|
|
||||||
pub struct TableGlobalValue {
|
|
||||||
/// Id of datanode that created the global table info kv. only for debugging.
|
|
||||||
pub node_id: u64,
|
|
||||||
/// Allocation of region ids across all datanodes.
|
|
||||||
pub regions_id_map: HashMap<u64, Vec<u32>>,
|
|
||||||
pub table_info: RawTableInfo,
|
|
||||||
}
|
|
||||||
|
|
||||||
impl TableGlobalValue {
|
|
||||||
pub fn table_id(&self) -> TableId {
|
|
||||||
self.table_info.ident.table_id
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
pub struct CatalogKey {
|
|
||||||
pub catalog_name: String,
|
|
||||||
}
|
|
||||||
|
|
||||||
impl Display for CatalogKey {
|
|
||||||
fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result {
|
|
||||||
f.write_str(CATALOG_KEY_PREFIX)?;
|
|
||||||
f.write_str("-")?;
|
|
||||||
f.write_str(&self.catalog_name)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
impl CatalogKey {
|
|
||||||
pub fn parse(s: impl AsRef<str>) -> Result<Self, Error> {
|
|
||||||
let key = s.as_ref();
|
|
||||||
let captures = CATALOG_KEY_PATTERN
|
|
||||||
.captures(key)
|
|
||||||
.context(InvalidCatalogSnafu { key })?;
|
|
||||||
ensure!(captures.len() == 2, InvalidCatalogSnafu { key });
|
|
||||||
Ok(Self {
|
|
||||||
catalog_name: captures[1].to_string(),
|
|
||||||
})
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
#[derive(Debug, Serialize, Deserialize)]
|
|
||||||
pub struct CatalogValue;
|
|
||||||
|
|
||||||
pub struct SchemaKey {
|
|
||||||
pub catalog_name: String,
|
|
||||||
pub schema_name: String,
|
|
||||||
}
|
|
||||||
|
|
||||||
impl Display for SchemaKey {
|
|
||||||
fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result {
|
|
||||||
f.write_str(SCHEMA_KEY_PREFIX)?;
|
|
||||||
f.write_str("-")?;
|
|
||||||
f.write_str(&self.catalog_name)?;
|
|
||||||
f.write_str("-")?;
|
|
||||||
f.write_str(&self.schema_name)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
impl SchemaKey {
|
|
||||||
pub fn parse(s: impl AsRef<str>) -> Result<Self, Error> {
|
|
||||||
let key = s.as_ref();
|
|
||||||
let captures = SCHEMA_KEY_PATTERN
|
|
||||||
.captures(key)
|
|
||||||
.context(InvalidCatalogSnafu { key })?;
|
|
||||||
ensure!(captures.len() == 3, InvalidCatalogSnafu { key });
|
|
||||||
Ok(Self {
|
|
||||||
catalog_name: captures[1].to_string(),
|
|
||||||
schema_name: captures[2].to_string(),
|
|
||||||
})
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
#[derive(Debug, Serialize, Deserialize)]
|
|
||||||
pub struct SchemaValue;
|
|
||||||
|
|
||||||
macro_rules! define_catalog_value {
|
|
||||||
( $($val_ty: ty), *) => {
|
|
||||||
$(
|
|
||||||
impl $val_ty {
|
|
||||||
pub fn parse(s: impl AsRef<str>) -> Result<Self, Error> {
|
|
||||||
serde_json::from_str(s.as_ref())
|
|
||||||
.context(DeserializeCatalogEntryValueSnafu { raw: s.as_ref() })
|
|
||||||
}
|
|
||||||
|
|
||||||
pub fn from_bytes(bytes: impl AsRef<[u8]>) -> Result<Self, Error> {
|
|
||||||
Self::parse(&String::from_utf8_lossy(bytes.as_ref()))
|
|
||||||
}
|
|
||||||
}
|
|
||||||
)*
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
define_catalog_value!(TableGlobalValue);
|
|
||||||
|
|
||||||
mod err {
|
|
||||||
use snafu::{Location, Snafu};
|
|
||||||
|
|
||||||
#[derive(Debug, Snafu)]
|
|
||||||
#[snafu(visibility(pub))]
|
|
||||||
pub enum Error {
|
|
||||||
#[snafu(display("Invalid catalog info: {}", key))]
|
|
||||||
InvalidCatalog { key: String, location: Location },
|
|
||||||
|
|
||||||
#[snafu(display("Failed to deserialize catalog entry value: {}", raw))]
|
|
||||||
DeserializeCatalogEntryValue {
|
|
||||||
raw: String,
|
|
||||||
location: Location,
|
|
||||||
source: serde_json::error::Error,
|
|
||||||
},
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
@@ -18,7 +18,10 @@ use std::time::Duration;
|
|||||||
use async_trait::async_trait;
|
use async_trait::async_trait;
|
||||||
use catalog::kvbackend::MetaKvBackend;
|
use catalog::kvbackend::MetaKvBackend;
|
||||||
use clap::Parser;
|
use clap::Parser;
|
||||||
|
use common_config::Configurable;
|
||||||
use common_telemetry::info;
|
use common_telemetry::info;
|
||||||
|
use common_telemetry::logging::TracingOptions;
|
||||||
|
use common_version::{short_version, version};
|
||||||
use common_wal::config::DatanodeWalConfig;
|
use common_wal::config::DatanodeWalConfig;
|
||||||
use datanode::config::DatanodeOptions;
|
use datanode::config::DatanodeOptions;
|
||||||
use datanode::datanode::{Datanode, DatanodeBuilder};
|
use datanode::datanode::{Datanode, DatanodeBuilder};
|
||||||
@@ -26,18 +29,29 @@ use datanode::service::DatanodeServiceBuilder;
|
|||||||
use meta_client::MetaClientOptions;
|
use meta_client::MetaClientOptions;
|
||||||
use servers::Mode;
|
use servers::Mode;
|
||||||
use snafu::{OptionExt, ResultExt};
|
use snafu::{OptionExt, ResultExt};
|
||||||
|
use tracing_appender::non_blocking::WorkerGuard;
|
||||||
|
|
||||||
use crate::error::{MissingConfigSnafu, Result, ShutdownDatanodeSnafu, StartDatanodeSnafu};
|
use crate::error::{
|
||||||
use crate::options::{GlobalOptions, Options};
|
LoadLayeredConfigSnafu, MissingConfigSnafu, Result, ShutdownDatanodeSnafu, StartDatanodeSnafu,
|
||||||
use crate::App;
|
};
|
||||||
|
use crate::options::GlobalOptions;
|
||||||
|
use crate::{log_versions, App};
|
||||||
|
|
||||||
|
pub const APP_NAME: &str = "greptime-datanode";
|
||||||
|
|
||||||
pub struct Instance {
|
pub struct Instance {
|
||||||
datanode: Datanode,
|
datanode: Datanode,
|
||||||
|
|
||||||
|
// Keep the logging guard to prevent the worker from being dropped.
|
||||||
|
_guard: Vec<WorkerGuard>,
|
||||||
}
|
}
|
||||||
|
|
||||||
impl Instance {
|
impl Instance {
|
||||||
pub fn new(datanode: Datanode) -> Self {
|
pub fn new(datanode: Datanode, guard: Vec<WorkerGuard>) -> Self {
|
||||||
Self { datanode }
|
Self {
|
||||||
|
datanode,
|
||||||
|
_guard: guard,
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
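Keeping the `WorkerGuard`s inside `Instance` is what keeps the non-blocking log writer alive for the lifetime of the process: `tracing_appender::non_blocking` hands back a writer plus a guard, and dropping the guard shuts the background worker down and flushes whatever is still buffered. A standalone sketch of that behaviour, independent of the greptime wiring:

```rust
use tracing_appender::non_blocking::{NonBlocking, WorkerGuard};

struct App {
    // Held only so the background log writer lives as long as the app.
    _guard: Vec<WorkerGuard>,
}

fn init_logging() -> (NonBlocking, WorkerGuard) {
    // Writes to stdout through a dedicated worker thread; the guard flushes
    // buffered records when it is dropped.
    tracing_appender::non_blocking(std::io::stdout())
}

fn main() {
    let (writer, guard) = init_logging();
    tracing_subscriber::fmt().with_writer(writer).init();
    let _app = App { _guard: vec![guard] };
    tracing::info!("logging stays alive until _app is dropped");
}
```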
|
||||||
pub fn datanode_mut(&mut self) -> &mut Datanode {
|
pub fn datanode_mut(&mut self) -> &mut Datanode {
|
||||||
@@ -52,7 +66,7 @@ impl Instance {
|
|||||||
#[async_trait]
|
#[async_trait]
|
||||||
impl App for Instance {
|
impl App for Instance {
|
||||||
fn name(&self) -> &str {
|
fn name(&self) -> &str {
|
||||||
"greptime-datanode"
|
APP_NAME
|
||||||
}
|
}
|
||||||
|
|
||||||
async fn start(&mut self) -> Result<()> {
|
async fn start(&mut self) -> Result<()> {
|
||||||
@@ -78,11 +92,11 @@ pub struct Command {
|
|||||||
}
|
}
|
||||||
|
|
||||||
impl Command {
|
impl Command {
|
||||||
pub async fn build(self, opts: DatanodeOptions) -> Result<Instance> {
|
pub async fn build(&self, opts: DatanodeOptions) -> Result<Instance> {
|
||||||
self.subcmd.build(opts).await
|
self.subcmd.build(opts).await
|
||||||
}
|
}
|
||||||
|
|
||||||
pub fn load_options(&self, global_options: &GlobalOptions) -> Result<Options> {
|
pub fn load_options(&self, global_options: &GlobalOptions) -> Result<DatanodeOptions> {
|
||||||
self.subcmd.load_options(global_options)
|
self.subcmd.load_options(global_options)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
@@ -93,13 +107,13 @@ enum SubCommand {
|
|||||||
}
|
}
|
||||||
|
|
||||||
impl SubCommand {
|
impl SubCommand {
|
||||||
async fn build(self, opts: DatanodeOptions) -> Result<Instance> {
|
async fn build(&self, opts: DatanodeOptions) -> Result<Instance> {
|
||||||
match self {
|
match self {
|
||||||
SubCommand::Start(cmd) => cmd.build(opts).await,
|
SubCommand::Start(cmd) => cmd.build(opts).await,
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
fn load_options(&self, global_options: &GlobalOptions) -> Result<Options> {
|
fn load_options(&self, global_options: &GlobalOptions) -> Result<DatanodeOptions> {
|
||||||
match self {
|
match self {
|
||||||
SubCommand::Start(cmd) => cmd.load_options(global_options),
|
SubCommand::Start(cmd) => cmd.load_options(global_options),
|
||||||
}
|
}
|
||||||
@@ -114,8 +128,8 @@ struct StartCommand {
|
|||||||
rpc_addr: Option<String>,
|
rpc_addr: Option<String>,
|
||||||
#[clap(long)]
|
#[clap(long)]
|
||||||
rpc_hostname: Option<String>,
|
rpc_hostname: Option<String>,
|
||||||
#[clap(long, value_delimiter = ',', num_args = 1..)]
|
#[clap(long, aliases = ["metasrv-addr"], value_delimiter = ',', num_args = 1..)]
|
||||||
metasrv_addr: Option<Vec<String>>,
|
metasrv_addrs: Option<Vec<String>>,
|
||||||
#[clap(short, long)]
|
#[clap(short, long)]
|
||||||
config_file: Option<String>,
|
config_file: Option<String>,
|
||||||
#[clap(long)]
|
#[clap(long)]
|
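Renaming the flag to `--metasrv-addrs` while adding `aliases = ["metasrv-addr"]` keeps old command lines working, since clap treats aliases as hidden alternative spellings. A small self-contained illustration of the same attribute (hypothetical binary, not the datanode struct; the attribute syntax mirrors the line in this hunk):

```rust
use clap::Parser;

#[derive(Parser, Debug)]
struct Args {
    /// New canonical flag name; the old spelling still parses via the alias.
    #[clap(long, aliases = ["metasrv-addr"], value_delimiter = ',', num_args = 1..)]
    metasrv_addrs: Option<Vec<String>>,
}

fn main() {
    // Both spellings parse into the same field.
    let a = Args::parse_from(["app", "--metasrv-addrs", "127.0.0.1:3002,127.0.0.1:3003"]);
    let b = Args::parse_from(["app", "--metasrv-addr", "127.0.0.1:3002"]);
    println!("{:?} {:?}", a.metasrv_addrs, b.metasrv_addrs);
}
```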
||||||
@@ -131,13 +145,23 @@ struct StartCommand {
|
|||||||
}
|
}
|
||||||
|
|
||||||
impl StartCommand {
|
impl StartCommand {
|
||||||
fn load_options(&self, global_options: &GlobalOptions) -> Result<Options> {
|
fn load_options(&self, global_options: &GlobalOptions) -> Result<DatanodeOptions> {
|
||||||
let mut opts: DatanodeOptions = Options::load_layered_options(
|
self.merge_with_cli_options(
|
||||||
self.config_file.as_deref(),
|
global_options,
|
||||||
self.env_prefix.as_ref(),
|
DatanodeOptions::load_layered_options(
|
||||||
DatanodeOptions::env_list_keys(),
|
self.config_file.as_deref(),
|
||||||
)?;
|
self.env_prefix.as_ref(),
|
||||||
|
)
|
||||||
|
.context(LoadLayeredConfigSnafu)?,
|
||||||
|
)
|
||||||
|
}
|
||||||
|
|
||||||
|
// The precedence order is: cli > config file > environment variables > default values.
|
||||||
|
fn merge_with_cli_options(
|
||||||
|
&self,
|
||||||
|
global_options: &GlobalOptions,
|
||||||
|
mut opts: DatanodeOptions,
|
||||||
|
) -> Result<DatanodeOptions> {
|
||||||
if let Some(dir) = &global_options.log_dir {
|
if let Some(dir) = &global_options.log_dir {
|
||||||
opts.logging.dir.clone_from(dir);
|
opts.logging.dir.clone_from(dir);
|
||||||
}
|
}
|
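`load_options` now loads the layered file/env configuration first and then overlays the CLI flags in `merge_with_cli_options`, which is what gives the `cli > config file > environment variables > default values` order stated in the comment. A generic sketch of that ordering, independent of the `Configurable` trait used here (field names are illustrative only):

```rust
#[derive(Default, Clone)]
struct Opts {
    log_dir: String,
    log_level: String,
}

// Later layers win: non-empty fields in the overlay replace the base values.
fn merge(mut base: Opts, overlay: &Opts) -> Opts {
    if !overlay.log_dir.is_empty() {
        base.log_dir = overlay.log_dir.clone();
    }
    if !overlay.log_level.is_empty() {
        base.log_level = overlay.log_level.clone();
    }
    base
}

// defaults < env < config file < cli
fn load(defaults: Opts, env: Opts, file: Opts, cli: Opts) -> Opts {
    [env, file, cli]
        .iter()
        .fold(defaults, |acc, layer| merge(acc, layer))
}
```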
||||||
@@ -146,6 +170,11 @@ impl StartCommand {
|
|||||||
opts.logging.level.clone_from(&global_options.log_level);
|
opts.logging.level.clone_from(&global_options.log_level);
|
||||||
}
|
}
|
||||||
|
|
||||||
|
opts.tracing = TracingOptions {
|
||||||
|
#[cfg(feature = "tokio-console")]
|
||||||
|
tokio_console_addr: global_options.tokio_console_addr.clone(),
|
||||||
|
};
|
||||||
|
|
||||||
if let Some(addr) = &self.rpc_addr {
|
if let Some(addr) = &self.rpc_addr {
|
||||||
opts.rpc_addr.clone_from(addr);
|
opts.rpc_addr.clone_from(addr);
|
||||||
}
|
}
|
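The `TracingOptions` overlay uses a struct literal whose `tokio_console_addr` field only exists when the `tokio-console` feature is enabled, so the same code compiles with and without the feature. A minimal standalone version of the pattern (hypothetical struct, not the real `TracingOptions`):

```rust
#[derive(Default, Debug)]
struct TracingOpts {
    #[cfg(feature = "tokio-console")]
    tokio_console_addr: Option<String>,
}

#[allow(unused_variables)] // `addr` is unused when the feature is disabled
fn build_tracing_opts(addr: Option<String>) -> TracingOpts {
    TracingOpts {
        // The field and its initializer are compiled out when the feature is off.
        #[cfg(feature = "tokio-console")]
        tokio_console_addr: addr,
    }
}
```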
||||||
@@ -158,7 +187,7 @@ impl StartCommand {
|
|||||||
opts.node_id = Some(node_id);
|
opts.node_id = Some(node_id);
|
||||||
}
|
}
|
||||||
|
|
||||||
if let Some(metasrv_addrs) = &self.metasrv_addr {
|
if let Some(metasrv_addrs) = &self.metasrv_addrs {
|
||||||
opts.meta_client
|
opts.meta_client
|
||||||
.get_or_insert_with(MetaClientOptions::default)
|
.get_or_insert_with(MetaClientOptions::default)
|
||||||
.metasrv_addrs
|
.metasrv_addrs
|
||||||
@@ -202,10 +231,18 @@ impl StartCommand {
|
|||||||
// Disable dashboard in datanode.
|
// Disable dashboard in datanode.
|
||||||
opts.http.disable_dashboard = true;
|
opts.http.disable_dashboard = true;
|
||||||
|
|
||||||
Ok(Options::Datanode(Box::new(opts)))
|
Ok(opts)
|
||||||
}
|
}
|
||||||
|
|
||||||
async fn build(self, mut opts: DatanodeOptions) -> Result<Instance> {
|
async fn build(&self, mut opts: DatanodeOptions) -> Result<Instance> {
|
||||||
|
let guard = common_telemetry::init_global_logging(
|
||||||
|
APP_NAME,
|
||||||
|
&opts.logging,
|
||||||
|
&opts.tracing,
|
||||||
|
opts.node_id.map(|x| x.to_string()),
|
||||||
|
);
|
||||||
|
log_versions(version!(), short_version!());
|
||||||
|
|
||||||
let plugins = plugins::setup_datanode_plugins(&mut opts)
|
let plugins = plugins::setup_datanode_plugins(&mut opts)
|
||||||
.await
|
.await
|
||||||
.context(StartDatanodeSnafu)?;
|
.context(StartDatanodeSnafu)?;
|
||||||
@@ -244,7 +281,7 @@ impl StartCommand {
|
|||||||
.context(StartDatanodeSnafu)?;
|
.context(StartDatanodeSnafu)?;
|
||||||
datanode.setup_services(services);
|
datanode.setup_services(services);
|
||||||
|
|
||||||
Ok(Instance::new(datanode))
|
Ok(Instance::new(datanode, guard))
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -253,13 +290,14 @@ mod tests {
|
|||||||
use std::io::Write;
|
use std::io::Write;
|
||||||
use std::time::Duration;
|
use std::time::Duration;
|
||||||
|
|
||||||
|
use common_config::ENV_VAR_SEP;
|
||||||
use common_test_util::temp_dir::create_named_temp_file;
|
use common_test_util::temp_dir::create_named_temp_file;
|
||||||
use datanode::config::{FileConfig, GcsConfig, ObjectStoreConfig, S3Config};
|
use datanode::config::{FileConfig, GcsConfig, ObjectStoreConfig, S3Config};
|
||||||
use servers::heartbeat_options::HeartbeatOptions;
|
use servers::heartbeat_options::HeartbeatOptions;
|
||||||
use servers::Mode;
|
use servers::Mode;
|
||||||
|
|
||||||
use super::*;
|
use super::*;
|
||||||
use crate::options::{GlobalOptions, ENV_VAR_SEP};
|
use crate::options::GlobalOptions;
|
||||||
|
|
||||||
#[test]
|
#[test]
|
||||||
fn test_read_from_config_file() {
|
fn test_read_from_config_file() {
|
||||||
@@ -315,10 +353,7 @@ mod tests {
|
|||||||
..Default::default()
|
..Default::default()
|
||||||
};
|
};
|
||||||
|
|
||||||
let Options::Datanode(options) = cmd.load_options(&GlobalOptions::default()).unwrap()
|
let options = cmd.load_options(&GlobalOptions::default()).unwrap();
|
||||||
else {
|
|
||||||
unreachable!()
|
|
||||||
};
|
|
||||||
|
|
||||||
assert_eq!("127.0.0.1:3001".to_string(), options.rpc_addr);
|
assert_eq!("127.0.0.1:3001".to_string(), options.rpc_addr);
|
||||||
assert_eq!(Some(42), options.node_id);
|
assert_eq!(Some(42), options.node_id);
|
||||||
@@ -377,26 +412,22 @@ mod tests {
|
|||||||
|
|
||||||
#[test]
|
#[test]
|
||||||
fn test_try_from_cmd() {
|
fn test_try_from_cmd() {
|
||||||
if let Options::Datanode(opt) = StartCommand::default()
|
let opt = StartCommand::default()
|
||||||
.load_options(&GlobalOptions::default())
|
.load_options(&GlobalOptions::default())
|
||||||
.unwrap()
|
.unwrap();
|
||||||
{
|
assert_eq!(Mode::Standalone, opt.mode);
|
||||||
assert_eq!(Mode::Standalone, opt.mode)
|
|
||||||
}
|
|
||||||
|
|
||||||
if let Options::Datanode(opt) = (StartCommand {
|
let opt = (StartCommand {
|
||||||
node_id: Some(42),
|
node_id: Some(42),
|
||||||
metasrv_addr: Some(vec!["127.0.0.1:3002".to_string()]),
|
metasrv_addrs: Some(vec!["127.0.0.1:3002".to_string()]),
|
||||||
..Default::default()
|
..Default::default()
|
||||||
})
|
})
|
||||||
.load_options(&GlobalOptions::default())
|
.load_options(&GlobalOptions::default())
|
||||||
.unwrap()
|
.unwrap();
|
||||||
{
|
assert_eq!(Mode::Distributed, opt.mode);
|
||||||
assert_eq!(Mode::Distributed, opt.mode)
|
|
||||||
}
|
|
||||||
|
|
||||||
assert!((StartCommand {
|
assert!((StartCommand {
|
||||||
metasrv_addr: Some(vec!["127.0.0.1:3002".to_string()]),
|
metasrv_addrs: Some(vec!["127.0.0.1:3002".to_string()]),
|
||||||
..Default::default()
|
..Default::default()
|
||||||
})
|
})
|
||||||
.load_options(&GlobalOptions::default())
|
.load_options(&GlobalOptions::default())
|
||||||
@@ -425,7 +456,7 @@ mod tests {
|
|||||||
})
|
})
|
||||||
.unwrap();
|
.unwrap();
|
||||||
|
|
||||||
let logging_opt = options.logging_options();
|
let logging_opt = options.logging;
|
||||||
assert_eq!("/tmp/greptimedb/test/logs", logging_opt.dir);
|
assert_eq!("/tmp/greptimedb/test/logs", logging_opt.dir);
|
||||||
assert_eq!("debug", logging_opt.level.as_ref().unwrap());
|
assert_eq!("debug", logging_opt.level.as_ref().unwrap());
|
||||||
}
|
}
|
||||||
@@ -505,11 +536,7 @@ mod tests {
|
|||||||
..Default::default()
|
..Default::default()
|
||||||
};
|
};
|
||||||
|
|
||||||
let Options::Datanode(opts) =
|
let opts = command.load_options(&GlobalOptions::default()).unwrap();
|
||||||
command.load_options(&GlobalOptions::default()).unwrap()
|
|
||||||
else {
|
|
||||||
unreachable!()
|
|
||||||
};
|
|
||||||
|
|
||||||
// Should be read from env, env > default values.
|
// Should be read from env, env > default values.
|
||||||
let DatanodeWalConfig::RaftEngine(raft_engine_config) = opts.wal else {
|
let DatanodeWalConfig::RaftEngine(raft_engine_config) = opts.wal else {
|
||||||
|
|||||||
@@ -17,7 +17,6 @@ use std::any::Any;
|
|||||||
use common_error::ext::{BoxedError, ErrorExt};
|
use common_error::ext::{BoxedError, ErrorExt};
|
||||||
use common_error::status_code::StatusCode;
|
use common_error::status_code::StatusCode;
|
||||||
use common_macro::stack_trace_debug;
|
use common_macro::stack_trace_debug;
|
||||||
use config::ConfigError;
|
|
||||||
 use rustyline::error::ReadlineError;
 use snafu::{Location, Snafu};
 
@@ -27,97 +26,120 @@ use snafu::{Location, Snafu};
 pub enum Error {
     #[snafu(display("Failed to create default catalog and schema"))]
     InitMetadata {
+        #[snafu(implicit)]
         location: Location,
         source: common_meta::error::Error,
     },
 
     #[snafu(display("Failed to iter stream"))]
     IterStream {
+        #[snafu(implicit)]
         location: Location,
         source: common_meta::error::Error,
     },
 
     #[snafu(display("Failed to init DDL manager"))]
     InitDdlManager {
+        #[snafu(implicit)]
         location: Location,
         source: common_meta::error::Error,
     },
 
     #[snafu(display("Failed to init default timezone"))]
     InitTimezone {
+        #[snafu(implicit)]
         location: Location,
         source: common_time::error::Error,
     },
 
     #[snafu(display("Failed to start procedure manager"))]
     StartProcedureManager {
+        #[snafu(implicit)]
         location: Location,
         source: common_procedure::error::Error,
     },
 
     #[snafu(display("Failed to stop procedure manager"))]
     StopProcedureManager {
+        #[snafu(implicit)]
         location: Location,
         source: common_procedure::error::Error,
     },
 
     #[snafu(display("Failed to start wal options allocator"))]
     StartWalOptionsAllocator {
+        #[snafu(implicit)]
         location: Location,
         source: common_meta::error::Error,
     },
 
     #[snafu(display("Failed to start datanode"))]
     StartDatanode {
+        #[snafu(implicit)]
         location: Location,
         source: datanode::error::Error,
     },
 
     #[snafu(display("Failed to shutdown datanode"))]
     ShutdownDatanode {
+        #[snafu(implicit)]
         location: Location,
         source: datanode::error::Error,
     },
 
     #[snafu(display("Failed to start frontend"))]
     StartFrontend {
+        #[snafu(implicit)]
         location: Location,
         source: frontend::error::Error,
     },
 
     #[snafu(display("Failed to shutdown frontend"))]
     ShutdownFrontend {
+        #[snafu(implicit)]
         location: Location,
         source: frontend::error::Error,
     },
 
     #[snafu(display("Failed to build meta server"))]
     BuildMetaServer {
+        #[snafu(implicit)]
         location: Location,
         source: meta_srv::error::Error,
     },
 
     #[snafu(display("Failed to start meta server"))]
     StartMetaServer {
+        #[snafu(implicit)]
         location: Location,
         source: meta_srv::error::Error,
     },
 
     #[snafu(display("Failed to shutdown meta server"))]
     ShutdownMetaServer {
+        #[snafu(implicit)]
         location: Location,
         source: meta_srv::error::Error,
     },
 
     #[snafu(display("Missing config, msg: {}", msg))]
-    MissingConfig { msg: String, location: Location },
+    MissingConfig {
+        msg: String,
+        #[snafu(implicit)]
+        location: Location,
+    },
 
     #[snafu(display("Illegal config: {}", msg))]
-    IllegalConfig { msg: String, location: Location },
+    IllegalConfig {
+        msg: String,
+        #[snafu(implicit)]
+        location: Location,
+    },
 
     #[snafu(display("Unsupported selector type: {}", selector_type))]
     UnsupportedSelectorType {
         selector_type: String,
+        #[snafu(implicit)]
         location: Location,
         source: meta_srv::error::Error,
     },
@@ -129,6 +151,7 @@ pub enum Error {
     ReplCreation {
         #[snafu(source)]
         error: ReadlineError,
+        #[snafu(implicit)]
         location: Location,
     },
 
@@ -136,23 +159,36 @@ pub enum Error {
     Readline {
         #[snafu(source)]
         error: ReadlineError,
+        #[snafu(implicit)]
+        location: Location,
+    },
+
+    #[snafu(display("Failed to request database, sql: {sql}"))]
+    RequestDatabase {
+        sql: String,
+        #[snafu(source)]
+        source: client::Error,
+        #[snafu(implicit)]
         location: Location,
     },
 
     #[snafu(display("Failed to collect RecordBatches"))]
     CollectRecordBatches {
+        #[snafu(implicit)]
         location: Location,
         source: common_recordbatch::error::Error,
     },
 
     #[snafu(display("Failed to pretty print Recordbatches"))]
     PrettyPrintRecordBatches {
+        #[snafu(implicit)]
        location: Location,
         source: common_recordbatch::error::Error,
     },
 
     #[snafu(display("Failed to start Meta client"))]
     StartMetaClient {
+        #[snafu(implicit)]
         location: Location,
         source: meta_client::error::Error,
     },
@@ -160,31 +196,36 @@ pub enum Error {
     #[snafu(display("Failed to parse SQL: {}", sql))]
     ParseSql {
         sql: String,
+        #[snafu(implicit)]
         location: Location,
         source: query::error::Error,
     },
 
     #[snafu(display("Failed to plan statement"))]
     PlanStatement {
+        #[snafu(implicit)]
         location: Location,
         source: query::error::Error,
     },
 
     #[snafu(display("Failed to encode logical plan in substrait"))]
     SubstraitEncodeLogicalPlan {
+        #[snafu(implicit)]
         location: Location,
         source: substrait::error::Error,
     },
 
     #[snafu(display("Failed to load layered config"))]
     LoadLayeredConfig {
-        #[snafu(source)]
-        error: ConfigError,
+        #[snafu(source(from(common_config::error::Error, Box::new)))]
+        source: Box<common_config::error::Error>,
+        #[snafu(implicit)]
         location: Location,
     },
 
     #[snafu(display("Failed to start catalog manager"))]
     StartCatalogManager {
+        #[snafu(implicit)]
         location: Location,
         source: catalog::error::Error,
     },
@@ -194,6 +235,7 @@ pub enum Error {
         etcd_addr: String,
         #[snafu(source)]
         error: etcd_client::Error,
+        #[snafu(implicit)]
         location: Location,
     },
 
@@ -201,6 +243,7 @@ pub enum Error {
     ConnectServer {
         addr: String,
         source: client::error::Error,
+        #[snafu(implicit)]
         location: Location,
     },
 
@@ -208,6 +251,7 @@ pub enum Error {
     SerdeJson {
         #[snafu(source)]
         error: serde_json::error::Error,
+        #[snafu(implicit)]
        location: Location,
     },
 
@@ -216,17 +260,25 @@ pub enum Error {
         reason: String,
         #[snafu(source)]
         error: reqwest::Error,
+        #[snafu(implicit)]
         location: Location,
     },
 
     #[snafu(display("Expect data from output, but got another thing"))]
-    NotDataFromOutput { location: Location },
+    NotDataFromOutput {
+        #[snafu(implicit)]
+        location: Location,
+    },
 
     #[snafu(display("Empty result from output"))]
-    EmptyResult { location: Location },
+    EmptyResult {
+        #[snafu(implicit)]
+        location: Location,
+    },
 
     #[snafu(display("Failed to manipulate file"))]
     FileIo {
+        #[snafu(implicit)]
         location: Location,
         #[snafu(source)]
         error: std::io::Error,
@@ -234,6 +286,7 @@ pub enum Error {
 
     #[snafu(display("Invalid database name: {}", database))]
     InvalidDatabaseName {
+        #[snafu(implicit)]
         location: Location,
         database: String,
     },
@@ -248,14 +301,30 @@ pub enum Error {
     #[snafu(display("Other error"))]
     Other {
         source: BoxedError,
+        #[snafu(implicit)]
         location: Location,
     },
 
     #[snafu(display("Failed to build runtime"))]
     BuildRuntime {
+        #[snafu(implicit)]
         location: Location,
         source: common_runtime::error::Error,
     },
+
+    #[snafu(display("Failed to get cache from cache registry: {}", name))]
+    CacheRequired {
+        #[snafu(implicit)]
+        location: Location,
+        name: String,
+    },
+
+    #[snafu(display("Failed to build cache registry"))]
+    BuildCacheRegistry {
+        #[snafu(implicit)]
+        location: Location,
+        source: cache::error::Error,
+    },
 }
 
 pub type Result<T> = std::result::Result<T, Error>;
@@ -294,6 +363,7 @@ impl ErrorExt for Error {
             Error::ReplCreation { .. } | Error::Readline { .. } | Error::HttpQuerySql { .. } => {
                 StatusCode::Internal
             }
+            Error::RequestDatabase { source, .. } => source.status_code(),
             Error::CollectRecordBatches { source, .. }
             | Error::PrettyPrintRecordBatches { source, .. } => source.status_code(),
             Error::StartMetaClient { source, .. } => source.status_code(),
@@ -308,6 +378,8 @@ impl ErrorExt for Error {
             Error::Other { source, .. } => source.status_code(),
 
             Error::BuildRuntime { source, .. } => source.status_code(),
+
+            Error::CacheRequired { .. } | Error::BuildCacheRegistry { .. } => StatusCode::Internal,
         }
     }
 }
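The bulk of the change above is mechanical: every `location: Location` field gains `#[snafu(implicit)]`, and the single-line variants are expanded so the attribute has a field to sit on. Below is a minimal, self-contained sketch of that pattern; it is not taken from this repository (the error type and file path are made up) and it assumes snafu 0.8 or newer, where `Location` implements implicit data generation.

```rust
use snafu::{Location, ResultExt, Snafu};

#[derive(Debug, Snafu)]
enum Error {
    #[snafu(display("Failed to read config file {}", path))]
    ReadConfig {
        path: String,
        #[snafu(source)]
        error: std::io::Error,
        // Marked implicit: filled in automatically by the context selector.
        #[snafu(implicit)]
        location: Location,
    },
}

fn load(path: &str) -> Result<String, Error> {
    // The caller only supplies `path`; `error` comes from the result and
    // `location` is captured implicitly at this call site.
    std::fs::read_to_string(path).context(ReadConfigSnafu { path })
}

fn main() {
    if let Err(err) = load("/definitely/not/there.toml") {
        // Debug output includes the file, line, and column of the `.context(...)` call.
        println!("{err:?}");
    }
}
```

With the implicit field, call sites never construct or pass a `Location` by hand, which is what lets the diff touch only the enum definitions and leave the `.context(...)` callers unchanged.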
@@ -16,14 +16,19 @@ use std::sync::Arc;
 use std::time::Duration;
 
 use async_trait::async_trait;
-use catalog::kvbackend::{CachedMetaKvBackendBuilder, KvBackendCatalogManager};
+use cache::{build_fundamental_cache_registry, with_default_composite_cache_registry};
+use catalog::kvbackend::{CachedMetaKvBackendBuilder, KvBackendCatalogManager, MetaKvBackend};
 use clap::Parser;
 use client::client_manager::DatanodeClients;
-use common_meta::cache_invalidator::MultiCacheInvalidator;
+use common_config::Configurable;
+use common_grpc::channel_manager::ChannelConfig;
+use common_meta::cache::{CacheRegistryBuilder, LayeredCacheRegistryBuilder};
 use common_meta::heartbeat::handler::parse_mailbox_message::ParseMailboxMessageHandler;
 use common_meta::heartbeat::handler::HandlerGroupExecutor;
 use common_telemetry::info;
+use common_telemetry::logging::TracingOptions;
 use common_time::timezone::set_default_timezone;
+use common_version::{short_version, version};
 use frontend::frontend::FrontendOptions;
 use frontend::heartbeat::handler::invalidate_table_cache::InvalidateTableCacheHandler;
 use frontend::heartbeat::HeartbeatTask;
@@ -34,18 +39,29 @@ use meta_client::MetaClientOptions;
 use servers::tls::{TlsMode, TlsOption};
 use servers::Mode;
 use snafu::{OptionExt, ResultExt};
+use tracing_appender::non_blocking::WorkerGuard;
 
-use crate::error::{self, InitTimezoneSnafu, MissingConfigSnafu, Result, StartFrontendSnafu};
-use crate::options::{GlobalOptions, Options};
-use crate::App;
+use crate::error::{
+    self, InitTimezoneSnafu, LoadLayeredConfigSnafu, MissingConfigSnafu, Result, StartFrontendSnafu,
+};
+use crate::options::GlobalOptions;
+use crate::{log_versions, App};
 
 pub struct Instance {
     frontend: FeInstance,
+
+    // Keep the logging guard to prevent the worker from being dropped.
+    _guard: Vec<WorkerGuard>,
 }
 
+pub const APP_NAME: &str = "greptime-frontend";
+
 impl Instance {
-    pub fn new(frontend: FeInstance) -> Self {
-        Self { frontend }
+    pub fn new(frontend: FeInstance, guard: Vec<WorkerGuard>) -> Self {
+        Self {
+            frontend,
+            _guard: guard,
+        }
     }
 
     pub fn mut_inner(&mut self) -> &mut FeInstance {
@@ -60,7 +76,7 @@ impl Instance {
 #[async_trait]
 impl App for Instance {
     fn name(&self) -> &str {
-        "greptime-frontend"
+        APP_NAME
     }
 
     async fn start(&mut self) -> Result<()> {
@@ -86,11 +102,11 @@ pub struct Command {
 }
 
 impl Command {
-    pub async fn build(self, opts: FrontendOptions) -> Result<Instance> {
+    pub async fn build(&self, opts: FrontendOptions) -> Result<Instance> {
         self.subcmd.build(opts).await
     }
 
-    pub fn load_options(&self, global_options: &GlobalOptions) -> Result<Options> {
+    pub fn load_options(&self, global_options: &GlobalOptions) -> Result<FrontendOptions> {
         self.subcmd.load_options(global_options)
     }
 }
@@ -101,13 +117,13 @@ enum SubCommand {
 }
 
 impl SubCommand {
-    async fn build(self, opts: FrontendOptions) -> Result<Instance> {
+    async fn build(&self, opts: FrontendOptions) -> Result<Instance> {
         match self {
             SubCommand::Start(cmd) => cmd.build(opts).await,
         }
     }
 
-    fn load_options(&self, global_options: &GlobalOptions) -> Result<Options> {
+    fn load_options(&self, global_options: &GlobalOptions) -> Result<FrontendOptions> {
         match self {
             SubCommand::Start(cmd) => cmd.load_options(global_options),
         }
@@ -130,8 +146,8 @@ pub struct StartCommand {
     config_file: Option<String>,
     #[clap(short, long)]
     influxdb_enable: Option<bool>,
-    #[clap(long, value_delimiter = ',', num_args = 1..)]
-    metasrv_addr: Option<Vec<String>>,
+    #[clap(long, aliases = ["metasrv-addr"], value_delimiter = ',', num_args = 1..)]
+    metasrv_addrs: Option<Vec<String>>,
     #[clap(long)]
     tls_mode: Option<TlsMode>,
     #[clap(long)]
@@ -147,13 +163,23 @@ pub struct StartCommand {
 }
 
 impl StartCommand {
-    fn load_options(&self, global_options: &GlobalOptions) -> Result<Options> {
-        let mut opts: FrontendOptions = Options::load_layered_options(
-            self.config_file.as_deref(),
-            self.env_prefix.as_ref(),
-            FrontendOptions::env_list_keys(),
-        )?;
+    fn load_options(&self, global_options: &GlobalOptions) -> Result<FrontendOptions> {
+        self.merge_with_cli_options(
+            global_options,
+            FrontendOptions::load_layered_options(
+                self.config_file.as_deref(),
+                self.env_prefix.as_ref(),
+            )
+            .context(LoadLayeredConfigSnafu)?,
+        )
+    }
 
+    // The precedence order is: cli > config file > environment variables > default values.
+    fn merge_with_cli_options(
+        &self,
+        global_options: &GlobalOptions,
+        mut opts: FrontendOptions,
+    ) -> Result<FrontendOptions> {
         if let Some(dir) = &global_options.log_dir {
             opts.logging.dir.clone_from(dir);
         }
@@ -162,6 +188,11 @@ impl StartCommand {
             opts.logging.level.clone_from(&global_options.log_level);
         }
 
+        opts.tracing = TracingOptions {
+            #[cfg(feature = "tokio-console")]
+            tokio_console_addr: global_options.tokio_console_addr.clone(),
+        };
+
         let tls_opts = TlsOption::new(
             self.tls_mode.clone(),
             self.tls_cert_path.clone(),
@@ -182,6 +213,7 @@ impl StartCommand {
 
         if let Some(addr) = &self.rpc_addr {
             opts.grpc.addr.clone_from(addr);
+            opts.grpc.tls = tls_opts.clone();
         }
 
         if let Some(addr) = &self.mysql_addr {
@@ -200,7 +232,7 @@ impl StartCommand {
             opts.influxdb.enable = enable;
         }
 
-        if let Some(metasrv_addrs) = &self.metasrv_addr {
+        if let Some(metasrv_addrs) = &self.metasrv_addrs {
             opts.meta_client
                 .get_or_insert_with(MetaClientOptions::default)
                 .metasrv_addrs
@@ -210,10 +242,18 @@ impl StartCommand {
 
         opts.user_provider.clone_from(&self.user_provider);
 
-        Ok(Options::Frontend(Box::new(opts)))
+        Ok(opts)
     }
 
-    async fn build(self, mut opts: FrontendOptions) -> Result<Instance> {
+    async fn build(&self, mut opts: FrontendOptions) -> Result<Instance> {
+        let guard = common_telemetry::init_global_logging(
+            APP_NAME,
+            &opts.logging,
+            &opts.tracing,
+            opts.node_id.clone(),
+        );
+        log_versions(version!(), short_version!());
+
         #[allow(clippy::unnecessary_mut_passed)]
         let plugins = plugins::setup_frontend_plugins(&mut opts)
             .await
@@ -242,21 +282,34 @@ impl StartCommand {
             .cache_tti(cache_tti)
             .build();
         let cached_meta_backend = Arc::new(cached_meta_backend);
-        let multi_cache_invalidator = Arc::new(MultiCacheInvalidator::with_invalidators(vec![
-            cached_meta_backend.clone(),
-        ]));
+
+        // Builds cache registry
+        let layered_cache_builder = LayeredCacheRegistryBuilder::default().add_cache_registry(
+            CacheRegistryBuilder::default()
+                .add_cache(cached_meta_backend.clone())
+                .build(),
+        );
+        let fundamental_cache_registry =
+            build_fundamental_cache_registry(Arc::new(MetaKvBackend::new(meta_client.clone())));
+        let layered_cache_registry = Arc::new(
+            with_default_composite_cache_registry(
+                layered_cache_builder.add_cache_registry(fundamental_cache_registry),
+            )
+            .context(error::BuildCacheRegistrySnafu)?
+            .build(),
+        );
+
         let catalog_manager = KvBackendCatalogManager::new(
             opts.mode,
             Some(meta_client.clone()),
             cached_meta_backend.clone(),
-            multi_cache_invalidator.clone(),
-        )
-        .await;
+            layered_cache_registry.clone(),
+        );
 
         let executor = HandlerGroupExecutor::new(vec![
             Arc::new(ParseMailboxMessageHandler),
             Arc::new(InvalidateTableCacheHandler::new(
-                multi_cache_invalidator.clone(),
+                layered_cache_registry.clone(),
             )),
         ]);
 
@@ -267,14 +320,23 @@ impl StartCommand {
             Arc::new(executor),
         );
 
+        // frontend to datanode need not timeout.
+        // Some queries are expected to take long time.
+        let channel_config = ChannelConfig {
+            timeout: None,
+            ..Default::default()
+        };
+        let client = DatanodeClients::new(channel_config);
+
         let mut instance = FrontendBuilder::new(
             cached_meta_backend.clone(),
+            layered_cache_registry.clone(),
             catalog_manager,
-            Arc::new(DatanodeClients::default()),
+            Arc::new(client),
             meta_client,
         )
         .with_plugin(plugins.clone())
-        .with_cache_invalidator(multi_cache_invalidator)
+        .with_local_cache_invalidator(layered_cache_registry)
        .with_heartbeat_task(heartbeat_task)
         .try_build()
         .await
@@ -288,7 +350,7 @@ impl StartCommand {
             .build_servers(opts, servers)
             .context(StartFrontendSnafu)?;
 
-        Ok(Instance::new(instance))
+        Ok(Instance::new(instance, guard))
     }
 }
 
@@ -299,12 +361,13 @@ mod tests {
 
     use auth::{Identity, Password, UserProviderRef};
     use common_base::readable_size::ReadableSize;
+    use common_config::ENV_VAR_SEP;
     use common_test_util::temp_dir::create_named_temp_file;
     use frontend::service_config::GrpcOptions;
     use servers::http::HttpOptions;
 
     use super::*;
-    use crate::options::{GlobalOptions, ENV_VAR_SEP};
+    use crate::options::GlobalOptions;
 
     #[test]
     fn test_try_from_start_command() {
@@ -317,10 +380,7 @@ mod tests {
             ..Default::default()
         };
 
-        let Options::Frontend(opts) = command.load_options(&GlobalOptions::default()).unwrap()
-        else {
-            unreachable!()
-        };
+        let opts = command.load_options(&GlobalOptions::default()).unwrap();
 
         assert_eq!(opts.http.addr, "127.0.0.1:1234");
         assert_eq!(ReadableSize::mb(64), opts.http.body_limit);
@@ -368,10 +428,7 @@ mod tests {
             ..Default::default()
        };
 
-        let Options::Frontend(fe_opts) = command.load_options(&GlobalOptions::default()).unwrap()
-        else {
-            unreachable!()
-        };
+        let fe_opts = command.load_options(&GlobalOptions::default()).unwrap();
         assert_eq!(Mode::Distributed, fe_opts.mode);
         assert_eq!("127.0.0.1:4000".to_string(), fe_opts.http.addr);
         assert_eq!(Duration::from_secs(30), fe_opts.http.timeout);
@@ -424,7 +481,7 @@ mod tests {
         })
         .unwrap();
 
-        let logging_opt = options.logging_options();
+        let logging_opt = options.logging;
         assert_eq!("/tmp/greptimedb/test/logs", logging_opt.dir);
         assert_eq!("debug", logging_opt.level.as_ref().unwrap());
     }
@@ -500,11 +557,7 @@ mod tests {
             ..Default::default()
         };
 
-        let Options::Frontend(fe_opts) =
-            command.load_options(&GlobalOptions::default()).unwrap()
-        else {
-            unreachable!()
-        };
+        let fe_opts = command.load_options(&GlobalOptions::default()).unwrap();
 
         // Should be read from env, env > default values.
        assert_eq!(fe_opts.mysql.runtime_size, 11);
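A recurring piece of the diff above is the new `_guard: Vec<WorkerGuard>` field held inside `Instance` and threaded through `build`. The standalone sketch below is not this repository's `common_telemetry::init_global_logging`; it assumes tracing-appender 0.2 and tracing-subscriber 0.3, and only illustrates why the guard has to be stored rather than dropped.

```rust
use tracing_appender::non_blocking::WorkerGuard;

struct Instance {
    // Dropping this guard would stop the background log writer thread.
    _guard: WorkerGuard,
}

fn init_logging() -> Instance {
    // A rolling file appender wrapped in a non-blocking writer; the writer flushes
    // from a worker thread whose lifetime is tied to the returned guard.
    let file_appender = tracing_appender::rolling::daily("/tmp/logs", "app.log");
    let (writer, guard) = tracing_appender::non_blocking(file_appender);
    tracing_subscriber::fmt().with_writer(writer).init();
    Instance { _guard: guard }
}

fn main() {
    let _instance = init_logging();
    tracing::info!("logs keep flushing for as long as `_instance` is alive");
    // When `_instance` drops at the end of main, the guard flushes and shuts the writer down.
}
```

Storing the guard next to the frontend instance ties log flushing to the application's lifetime, which is what returning it from `build` and keeping it in `Instance` achieves.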
@@ -17,6 +17,8 @@
 use async_trait::async_trait;
 use common_telemetry::{error, info};
 
+use crate::error::Result;
+
 pub mod cli;
 pub mod datanode;
 pub mod error;
@@ -35,32 +37,39 @@ pub trait App: Send {
     fn name(&self) -> &str;
 
     /// A hook for implementor to make something happened before actual startup. Defaults to no-op.
-    async fn pre_start(&mut self) -> error::Result<()> {
+    async fn pre_start(&mut self) -> Result<()> {
         Ok(())
     }
 
-    async fn start(&mut self) -> error::Result<()>;
+    async fn start(&mut self) -> Result<()>;
 
-    async fn stop(&self) -> error::Result<()>;
-}
+    /// Waits the quit signal by default.
+    fn wait_signal(&self) -> bool {
+        true
+    }
 
-pub async fn start_app(mut app: Box<dyn App>) -> error::Result<()> {
-    info!("Starting app: {}", app.name());
+    async fn stop(&self) -> Result<()>;
 
-    app.pre_start().await?;
+    async fn run(&mut self) -> Result<()> {
+        info!("Starting app: {}", self.name());
 
-    app.start().await?;
+        self.pre_start().await?;
 
-    if let Err(e) = tokio::signal::ctrl_c().await {
-        error!("Failed to listen for ctrl-c signal: {}", e);
-        // It's unusual to fail to listen for ctrl-c signal, maybe there's something unexpected in
-        // the underlying system. So we stop the app instead of running nonetheless to let people
-        // investigate the issue.
-    }
+        self.start().await?;
 
-    app.stop().await?;
-    info!("Goodbye!");
-    Ok(())
+        if self.wait_signal() {
+            if let Err(e) = tokio::signal::ctrl_c().await {
+                error!(e; "Failed to listen for ctrl-c signal");
+                // It's unusual to fail to listen for ctrl-c signal, maybe there's something unexpected in
+                // the underlying system. So we stop the app instead of running nonetheless to let people
+                // investigate the issue.
+            }
+        }
+
+        self.stop().await?;
+        info!("Goodbye!");
+        Ok(())
+    }
 }
 
 /// Log the versions of the application, and the arguments passed to the cli.
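The trait change above folds the old free function `start_app` into a default `run` method and adds an overridable `wait_signal` hook. The toy implementor below is a sketch under stated assumptions: the `App` trait is abbreviated from the diff, `MyService` is hypothetical, error handling is simplified to a boxed error, and tokio is assumed with its `signal`, `rt`, and `macros` features.

```rust
use async_trait::async_trait;

type Result<T> = std::result::Result<T, Box<dyn std::error::Error + Send + Sync>>;

#[async_trait]
trait App: Send {
    fn name(&self) -> &str;

    async fn start(&mut self) -> Result<()>;

    /// Waits for the quit signal by default.
    fn wait_signal(&self) -> bool {
        true
    }

    async fn stop(&self) -> Result<()>;

    async fn run(&mut self) -> Result<()> {
        println!("starting {}", self.name());
        self.start().await?;
        if self.wait_signal() {
            // Block until ctrl-c; listen errors are ignored here for brevity.
            let _ = tokio::signal::ctrl_c().await;
        }
        self.stop().await
    }
}

struct MyService;

#[async_trait]
impl App for MyService {
    fn name(&self) -> &str {
        "my-service"
    }

    async fn start(&mut self) -> Result<()> {
        println!("{} started", self.name());
        Ok(())
    }

    async fn stop(&self) -> Result<()> {
        println!("{} stopped", self.name());
        Ok(())
    }
}

#[tokio::main]
async fn main() -> Result<()> {
    let mut app = MyService;
    app.run().await
}
```

A component that should return as soon as `start` finishes can override `wait_signal` to return `false`, which is the point of making that hook overridable instead of hard-coding the ctrl-c wait.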
@@ -16,29 +16,41 @@ use std::time::Duration;
 
 use async_trait::async_trait;
 use clap::Parser;
+use common_config::Configurable;
 use common_telemetry::info;
+use common_telemetry::logging::TracingOptions;
+use common_version::{short_version, version};
 use meta_srv::bootstrap::MetasrvInstance;
 use meta_srv::metasrv::MetasrvOptions;
 use snafu::ResultExt;
+use tracing_appender::non_blocking::WorkerGuard;
 
-use crate::error::{self, Result, StartMetaServerSnafu};
-use crate::options::{GlobalOptions, Options};
-use crate::App;
+use crate::error::{self, LoadLayeredConfigSnafu, Result, StartMetaServerSnafu};
+use crate::options::GlobalOptions;
+use crate::{log_versions, App};
 
+pub const APP_NAME: &str = "greptime-metasrv";
+
 pub struct Instance {
     instance: MetasrvInstance,
+
+    // Keep the logging guard to prevent the worker from being dropped.
+    _guard: Vec<WorkerGuard>,
 }
 
 impl Instance {
-    fn new(instance: MetasrvInstance) -> Self {
-        Self { instance }
+    fn new(instance: MetasrvInstance, guard: Vec<WorkerGuard>) -> Self {
+        Self {
+            instance,
+            _guard: guard,
+        }
     }
 }
 
 #[async_trait]
 impl App for Instance {
     fn name(&self) -> &str {
-        "greptime-metasrv"
+        APP_NAME
     }
 
     async fn start(&mut self) -> Result<()> {
@@ -64,11 +76,11 @@ pub struct Command {
 }
 
 impl Command {
-    pub async fn build(self, opts: MetasrvOptions) -> Result<Instance> {
+    pub async fn build(&self, opts: MetasrvOptions) -> Result<Instance> {
         self.subcmd.build(opts).await
     }
 
-    pub fn load_options(&self, global_options: &GlobalOptions) -> Result<Options> {
+    pub fn load_options(&self, global_options: &GlobalOptions) -> Result<MetasrvOptions> {
         self.subcmd.load_options(global_options)
     }
 }
@@ -79,13 +91,13 @@ enum SubCommand {
 }
 
 impl SubCommand {
-    async fn build(self, opts: MetasrvOptions) -> Result<Instance> {
+    async fn build(&self, opts: MetasrvOptions) -> Result<Instance> {
         match self {
             SubCommand::Start(cmd) => cmd.build(opts).await,
         }
     }
 
-    fn load_options(&self, global_options: &GlobalOptions) -> Result<Options> {
+    fn load_options(&self, global_options: &GlobalOptions) -> Result<MetasrvOptions> {
         match self {
             SubCommand::Start(cmd) => cmd.load_options(global_options),
         }
@@ -98,8 +110,8 @@ struct StartCommand {
     bind_addr: Option<String>,
     #[clap(long)]
     server_addr: Option<String>,
-    #[clap(long)]
-    store_addr: Option<String>,
+    #[clap(long, aliases = ["store-addr"], value_delimiter = ',', num_args = 1..)]
+    store_addrs: Option<Vec<String>>,
     #[clap(short, long)]
     config_file: Option<String>,
     #[clap(short, long)]
@@ -126,13 +138,23 @@ struct StartCommand {
 }
 
 impl StartCommand {
-    fn load_options(&self, global_options: &GlobalOptions) -> Result<Options> {
-        let mut opts: MetasrvOptions = Options::load_layered_options(
-            self.config_file.as_deref(),
-            self.env_prefix.as_ref(),
-            MetasrvOptions::env_list_keys(),
-        )?;
+    fn load_options(&self, global_options: &GlobalOptions) -> Result<MetasrvOptions> {
+        self.merge_with_cli_options(
+            global_options,
+            MetasrvOptions::load_layered_options(
+                self.config_file.as_deref(),
+                self.env_prefix.as_ref(),
+            )
+            .context(LoadLayeredConfigSnafu)?,
+        )
+    }
 
+    // The precedence order is: cli > config file > environment variables > default values.
+    fn merge_with_cli_options(
+        &self,
+        global_options: &GlobalOptions,
+        mut opts: MetasrvOptions,
+    ) -> Result<MetasrvOptions> {
         if let Some(dir) = &global_options.log_dir {
             opts.logging.dir.clone_from(dir);
         }
@@ -141,6 +163,11 @@ impl StartCommand {
             opts.logging.level.clone_from(&global_options.log_level);
         }
 
+        opts.tracing = TracingOptions {
+            #[cfg(feature = "tokio-console")]
+            tokio_console_addr: global_options.tokio_console_addr.clone(),
+        };
+
         if let Some(addr) = &self.bind_addr {
             opts.bind_addr.clone_from(addr);
         }
@@ -149,8 +176,8 @@ impl StartCommand {
             opts.server_addr.clone_from(addr);
         }
 
-        if let Some(addr) = &self.store_addr {
-            opts.store_addr.clone_from(addr);
+        if let Some(addrs) = &self.store_addrs {
+            opts.store_addrs.clone_from(addrs);
         }
 
         if let Some(selector_type) = &self.selector {
@@ -190,10 +217,14 @@ impl StartCommand {
         // Disable dashboard in metasrv.
         opts.http.disable_dashboard = true;
 
-        Ok(Options::Metasrv(Box::new(opts)))
+        Ok(opts)
     }
 
-    async fn build(self, mut opts: MetasrvOptions) -> Result<Instance> {
+    async fn build(&self, mut opts: MetasrvOptions) -> Result<Instance> {
+        let guard =
+            common_telemetry::init_global_logging(APP_NAME, &opts.logging, &opts.tracing, None);
+        log_versions(version!(), short_version!());
+
         let plugins = plugins::setup_metasrv_plugins(&mut opts)
             .await
             .context(StartMetaServerSnafu)?;
@@ -210,7 +241,7 @@ impl StartCommand {
         .await
         .context(error::BuildMetaServerSnafu)?;
 
-        Ok(Instance::new(instance))
+        Ok(Instance::new(instance, guard))
     }
 }
 
@@ -219,27 +250,25 @@ mod tests {
     use std::io::Write;
 
     use common_base::readable_size::ReadableSize;
+    use common_config::ENV_VAR_SEP;
     use common_test_util::temp_dir::create_named_temp_file;
     use meta_srv::selector::SelectorType;
 
     use super::*;
-    use crate::options::ENV_VAR_SEP;
 
     #[test]
     fn test_read_from_cmd() {
         let cmd = StartCommand {
             bind_addr: Some("127.0.0.1:3002".to_string()),
             server_addr: Some("127.0.0.1:3002".to_string()),
-            store_addr: Some("127.0.0.1:2380".to_string()),
+            store_addrs: Some(vec!["127.0.0.1:2380".to_string()]),
             selector: Some("LoadBased".to_string()),
             ..Default::default()
         };
 
-        let Options::Metasrv(options) = cmd.load_options(&GlobalOptions::default()).unwrap() else {
-            unreachable!()
-        };
+        let options = cmd.load_options(&GlobalOptions::default()).unwrap();
         assert_eq!("127.0.0.1:3002".to_string(), options.bind_addr);
-        assert_eq!("127.0.0.1:2380".to_string(), options.store_addr);
+        assert_eq!(vec!["127.0.0.1:2380".to_string()], options.store_addrs);
         assert_eq!(SelectorType::LoadBased, options.selector);
     }
 
@@ -270,12 +299,10 @@ mod tests {
             ..Default::default()
        };
 
-        let Options::Metasrv(options) = cmd.load_options(&GlobalOptions::default()).unwrap() else {
-            unreachable!()
-        };
+        let options = cmd.load_options(&GlobalOptions::default()).unwrap();
         assert_eq!("127.0.0.1:3002".to_string(), options.bind_addr);
         assert_eq!("127.0.0.1:3002".to_string(), options.server_addr);
-        assert_eq!("127.0.0.1:2379".to_string(), options.store_addr);
+        assert_eq!(vec!["127.0.0.1:2379".to_string()], options.store_addrs);
         assert_eq!(SelectorType::LeaseBased, options.selector);
         assert_eq!("debug", options.logging.level.as_ref().unwrap());
         assert_eq!("/tmp/greptimedb/test/logs".to_string(), options.logging.dir);
@@ -309,7 +336,7 @@ mod tests {
         let cmd = StartCommand {
             bind_addr: Some("127.0.0.1:3002".to_string()),
             server_addr: Some("127.0.0.1:3002".to_string()),
-            store_addr: Some("127.0.0.1:2380".to_string()),
+            store_addrs: Some(vec!["127.0.0.1:2380".to_string()]),
             selector: Some("LoadBased".to_string()),
             ..Default::default()
         };
@@ -324,7 +351,7 @@ mod tests {
         })
         .unwrap();
 
-        let logging_opt = options.logging_options();
+        let logging_opt = options.logging;
         assert_eq!("/tmp/greptimedb/test/logs", logging_opt.dir);
         assert_eq!("debug", logging_opt.level.as_ref().unwrap());
     }
@@ -379,11 +406,7 @@ mod tests {
             ..Default::default()
        };
 
-        let Options::Metasrv(opts) =
-            command.load_options(&GlobalOptions::default()).unwrap()
-        else {
-            unreachable!()
-        };
+        let opts = command.load_options(&GlobalOptions::default()).unwrap();
 
         // Should be read from env, env > default values.
         assert_eq!(opts.bind_addr, "127.0.0.1:14002");
@@ -395,7 +418,7 @@ mod tests {
         assert_eq!(opts.http.addr, "127.0.0.1:14000");
 
         // Should be default value.
-        assert_eq!(opts.store_addr, "127.0.0.1:2379");
+        assert_eq!(opts.store_addrs, vec!["127.0.0.1:2379".to_string()]);
            },
        );
    }
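The `store_addr` to `store_addrs` change above keeps backward compatibility through a clap alias while switching to a comma-separated list, the same shape used for `metasrv_addrs` in the frontend diff. A minimal sketch of that flag (clap 4 with the derive feature; the struct and binary names here are illustrative only):

```rust
use clap::Parser;

#[derive(Parser, Debug)]
struct StartCommand {
    // New plural flag, comma-separated, with the old singular name kept as an alias.
    #[clap(long, aliases = ["store-addr"], value_delimiter = ',', num_args = 1..)]
    store_addrs: Option<Vec<String>>,
}

fn main() {
    // Both spellings parse into the same field:
    //   app --store-addrs 127.0.0.1:2379,127.0.0.1:2380
    //   app --store-addr 127.0.0.1:2379
    let cmd = StartCommand::parse_from(["app", "--store-addrs", "127.0.0.1:2379,127.0.0.1:2380"]);
    assert_eq!(cmd.store_addrs.as_ref().map(Vec::len), Some(2));
    println!("{cmd:?}");
}
```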
@@ -13,53 +13,6 @@
 // limitations under the License.
 
 use clap::Parser;
-use common_config::KvBackendConfig;
-use common_telemetry::logging::{LoggingOptions, TracingOptions};
-use common_wal::config::MetasrvWalConfig;
-use config::{Config, Environment, File, FileFormat};
-use datanode::config::{DatanodeOptions, ProcedureConfig};
-use frontend::error::{Result as FeResult, TomlFormatSnafu};
-use frontend::frontend::{FrontendOptions, TomlSerializable};
-use meta_srv::metasrv::MetasrvOptions;
-use serde::{Deserialize, Serialize};
-use snafu::ResultExt;
-
-use crate::error::{LoadLayeredConfigSnafu, Result, SerdeJsonSnafu};
-
-pub const ENV_VAR_SEP: &str = "__";
-pub const ENV_LIST_SEP: &str = ",";
-
-/// Options mixed up from datanode, frontend and metasrv.
-#[derive(Serialize, Debug, Clone)]
-pub struct MixOptions {
-    pub data_home: String,
-    pub procedure: ProcedureConfig,
-    pub metadata_store: KvBackendConfig,
-    pub frontend: FrontendOptions,
-    pub datanode: DatanodeOptions,
-    pub logging: LoggingOptions,
-    pub wal_meta: MetasrvWalConfig,
-}
-
-impl From<MixOptions> for FrontendOptions {
-    fn from(value: MixOptions) -> Self {
-        value.frontend
-    }
-}
-
-impl TomlSerializable for MixOptions {
-    fn to_toml(&self) -> FeResult<String> {
-        toml::to_string(self).context(TomlFormatSnafu)
-    }
-}
-
-pub enum Options {
-    Datanode(Box<DatanodeOptions>),
-    Frontend(Box<FrontendOptions>),
-    Metasrv(Box<MetasrvOptions>),
-    Standalone(Box<MixOptions>),
-    Cli(Box<LoggingOptions>),
-}
-
 #[derive(Parser, Default, Debug, Clone)]
 pub struct GlobalOptions {
@@ -76,216 +29,3 @@ pub struct GlobalOptions {
     #[arg(global = true)]
     pub tokio_console_addr: Option<String>,
 }
-
-impl GlobalOptions {
-    pub fn tracing_options(&self) -> TracingOptions {
-        TracingOptions {
-            #[cfg(feature = "tokio-console")]
-            tokio_console_addr: self.tokio_console_addr.clone(),
-        }
-    }
-}
-
-impl Options {
-    pub fn logging_options(&self) -> &LoggingOptions {
-        match self {
-            Options::Datanode(opts) => &opts.logging,
-            Options::Frontend(opts) => &opts.logging,
-            Options::Metasrv(opts) => &opts.logging,
-            Options::Standalone(opts) => &opts.logging,
-            Options::Cli(opts) => opts,
-        }
-    }
-
-    /// Load the configuration from multiple sources and merge them.
-    /// The precedence order is: config file > environment variables > default values.
-    /// `env_prefix` is the prefix of environment variables, e.g. "FRONTEND__xxx".
-    /// The function will use dunder(double underscore) `__` as the separator for environment variables, for example:
-    /// `DATANODE__STORAGE__MANIFEST__CHECKPOINT_MARGIN` will be mapped to `DatanodeOptions.storage.manifest.checkpoint_margin` field in the configuration.
-    /// `list_keys` is the list of keys that should be parsed as a list, for example, you can pass `Some(&["meta_client_options.metasrv_addrs"]` to parse `GREPTIMEDB_METASRV__META_CLIENT_OPTIONS__METASRV_ADDRS` as a list.
-    /// The function will use comma `,` as the separator for list values, for example: `127.0.0.1:3001,127.0.0.1:3002,127.0.0.1:3003`.
-    pub fn load_layered_options<'de, T: Serialize + Deserialize<'de> + Default>(
-        config_file: Option<&str>,
-        env_prefix: &str,
-        list_keys: Option<&[&str]>,
-    ) -> Result<T> {
-        let default_opts = T::default();
-
-        let env_source = {
-            let mut env = Environment::default();
-
-            if !env_prefix.is_empty() {
-                env = env.prefix(env_prefix);
-            }
-
-            if let Some(list_keys) = list_keys {
-                env = env.list_separator(ENV_LIST_SEP);
-                for key in list_keys {
-                    env = env.with_list_parse_key(key);
-                }
-            }
-
-            env.try_parsing(true)
-                .separator(ENV_VAR_SEP)
-                .ignore_empty(true)
-        };
-
-        // Workaround: Replacement for `Config::try_from(&default_opts)` due to
-        // `ConfigSerializer` cannot handle the case of an empty struct contained
-        // within an iterative structure.
-        // See: https://github.com/mehcode/config-rs/issues/461
-        let json_str = serde_json::to_string(&default_opts).context(SerdeJsonSnafu)?;
-        let default_config = File::from_str(&json_str, FileFormat::Json);
-
-        // Add default values and environment variables as the sources of the configuration.
-        let mut layered_config = Config::builder()
-            .add_source(default_config)
-            .add_source(env_source);
-
-        // Add config file as the source of the configuration if it is specified.
-        if let Some(config_file) = config_file {
-            layered_config = layered_config.add_source(File::new(config_file, FileFormat::Toml));
-        }
-
-        let opts = layered_config
-            .build()
-            .context(LoadLayeredConfigSnafu)?
-            .try_deserialize()
-            .context(LoadLayeredConfigSnafu)?;
-
-        Ok(opts)
-    }
-
-    pub fn node_id(&self) -> Option<String> {
-        match self {
-            Options::Metasrv(_) | Options::Cli(_) => None,
-            Options::Datanode(opt) => opt.node_id.map(|x| x.to_string()),
-            Options::Frontend(opt) => opt.node_id.clone(),
-            Options::Standalone(opt) => opt.frontend.node_id.clone(),
-        }
-    }
-}
-
-#[cfg(test)]
-mod tests {
-    use std::io::Write;
-
-    use common_test_util::temp_dir::create_named_temp_file;
-    use common_wal::config::DatanodeWalConfig;
-    use datanode::config::{DatanodeOptions, ObjectStoreConfig};
-
-    use super::*;
-
-    #[test]
-    fn test_load_layered_options() {
-        let mut file = create_named_temp_file();
-        let toml_str = r#"
-            mode = "distributed"
-            enable_memory_catalog = false
-            rpc_addr = "127.0.0.1:3001"
-            rpc_hostname = "127.0.0.1"
-            rpc_runtime_size = 8
-            mysql_addr = "127.0.0.1:4406"
-            mysql_runtime_size = 2
-
-            [meta_client]
-            timeout = "3s"
-            connect_timeout = "5s"
-            tcp_nodelay = true
-
-            [wal]
-            provider = "raft_engine"
-            dir = "/tmp/greptimedb/wal"
-            file_size = "1GB"
-            purge_threshold = "50GB"
-            purge_interval = "10m"
-            read_batch_size = 128
-            sync_write = false
-
-            [logging]
-            level = "debug"
-            dir = "/tmp/greptimedb/test/logs"
-        "#;
-        write!(file, "{}", toml_str).unwrap();
-
-        let env_prefix = "DATANODE_UT";
-        temp_env::with_vars(
-            // The following environment variables will be used to override the values in the config file.
-            [
-                (
-                    // storage.type = S3
-                    [
-                        env_prefix.to_string(),
-                        "storage".to_uppercase(),
-                        "type".to_uppercase(),
-                    ]
-                    .join(ENV_VAR_SEP),
-                    Some("S3"),
-                ),
-                (
-                    // storage.bucket = mybucket
-                    [
-                        env_prefix.to_string(),
-                        "storage".to_uppercase(),
-                        "bucket".to_uppercase(),
-                    ]
-                    .join(ENV_VAR_SEP),
-                    Some("mybucket"),
-                ),
-                (
-                    // wal.dir = /other/wal/dir
-                    [
-                        env_prefix.to_string(),
-                        "wal".to_uppercase(),
-                        "dir".to_uppercase(),
-                    ]
-                    .join(ENV_VAR_SEP),
-                    Some("/other/wal/dir"),
-                ),
-                (
-                    // meta_client.metasrv_addrs = 127.0.0.1:3001,127.0.0.1:3002,127.0.0.1:3003
-                    [
-                        env_prefix.to_string(),
-                        "meta_client".to_uppercase(),
-                        "metasrv_addrs".to_uppercase(),
-                    ]
-                    .join(ENV_VAR_SEP),
-                    Some("127.0.0.1:3001,127.0.0.1:3002,127.0.0.1:3003"),
-                ),
-            ],
-            || {
-                let opts: DatanodeOptions = Options::load_layered_options(
-                    Some(file.path().to_str().unwrap()),
-                    env_prefix,
-                    DatanodeOptions::env_list_keys(),
-                )
-                .unwrap();
-
-                // Check the configs from environment variables.
-                match &opts.storage.store {
-                    ObjectStoreConfig::S3(s3_config) => {
-                        assert_eq!(s3_config.bucket, "mybucket".to_string());
-                    }
-                    _ => panic!("unexpected store type"),
-                }
-                assert_eq!(
-                    opts.meta_client.unwrap().metasrv_addrs,
-                    vec![
-                        "127.0.0.1:3001".to_string(),
-                        "127.0.0.1:3002".to_string(),
-                        "127.0.0.1:3003".to_string()
-                    ]
-                );
-
-                // Should be the values from config file, not environment variables.
-                let DatanodeWalConfig::RaftEngine(raft_engine_config) = opts.wal else {
-                    unreachable!()
-                };
-                assert_eq!(raft_engine_config.dir.unwrap(), "/tmp/greptimedb/wal");
-
-                // Should be default values.
-                assert_eq!(opts.node_id, None);
-            },
-        );
-    }
-}
|
|||||||
use std::{fs, path};
|
use std::{fs, path};
|
||||||
|
|
||||||
use async_trait::async_trait;
|
use async_trait::async_trait;
|
||||||
|
use cache::{build_fundamental_cache_registry, with_default_composite_cache_registry};
|
||||||
use catalog::kvbackend::KvBackendCatalogManager;
|
 use catalog::kvbackend::KvBackendCatalogManager;
 use clap::Parser;
 use common_catalog::consts::{MIN_USER_FLOW_ID, MIN_USER_TABLE_ID};
-use common_config::{metadata_store_dir, KvBackendConfig};
+use common_config::{metadata_store_dir, Configurable, KvBackendConfig};
-use common_meta::cache_invalidator::{CacheInvalidatorRef, MultiCacheInvalidator};
+use common_meta::cache::LayeredCacheRegistryBuilder;
+use common_meta::cache_invalidator::CacheInvalidatorRef;
 use common_meta::ddl::flow_meta::{FlowMetadataAllocator, FlowMetadataAllocatorRef};
 use common_meta::ddl::table_meta::{TableMetadataAllocator, TableMetadataAllocatorRef};
 use common_meta::ddl::{DdlContext, ProcedureExecutorRef};
@@ -34,12 +36,14 @@ use common_meta::sequence::SequenceBuilder;
 use common_meta::wal_options_allocator::{WalOptionsAllocator, WalOptionsAllocatorRef};
 use common_procedure::ProcedureManagerRef;
 use common_telemetry::info;
-use common_telemetry::logging::LoggingOptions;
+use common_telemetry::logging::{LoggingOptions, TracingOptions};
 use common_time::timezone::set_default_timezone;
+use common_version::{short_version, version};
 use common_wal::config::StandaloneWalConfig;
 use datanode::config::{DatanodeOptions, ProcedureConfig, RegionEngineConfig, StorageConfig};
 use datanode::datanode::{Datanode, DatanodeBuilder};
 use file_engine::config::EngineConfig as FileEngineConfig;
+use flow::FlownodeBuilder;
 use frontend::frontend::FrontendOptions;
 use frontend::instance::builder::FrontendBuilder;
 use frontend::instance::{FrontendInstance, Instance as FeInstance, StandaloneDatanodeManager};
@@ -55,14 +59,18 @@ use servers::http::HttpOptions;
 use servers::tls::{TlsMode, TlsOption};
 use servers::Mode;
 use snafu::ResultExt;
+use tracing_appender::non_blocking::WorkerGuard;
 
 use crate::error::{
-    CreateDirSnafu, IllegalConfigSnafu, InitDdlManagerSnafu, InitMetadataSnafu, InitTimezoneSnafu,
-    Result, ShutdownDatanodeSnafu, ShutdownFrontendSnafu, StartDatanodeSnafu, StartFrontendSnafu,
-    StartProcedureManagerSnafu, StartWalOptionsAllocatorSnafu, StopProcedureManagerSnafu,
+    BuildCacheRegistrySnafu, CreateDirSnafu, IllegalConfigSnafu, InitDdlManagerSnafu,
+    InitMetadataSnafu, InitTimezoneSnafu, LoadLayeredConfigSnafu, Result, ShutdownDatanodeSnafu,
+    ShutdownFrontendSnafu, StartDatanodeSnafu, StartFrontendSnafu, StartProcedureManagerSnafu,
+    StartWalOptionsAllocatorSnafu, StopProcedureManagerSnafu,
 };
-use crate::options::{GlobalOptions, MixOptions, Options};
+use crate::options::GlobalOptions;
-use crate::App;
+use crate::{log_versions, App};
 
+pub const APP_NAME: &str = "greptime-standalone";
+
 #[derive(Parser)]
 pub struct Command {
@@ -71,11 +79,11 @@ pub struct Command {
 }
 
 impl Command {
-    pub async fn build(self, opts: MixOptions) -> Result<Instance> {
+    pub async fn build(&self, opts: StandaloneOptions) -> Result<Instance> {
         self.subcmd.build(opts).await
     }
 
-    pub fn load_options(&self, global_options: &GlobalOptions) -> Result<Options> {
+    pub fn load_options(&self, global_options: &GlobalOptions) -> Result<StandaloneOptions> {
         self.subcmd.load_options(global_options)
     }
 }
@@ -86,13 +94,13 @@ enum SubCommand {
 }
 
 impl SubCommand {
-    async fn build(self, opts: MixOptions) -> Result<Instance> {
+    async fn build(&self, opts: StandaloneOptions) -> Result<Instance> {
         match self {
             SubCommand::Start(cmd) => cmd.build(opts).await,
         }
     }
 
-    fn load_options(&self, global_options: &GlobalOptions) -> Result<Options> {
+    fn load_options(&self, global_options: &GlobalOptions) -> Result<StandaloneOptions> {
         match self {
             SubCommand::Start(cmd) => cmd.load_options(global_options),
         }
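
The delegation above is the standard clap pattern: the top-level command parses global flags and hands the loaded options to whichever subcommand was chosen. A minimal, self-contained sketch of that shape (hypothetical DemoOptions/DemoInstance types, not the real GreptimeDB ones) follows:

use clap::{Args, Parser, Subcommand};

#[derive(Debug, Default)]
struct DemoOptions {
    http_addr: String,
}

struct DemoInstance;

#[derive(Parser)]
struct Command {
    #[command(subcommand)]
    subcmd: SubCommand,
}

#[derive(Subcommand)]
enum SubCommand {
    /// Start the service.
    Start(StartCommand),
}

#[derive(Args)]
struct StartCommand {
    /// Override the HTTP listen address from the CLI.
    #[arg(long)]
    http_addr: Option<String>,
}

impl Command {
    // The top-level command only forwards to the chosen subcommand,
    // mirroring the `self.subcmd.build(opts)` delegation in the diff.
    fn build(&self, opts: DemoOptions) -> DemoInstance {
        match &self.subcmd {
            SubCommand::Start(cmd) => cmd.build(opts),
        }
    }
}

impl StartCommand {
    fn build(&self, mut opts: DemoOptions) -> DemoInstance {
        // CLI flags win over whatever was loaded from file/env/defaults.
        if let Some(addr) = &self.http_addr {
            opts.http_addr.clone_from(addr);
        }
        DemoInstance
    }
}

fn main() {
    let cmd = Command::parse();
    let _instance = cmd.build(DemoOptions::default());
}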
@@ -121,12 +129,7 @@ pub struct StandaloneOptions {
     /// Options for different store engines.
     pub region_engine: Vec<RegionEngineConfig>,
     pub export_metrics: ExportMetricsOption,
-}
-
-impl StandaloneOptions {
-    pub fn env_list_keys() -> Option<&'static [&'static str]> {
-        Some(&["wal.broker_endpoints"])
-    }
+    pub tracing: TracingOptions,
 }
 
 impl Default for StandaloneOptions {
@@ -153,39 +156,48 @@ impl Default for StandaloneOptions {
                 RegionEngineConfig::Mito(MitoConfig::default()),
                 RegionEngineConfig::File(FileEngineConfig::default()),
             ],
+            tracing: TracingOptions::default(),
         }
     }
 }
 
+impl Configurable<'_> for StandaloneOptions {
+    fn env_list_keys() -> Option<&'static [&'static str]> {
+        Some(&["wal.broker_endpoints"])
+    }
+}
+
 impl StandaloneOptions {
-    fn frontend_options(self) -> FrontendOptions {
+    pub fn frontend_options(&self) -> FrontendOptions {
+        let cloned_opts = self.clone();
         FrontendOptions {
-            mode: self.mode,
+            mode: cloned_opts.mode,
-            default_timezone: self.default_timezone,
+            default_timezone: cloned_opts.default_timezone,
-            http: self.http,
+            http: cloned_opts.http,
-            grpc: self.grpc,
+            grpc: cloned_opts.grpc,
-            mysql: self.mysql,
+            mysql: cloned_opts.mysql,
-            postgres: self.postgres,
+            postgres: cloned_opts.postgres,
-            opentsdb: self.opentsdb,
+            opentsdb: cloned_opts.opentsdb,
-            influxdb: self.influxdb,
+            influxdb: cloned_opts.influxdb,
-            prom_store: self.prom_store,
+            prom_store: cloned_opts.prom_store,
             meta_client: None,
-            logging: self.logging,
+            logging: cloned_opts.logging,
-            user_provider: self.user_provider,
+            user_provider: cloned_opts.user_provider,
             // Handle the export metrics task run by standalone to frontend for execution
-            export_metrics: self.export_metrics,
+            export_metrics: cloned_opts.export_metrics,
             ..Default::default()
         }
     }
 
-    fn datanode_options(self) -> DatanodeOptions {
+    pub fn datanode_options(&self) -> DatanodeOptions {
+        let cloned_opts = self.clone();
         DatanodeOptions {
             node_id: Some(0),
-            enable_telemetry: self.enable_telemetry,
+            enable_telemetry: cloned_opts.enable_telemetry,
-            wal: self.wal.into(),
+            wal: cloned_opts.wal.into(),
-            storage: self.storage,
+            storage: cloned_opts.storage,
-            region_engine: self.region_engine,
+            region_engine: cloned_opts.region_engine,
-            rpc_addr: self.grpc.addr,
+            rpc_addr: cloned_opts.grpc.addr,
             ..Default::default()
         }
     }
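
The two projection methods above fan one standalone configuration out into per-component option structs by cloning the shared value. A minimal sketch of the same pattern with hypothetical Standalone/Frontend/Datanode structs (not the real GreptimeDB types):

#[derive(Clone, Debug, Default)]
struct HttpOptions {
    addr: String,
}

#[derive(Clone, Debug, Default)]
struct Standalone {
    http: HttpOptions,
    data_home: String,
    enable_telemetry: bool,
}

#[derive(Debug, Default)]
struct Frontend {
    http: HttpOptions,
}

#[derive(Debug, Default)]
struct Datanode {
    data_home: String,
    enable_telemetry: bool,
}

impl Standalone {
    // Borrow `&self` and clone, so the same options value can be projected
    // into several component configs without being consumed.
    fn frontend_options(&self) -> Frontend {
        let opts = self.clone();
        Frontend { http: opts.http }
    }

    fn datanode_options(&self) -> Datanode {
        let opts = self.clone();
        Datanode {
            data_home: opts.data_home,
            enable_telemetry: opts.enable_telemetry,
        }
    }
}

fn main() {
    let standalone = Standalone {
        http: HttpOptions { addr: "127.0.0.1:4000".into() },
        data_home: "/tmp/demo".into(),
        enable_telemetry: false,
    };
    // Both projections can be taken from the same value.
    let fe = standalone.frontend_options();
    let dn = standalone.datanode_options();
    println!("frontend http: {}, datanode home: {}", fe.http.addr, dn.data_home);
}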
@@ -196,12 +208,15 @@ pub struct Instance {
     frontend: FeInstance,
     procedure_manager: ProcedureManagerRef,
     wal_options_allocator: WalOptionsAllocatorRef,
 
+    // Keep the logging guard to prevent the worker from being dropped.
+    _guard: Vec<WorkerGuard>,
 }
 
 #[async_trait]
 impl App for Instance {
     fn name(&self) -> &str {
-        "greptime-standalone"
+        APP_NAME
     }
 
     async fn start(&mut self) -> Result<()> {
@@ -276,21 +291,24 @@ pub struct StartCommand {
 }
 
 impl StartCommand {
-    fn load_options(&self, global_options: &GlobalOptions) -> Result<Options> {
-        let opts: StandaloneOptions = Options::load_layered_options(
-            self.config_file.as_deref(),
-            self.env_prefix.as_ref(),
-            StandaloneOptions::env_list_keys(),
-        )?;
-
-        self.convert_options(global_options, opts)
+    fn load_options(&self, global_options: &GlobalOptions) -> Result<StandaloneOptions> {
+        self.merge_with_cli_options(
+            global_options,
+            StandaloneOptions::load_layered_options(
+                self.config_file.as_deref(),
+                self.env_prefix.as_ref(),
+            )
+            .context(LoadLayeredConfigSnafu)?,
+        )
     }
 
-    pub fn convert_options(
+    // The precedence order is: cli > config file > environment variables > default values.
+    pub fn merge_with_cli_options(
         &self,
         global_options: &GlobalOptions,
         mut opts: StandaloneOptions,
-    ) -> Result<Options> {
+    ) -> Result<StandaloneOptions> {
+        // Should always be standalone mode.
         opts.mode = Mode::Standalone;
 
         if let Some(dir) = &global_options.log_dir {
@@ -301,6 +319,11 @@ impl StartCommand {
             opts.logging.level.clone_from(&global_options.log_level);
         }
 
+        opts.tracing = TracingOptions {
+            #[cfg(feature = "tokio-console")]
+            tokio_console_addr: global_options.tokio_console_addr.clone(),
+        };
+
         let tls_opts = TlsOption::new(
             self.tls_mode.clone(),
             self.tls_cert_path.clone(),
@@ -323,8 +346,7 @@
                     msg: format!(
                         "gRPC listen address conflicts with datanode reserved gRPC addr: {datanode_grpc_addr}",
                     ),
-                }
-                .fail();
+                }.fail();
             }
             opts.grpc.addr.clone_from(addr)
         }
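
The comment in merge_with_cli_options states the precedence: cli > config file > environment variables > default values. A small sketch of that last merge step, with hypothetical DemoOptions and flag names, showing that a CLI value overrides whatever the layered loader produced:

#[derive(Debug)]
struct DemoOptions {
    http_addr: String,
    log_dir: String,
}

fn merge_with_cli(
    mut opts: DemoOptions,
    cli_http_addr: Option<&str>,
    cli_log_dir: Option<&str>,
) -> DemoOptions {
    // Each CLI flag, when present, overrides whatever the layered loader produced.
    if let Some(addr) = cli_http_addr {
        opts.http_addr = addr.to_string();
    }
    if let Some(dir) = cli_log_dir {
        opts.log_dir = dir.to_string();
    }
    opts
}

fn main() {
    let layered = DemoOptions {
        http_addr: "127.0.0.1:4000".into(), // from config file / env / defaults
        log_dir: "/tmp/logs".into(),
    };
    let merged = merge_with_cli(layered, Some("127.0.0.1:14000"), None);
    assert_eq!(merged.http_addr, "127.0.0.1:14000"); // CLI wins
    assert_eq!(merged.log_dir, "/tmp/logs"); // unchanged
}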
@@ -347,47 +369,36 @@
 
         opts.user_provider.clone_from(&self.user_provider);
 
-        let metadata_store = opts.metadata_store.clone();
-        let procedure = opts.procedure.clone();
-        let frontend = opts.clone().frontend_options();
-        let logging = opts.logging.clone();
-        let wal_meta = opts.wal.clone().into();
-        let datanode = opts.datanode_options().clone();
-
-        Ok(Options::Standalone(Box::new(MixOptions {
-            procedure,
-            metadata_store,
-            data_home: datanode.storage.data_home.to_string(),
-            frontend,
-            datanode,
-            logging,
-            wal_meta,
-        })))
+        Ok(opts)
     }
 
     #[allow(unreachable_code)]
     #[allow(unused_variables)]
     #[allow(clippy::diverging_sub_expression)]
-    async fn build(self, opts: MixOptions) -> Result<Instance> {
+    async fn build(&self, opts: StandaloneOptions) -> Result<Instance> {
+        let guard =
+            common_telemetry::init_global_logging(APP_NAME, &opts.logging, &opts.tracing, None);
+        log_versions(version!(), short_version!());
+
         info!("Standalone start command: {:#?}", self);
         info!("Building standalone instance with {opts:#?}");
 
-        let mut fe_opts = opts.frontend;
+        let mut fe_opts = opts.frontend_options();
         #[allow(clippy::unnecessary_mut_passed)]
         let fe_plugins = plugins::setup_frontend_plugins(&mut fe_opts) // mut ref is MUST, DO NOT change it
             .await
             .context(StartFrontendSnafu)?;
 
-        let dn_opts = opts.datanode;
+        let dn_opts = opts.datanode_options();
 
         set_default_timezone(fe_opts.default_timezone.as_deref()).context(InitTimezoneSnafu)?;
 
+        let data_home = &dn_opts.storage.data_home;
         // Ensure the data_home directory exists.
-        fs::create_dir_all(path::Path::new(&opts.data_home)).context(CreateDirSnafu {
-            dir: &opts.data_home,
-        })?;
+        fs::create_dir_all(path::Path::new(data_home))
+            .context(CreateDirSnafu { dir: data_home })?;
 
-        let metadata_dir = metadata_store_dir(&opts.data_home);
+        let metadata_dir = metadata_store_dir(data_home);
         let (kv_backend, procedure_manager) = FeInstance::try_build_standalone_components(
             metadata_dir,
             opts.metadata_store.clone(),
@@ -396,20 +407,44 @@
         .await
         .context(StartFrontendSnafu)?;
 
-        let multi_cache_invalidator = Arc::new(MultiCacheInvalidator::default());
+        // Builds cache registry
+        let layered_cache_builder = LayeredCacheRegistryBuilder::default();
+        let fundamental_cache_registry = build_fundamental_cache_registry(kv_backend.clone());
+        let layered_cache_registry = Arc::new(
+            with_default_composite_cache_registry(
+                layered_cache_builder.add_cache_registry(fundamental_cache_registry),
+            )
+            .context(BuildCacheRegistrySnafu)?
+            .build(),
+        );
+
         let catalog_manager = KvBackendCatalogManager::new(
             dn_opts.mode,
             None,
             kv_backend.clone(),
-            multi_cache_invalidator.clone(),
-        )
-        .await;
+            layered_cache_registry.clone(),
+        );
+
+        let table_metadata_manager =
+            Self::create_table_metadata_manager(kv_backend.clone()).await?;
+
+        let flow_builder = FlownodeBuilder::new(
+            1,
+            Default::default(),
+            fe_plugins.clone(),
+            table_metadata_manager.clone(),
+            catalog_manager.clone(),
+        );
+        let flownode = Arc::new(flow_builder.build().await);
 
         let builder =
             DatanodeBuilder::new(dn_opts, fe_plugins.clone()).with_kv_backend(kv_backend.clone());
         let datanode = builder.build().await.context(StartDatanodeSnafu)?;
 
-        let node_manager = Arc::new(StandaloneDatanodeManager(datanode.region_server()));
+        let node_manager = Arc::new(StandaloneDatanodeManager {
+            region_server: datanode.region_server(),
+            flow_server: flownode.clone(),
+        });
 
         let table_id_sequence = Arc::new(
             SequenceBuilder::new(TABLE_ID_SEQ, kv_backend.clone())
@@ -424,11 +459,9 @@
                 .build(),
         );
         let wal_options_allocator = Arc::new(WalOptionsAllocator::new(
-            opts.wal_meta.clone(),
+            opts.wal.into(),
             kv_backend.clone(),
         ));
-        let table_metadata_manager =
-            Self::create_table_metadata_manager(kv_backend.clone()).await?;
         let flow_metadata_manager = Arc::new(FlowMetadataManager::new(kv_backend.clone()));
         let table_meta_allocator = Arc::new(TableMetadataAllocator::new(
             table_id_sequence,
@@ -441,7 +474,7 @@
         let ddl_task_executor = Self::create_ddl_task_executor(
             procedure_manager.clone(),
             node_manager.clone(),
-            multi_cache_invalidator,
+            layered_cache_registry.clone(),
             table_metadata_manager,
             table_meta_allocator,
             flow_metadata_manager,
@@ -449,12 +482,24 @@
         )
         .await?;
 
-        let mut frontend =
-            FrontendBuilder::new(kv_backend, catalog_manager, node_manager, ddl_task_executor)
-                .with_plugin(fe_plugins.clone())
-                .try_build()
-                .await
-                .context(StartFrontendSnafu)?;
+        let mut frontend = FrontendBuilder::new(
+            kv_backend,
+            layered_cache_registry,
+            catalog_manager,
+            node_manager,
+            ddl_task_executor,
+        )
+        .with_plugin(fe_plugins.clone())
+        .try_build()
+        .await
+        .context(StartFrontendSnafu)?;
+
+        // flow server need to be able to use frontend to write insert requests back
+        flownode
+            .set_frontend_invoker(Box::new(frontend.clone()))
+            .await;
+        // TODO(discord9): unify with adding `start` and `shutdown` method to flownode too.
+        let _handle = flownode.clone().run_background();
 
         let servers = Services::new(fe_opts.clone(), Arc::new(frontend.clone()), fe_plugins)
             .build()
@@ -469,6 +514,7 @@
             frontend,
             procedure_manager,
             wal_options_allocator,
+            _guard: guard,
         })
     }
 
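
The build sequence above wires a cycle: the flow node is built before the frontend, and only afterwards receives the frontend through set_frontend_invoker. A minimal sketch of that late-binding pattern with illustrative FlowNode/Frontend/Invoker types (not the real APIs):

use std::sync::Arc;
use tokio::sync::RwLock;

trait Invoker: Send + Sync {
    fn insert(&self, rows: usize);
}

#[derive(Clone)]
struct Frontend;

impl Invoker for Frontend {
    fn insert(&self, rows: usize) {
        println!("frontend received {rows} rows from the flow engine");
    }
}

#[derive(Default)]
struct FlowNode {
    // Empty until the frontend exists; filled in by `set_frontend_invoker`.
    invoker: RwLock<Option<Box<dyn Invoker>>>,
}

impl FlowNode {
    async fn set_frontend_invoker(&self, invoker: Box<dyn Invoker>) {
        *self.invoker.write().await = Some(invoker);
    }

    async fn flush(&self) {
        if let Some(invoker) = self.invoker.read().await.as_ref() {
            invoker.insert(42);
        }
    }
}

#[tokio::main]
async fn main() {
    // Build the flow node first, then the frontend, then close the loop.
    let flownode = Arc::new(FlowNode::default());
    let frontend = Frontend;
    flownode.set_frontend_invoker(Box::new(frontend.clone())).await;
    flownode.flush().await;
}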
@@ -523,13 +569,14 @@ mod tests {
 
     use auth::{Identity, Password, UserProviderRef};
     use common_base::readable_size::ReadableSize;
+    use common_config::ENV_VAR_SEP;
     use common_test_util::temp_dir::create_named_temp_file;
     use common_wal::config::DatanodeWalConfig;
     use datanode::config::{FileConfig, GcsConfig};
     use servers::Mode;
 
     use super::*;
-    use crate::options::{GlobalOptions, ENV_VAR_SEP};
+    use crate::options::GlobalOptions;
 
     #[tokio::test]
     async fn test_try_from_start_command_to_anymap() {
@@ -617,12 +664,9 @@
             ..Default::default()
         };
 
-        let Options::Standalone(options) = cmd.load_options(&GlobalOptions::default()).unwrap()
-        else {
-            unreachable!()
-        };
-        let fe_opts = options.frontend;
-        let dn_opts = options.datanode;
+        let options = cmd.load_options(&GlobalOptions::default()).unwrap();
+        let fe_opts = options.frontend_options();
+        let dn_opts = options.datanode_options();
         let logging_opts = options.logging;
         assert_eq!(Mode::Standalone, fe_opts.mode);
         assert_eq!("127.0.0.1:4000".to_string(), fe_opts.http.addr);
@@ -673,7 +717,7 @@
             ..Default::default()
         };
 
-        let Options::Standalone(opts) = cmd
+        let opts = cmd
             .load_options(&GlobalOptions {
                 log_dir: Some("/tmp/greptimedb/test/logs".to_string()),
                 log_level: Some("debug".to_string()),
@@ -681,10 +725,7 @@
                 #[cfg(feature = "tokio-console")]
                 tokio_console_addr: None,
             })
-            .unwrap()
-        else {
-            unreachable!()
-        };
+            .unwrap();
 
         assert_eq!("/tmp/greptimedb/test/logs", opts.logging.dir);
         assert_eq!("debug", opts.logging.level.unwrap());
@@ -746,11 +787,7 @@
             ..Default::default()
         };
 
-        let Options::Standalone(opts) =
-            command.load_options(&GlobalOptions::default()).unwrap()
-        else {
-            unreachable!()
-        };
+        let opts = command.load_options(&GlobalOptions::default()).unwrap();
 
         // Should be read from env, env > default values.
         assert_eq!(opts.logging.dir, "/other/log/dir");
@@ -759,19 +796,20 @@
         assert_eq!(opts.logging.level.as_ref().unwrap(), "debug");
 
         // Should be read from cli, cli > config file > env > default values.
-        assert_eq!(opts.frontend.http.addr, "127.0.0.1:14000");
-        assert_eq!(ReadableSize::mb(64), opts.frontend.http.body_limit);
+        let fe_opts = opts.frontend_options();
+        assert_eq!(fe_opts.http.addr, "127.0.0.1:14000");
+        assert_eq!(ReadableSize::mb(64), fe_opts.http.body_limit);
 
         // Should be default value.
-        assert_eq!(opts.frontend.grpc.addr, GrpcOptions::default().addr);
+        assert_eq!(fe_opts.grpc.addr, GrpcOptions::default().addr);
             },
         );
     }
 
     #[test]
     fn test_load_default_standalone_options() {
-        let options: StandaloneOptions =
-            Options::load_layered_options(None, "GREPTIMEDB_FRONTEND", None).unwrap();
+        let options =
+            StandaloneOptions::load_layered_options(None, "GREPTIMEDB_STANDALONE").unwrap();
         let default_options = StandaloneOptions::default();
         assert_eq!(options.mode, default_options.mode);
         assert_eq!(options.enable_telemetry, default_options.enable_telemetry);
@@ -33,16 +33,21 @@ pub enum Error {
     Overflow {
         src_len: usize,
         dst_len: usize,
+        #[snafu(implicit)]
         location: Location,
     },
 
     #[snafu(display("Buffer underflow"))]
-    Underflow { location: Location },
+    Underflow {
+        #[snafu(implicit)]
+        location: Location,
+    },
 
     #[snafu(display("IO operation reach EOF"))]
     Eof {
         #[snafu(source)]
         error: std::io::Error,
+        #[snafu(implicit)]
         location: Location,
     },
 }
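
This hunk, like most of the error-enum hunks in this change set, only annotates the location field with #[snafu(implicit)] so snafu fills it in at the construction site. A minimal sketch of the resulting pattern, assuming snafu 0.8+ where implicit fields are available:

use std::io::Read;

use snafu::{ensure, Location, ResultExt, Snafu};

#[derive(Debug, Snafu)]
enum Error {
    #[snafu(display("Buffer underflow"))]
    Underflow {
        // No longer passed by the caller: snafu generates it from the call site.
        #[snafu(implicit)]
        location: Location,
    },

    #[snafu(display("IO operation reached EOF"))]
    Eof {
        #[snafu(source)]
        error: std::io::Error,
        #[snafu(implicit)]
        location: Location,
    },
}

fn take(buf: &[u8], n: usize) -> Result<&[u8], Error> {
    // The context selector no longer mentions `location`; it is implicit.
    ensure!(buf.len() >= n, UnderflowSnafu);
    Ok(&buf[..n])
}

fn read_all(mut r: impl Read) -> Result<Vec<u8>, Error> {
    let mut out = Vec::new();
    r.read_to_end(&mut out).context(EofSnafu)?;
    Ok(out)
}

fn main() {
    assert!(take(&[1, 2, 3], 8).is_err());
    assert_eq!(read_all(&b"abc"[..]).unwrap(), b"abc");
}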
@@ -26,6 +26,7 @@ pub enum Error {
     #[snafu(display("Invalid full table name: {}", table_name))]
     InvalidFullTableName {
         table_name: String,
+        #[snafu(implicit)]
         location: Location,
     },
 }
@@ -9,6 +9,22 @@ workspace = true
 
 [dependencies]
 common-base.workspace = true
+common-error.workspace = true
+common-macro.workspace = true
+config.workspace = true
 num_cpus.workspace = true
 serde.workspace = true
+serde_json.workspace = true
+snafu.workspace = true
 sysinfo.workspace = true
+toml.workspace = true
+
+[dev-dependencies]
+common-telemetry.workspace = true
+common-test-util.workspace = true
+common-wal.workspace = true
+datanode.workspace = true
+meta-client.workspace = true
+serde.workspace = true
+temp-env = "0.3"
+tempfile.workspace = true
src/common/config/src/config.rs (new file, 248 lines)
@@ -0,0 +1,248 @@
// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

use config::{Environment, File, FileFormat};
use serde::{Deserialize, Serialize};
use snafu::ResultExt;

use crate::error::{LoadLayeredConfigSnafu, Result, SerdeJsonSnafu, TomlFormatSnafu};

/// Separator for environment variables. For example, `DATANODE__STORAGE__MANIFEST__CHECKPOINT_MARGIN`.
pub const ENV_VAR_SEP: &str = "__";

/// Separator for list values in environment variables. For example, `localhost:3001,localhost:3002,localhost:3003`.
pub const ENV_LIST_SEP: &str = ",";

/// Configuration trait defines the common interface for configuration that can be loaded from multiple sources and serialized to TOML.
pub trait Configurable<'de>: Serialize + Deserialize<'de> + Default + Sized {
    /// Load the configuration from multiple sources and merge them.
    /// The precedence order is: config file > environment variables > default values.
    /// `env_prefix` is the prefix of environment variables, e.g. "FRONTEND__xxx".
    /// The function will use dunder (double underscore) `__` as the separator for environment variables, for example:
    /// `DATANODE__STORAGE__MANIFEST__CHECKPOINT_MARGIN` will be mapped to the `DatanodeOptions.storage.manifest.checkpoint_margin` field in the configuration.
    /// `list_keys` is the list of keys that should be parsed as a list, for example, you can pass `Some(&["meta_client_options.metasrv_addrs"])` to parse `GREPTIMEDB_METASRV__META_CLIENT_OPTIONS__METASRV_ADDRS` as a list.
    /// The function will use comma `,` as the separator for list values, for example: `127.0.0.1:3001,127.0.0.1:3002,127.0.0.1:3003`.
    fn load_layered_options(config_file: Option<&str>, env_prefix: &str) -> Result<Self> {
        let default_opts = Self::default();

        let env_source = {
            let mut env = Environment::default();

            if !env_prefix.is_empty() {
                env = env.prefix(env_prefix);
            }

            if let Some(list_keys) = Self::env_list_keys() {
                env = env.list_separator(ENV_LIST_SEP);
                for key in list_keys {
                    env = env.with_list_parse_key(key);
                }
            }

            env.try_parsing(true)
                .separator(ENV_VAR_SEP)
                .ignore_empty(true)
        };

        // Workaround: Replacement for `Config::try_from(&default_opts)` due to
        // `ConfigSerializer` cannot handle the case of an empty struct contained
        // within an iterative structure.
        // See: https://github.com/mehcode/config-rs/issues/461
        let json_str = serde_json::to_string(&default_opts).context(SerdeJsonSnafu)?;
        let default_config = File::from_str(&json_str, FileFormat::Json);

        // Add default values and environment variables as the sources of the configuration.
        let mut layered_config = config::Config::builder()
            .add_source(default_config)
            .add_source(env_source);

        // Add config file as the source of the configuration if it is specified.
        if let Some(config_file) = config_file {
            layered_config = layered_config.add_source(File::new(config_file, FileFormat::Toml));
        }

        let opts = layered_config
            .build()
            .and_then(|x| x.try_deserialize())
            .context(LoadLayeredConfigSnafu)?;

        Ok(opts)
    }

    /// List of toml keys that should be parsed as a list.
    fn env_list_keys() -> Option<&'static [&'static str]> {
        None
    }

    /// Serialize the configuration to a TOML string.
    fn to_toml(&self) -> Result<String> {
        toml::to_string(&self).context(TomlFormatSnafu)
    }
}

#[cfg(test)]
mod tests {
    use std::io::Write;

    use common_telemetry::logging::LoggingOptions;
    use common_test_util::temp_dir::create_named_temp_file;
    use common_wal::config::DatanodeWalConfig;
    use datanode::config::{ObjectStoreConfig, StorageConfig};
    use meta_client::MetaClientOptions;
    use serde::{Deserialize, Serialize};

    use super::*;
    use crate::Mode;

    #[derive(Debug, Serialize, Deserialize)]
    struct TestDatanodeConfig {
        mode: Mode,
        node_id: Option<u64>,
        logging: LoggingOptions,
        meta_client: Option<MetaClientOptions>,
        wal: DatanodeWalConfig,
        storage: StorageConfig,
    }

    impl Default for TestDatanodeConfig {
        fn default() -> Self {
            Self {
                mode: Mode::Distributed,
                node_id: None,
                logging: LoggingOptions::default(),
                meta_client: None,
                wal: DatanodeWalConfig::default(),
                storage: StorageConfig::default(),
            }
        }
    }

    impl Configurable<'_> for TestDatanodeConfig {
        fn env_list_keys() -> Option<&'static [&'static str]> {
            Some(&["meta_client.metasrv_addrs"])
        }
    }

    #[test]
    fn test_load_layered_options() {
        let mut file = create_named_temp_file();
        let toml_str = r#"
            mode = "distributed"
            enable_memory_catalog = false
            rpc_addr = "127.0.0.1:3001"
            rpc_hostname = "127.0.0.1"
            rpc_runtime_size = 8
            mysql_addr = "127.0.0.1:4406"
            mysql_runtime_size = 2

            [meta_client]
            timeout = "3s"
            connect_timeout = "5s"
            tcp_nodelay = true

            [wal]
            provider = "raft_engine"
            dir = "/tmp/greptimedb/wal"
            file_size = "1GB"
            purge_threshold = "50GB"
            purge_interval = "10m"
            read_batch_size = 128
            sync_write = false

            [logging]
            level = "debug"
            dir = "/tmp/greptimedb/test/logs"
        "#;
        write!(file, "{}", toml_str).unwrap();

        let env_prefix = "DATANODE_UT";
        temp_env::with_vars(
            // The following environment variables will be used to override the values in the config file.
            [
                (
                    // storage.type = S3
                    [
                        env_prefix.to_string(),
                        "storage".to_uppercase(),
                        "type".to_uppercase(),
                    ]
                    .join(ENV_VAR_SEP),
                    Some("S3"),
                ),
                (
                    // storage.bucket = mybucket
                    [
                        env_prefix.to_string(),
                        "storage".to_uppercase(),
                        "bucket".to_uppercase(),
                    ]
                    .join(ENV_VAR_SEP),
                    Some("mybucket"),
                ),
                (
                    // wal.dir = /other/wal/dir
                    [
                        env_prefix.to_string(),
                        "wal".to_uppercase(),
                        "dir".to_uppercase(),
                    ]
                    .join(ENV_VAR_SEP),
                    Some("/other/wal/dir"),
                ),
                (
                    // meta_client.metasrv_addrs = 127.0.0.1:3001,127.0.0.1:3002,127.0.0.1:3003
                    [
                        env_prefix.to_string(),
                        "meta_client".to_uppercase(),
                        "metasrv_addrs".to_uppercase(),
                    ]
                    .join(ENV_VAR_SEP),
                    Some("127.0.0.1:3001,127.0.0.1:3002,127.0.0.1:3003"),
                ),
            ],
            || {
                let opts = TestDatanodeConfig::load_layered_options(
                    Some(file.path().to_str().unwrap()),
                    env_prefix,
                )
                .unwrap();

                // Check the configs from environment variables.
                match &opts.storage.store {
                    ObjectStoreConfig::S3(s3_config) => {
                        assert_eq!(s3_config.bucket, "mybucket".to_string());
                    }
                    _ => panic!("unexpected store type"),
                }
                assert_eq!(
                    opts.meta_client.unwrap().metasrv_addrs,
                    vec![
                        "127.0.0.1:3001".to_string(),
                        "127.0.0.1:3002".to_string(),
                        "127.0.0.1:3003".to_string()
                    ]
                );

                // Should be the values from config file, not environment variables.
                let DatanodeWalConfig::RaftEngine(raft_engine_config) = opts.wal else {
                    unreachable!()
                };
                assert_eq!(raft_engine_config.dir.unwrap(), "/tmp/greptimedb/wal");

                // Should be default values.
                assert_eq!(opts.node_id, None);
            },
        );
    }
}
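
The trait above documents the dunder mapping: with separator __, a variable such as PREFIX__SECTION__KEY lands in section.key, and keys registered via env_list_keys are split on commas. A small illustration of that mapping using the config and serde crates directly, with a hypothetical DemoConfig (this mirrors what load_layered_options assembles; it is not the real GreptimeDB options type):

use config::{Config, Environment};
use serde::Deserialize;

#[derive(Debug, Deserialize, Default)]
struct HttpConfig {
    addr: String,
}

#[derive(Debug, Deserialize, Default)]
struct DemoConfig {
    http: HttpConfig,
    peers: Vec<String>,
}

fn main() {
    // Normally these come from the process environment; set them here for the demo.
    // (On edition 2024 `set_var` requires an unsafe block.)
    std::env::set_var("DEMO__HTTP__ADDR", "127.0.0.1:4000");
    std::env::set_var("DEMO__PEERS", "127.0.0.1:3001,127.0.0.1:3002");

    let env = Environment::default()
        .prefix("DEMO")
        .separator("__") // ENV_VAR_SEP
        .list_separator(",") // ENV_LIST_SEP
        .with_list_parse_key("peers")
        .try_parsing(true);

    let cfg: DemoConfig = Config::builder()
        .add_source(env)
        .build()
        .and_then(|c| c.try_deserialize())
        .unwrap();

    // DEMO__HTTP__ADDR was mapped onto the nested `http.addr` field,
    // and DEMO__PEERS was split into a list.
    assert_eq!(cfg.http.addr, "127.0.0.1:4000");
    assert_eq!(cfg.peers.len(), 2);
}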
src/common/config/src/error.rs (new file, 67 lines)
@@ -0,0 +1,67 @@
// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

use std::any::Any;

use common_error::ext::ErrorExt;
use common_error::status_code::StatusCode;
use common_macro::stack_trace_debug;
use config::ConfigError;
use snafu::{Location, Snafu};

pub type Result<T> = std::result::Result<T, Error>;

#[derive(Snafu)]
#[snafu(visibility(pub))]
#[stack_trace_debug]
pub enum Error {
    #[snafu(display("Failed to load layered config"))]
    LoadLayeredConfig {
        #[snafu(source)]
        error: ConfigError,
        #[snafu(implicit)]
        location: Location,
    },

    #[snafu(display("Failed to serde json"))]
    SerdeJson {
        #[snafu(source)]
        error: serde_json::error::Error,
        #[snafu(implicit)]
        location: Location,
    },

    #[snafu(display("Failed to serialize options to TOML"))]
    TomlFormat {
        #[snafu(source)]
        error: toml::ser::Error,
        #[snafu(implicit)]
        location: Location,
    },
}

impl ErrorExt for Error {
    fn status_code(&self) -> StatusCode {
        match self {
            Error::TomlFormat { .. } | Error::LoadLayeredConfig { .. } => {
                StatusCode::InvalidArguments
            }
            Error::SerdeJson { .. } => StatusCode::Unexpected,
        }
    }

    fn as_any(&self) -> &dyn Any {
        self
    }
}
@@ -12,9 +12,12 @@
 // See the License for the specific language governing permissions and
 // limitations under the License.
 
+pub mod config;
+pub mod error;
 pub mod utils;
 
 use common_base::readable_size::ReadableSize;
+pub use config::*;
 use serde::{Deserialize, Serialize};
 
 pub fn metadata_store_dir(store_dir: &str) -> String {
@@ -30,7 +30,7 @@ derive_builder.workspace = true
 futures.workspace = true
 lazy_static.workspace = true
 object-store.workspace = true
-orc-rust = { git = "https://github.com/MichaelScofield/orc-rs.git", rev = "17347f5f084ac937863317df882218055c4ea8c1" }
+orc-rust = { git = "https://github.com/datafusion-contrib/datafusion-orc.git", rev = "502217315726314c4008808fe169764529640599" }
 parquet.workspace = true
 paste = "1.0"
 regex = "1.7"
@@ -29,27 +29,38 @@ pub enum Error {
     #[snafu(display("Unsupported compression type: {}", compression_type))]
     UnsupportedCompressionType {
         compression_type: String,
+        #[snafu(implicit)]
         location: Location,
     },
 
     #[snafu(display("Unsupported backend protocol: {}, url: {}", protocol, url))]
     UnsupportedBackendProtocol {
         protocol: String,
+        #[snafu(implicit)]
         location: Location,
         url: String,
     },
 
     #[snafu(display("Unsupported format protocol: {}", format))]
-    UnsupportedFormat { format: String, location: Location },
+    UnsupportedFormat {
+        format: String,
+        #[snafu(implicit)]
+        location: Location,
+    },
 
     #[snafu(display("empty host: {}", url))]
-    EmptyHostPath { url: String, location: Location },
+    EmptyHostPath {
+        url: String,
+        #[snafu(implicit)]
+        location: Location,
+    },
 
     #[snafu(display("Invalid url: {}", url))]
     InvalidUrl {
         url: String,
         #[snafu(source)]
         error: ParseError,
+        #[snafu(implicit)]
         location: Location,
     },
 
@@ -57,19 +68,22 @@ pub enum Error {
     BuildBackend {
         #[snafu(source)]
         error: object_store::Error,
+        #[snafu(implicit)]
         location: Location,
     },
 
     #[snafu(display("Failed to build orc reader"))]
     OrcReader {
+        #[snafu(implicit)]
         location: Location,
         #[snafu(source)]
-        error: orc_rust::error::Error,
+        error: orc_rust::error::OrcError,
     },
 
     #[snafu(display("Failed to read object from path: {}", path))]
     ReadObject {
         path: String,
+        #[snafu(implicit)]
         location: Location,
         #[snafu(source)]
         error: object_store::Error,
@@ -78,6 +92,7 @@ pub enum Error {
     #[snafu(display("Failed to write object to path: {}", path))]
     WriteObject {
         path: String,
+        #[snafu(implicit)]
         location: Location,
         #[snafu(source)]
         error: object_store::Error,
@@ -87,11 +102,13 @@ pub enum Error {
     AsyncWrite {
         #[snafu(source)]
         error: std::io::Error,
+        #[snafu(implicit)]
         location: Location,
     },
 
     #[snafu(display("Failed to write record batch"))]
     WriteRecordBatch {
+        #[snafu(implicit)]
         location: Location,
         #[snafu(source)]
         error: ArrowError,
@@ -99,6 +116,7 @@ pub enum Error {
 
     #[snafu(display("Failed to encode record batch"))]
     EncodeRecordBatch {
+        #[snafu(implicit)]
         location: Location,
         #[snafu(source)]
         error: ParquetError,
@@ -106,6 +124,7 @@ pub enum Error {
 
     #[snafu(display("Failed to read record batch"))]
     ReadRecordBatch {
+        #[snafu(implicit)]
         location: Location,
         #[snafu(source)]
         error: datafusion::error::DataFusionError,
@@ -113,6 +132,7 @@ pub enum Error {
 
     #[snafu(display("Failed to read parquet"))]
     ReadParquetSnafu {
+        #[snafu(implicit)]
         location: Location,
         #[snafu(source)]
         error: datafusion::parquet::errors::ParquetError,
@@ -120,6 +140,7 @@ pub enum Error {
 
     #[snafu(display("Failed to convert parquet to schema"))]
     ParquetToSchema {
+        #[snafu(implicit)]
         location: Location,
         #[snafu(source)]
         error: datafusion::parquet::errors::ParquetError,
@@ -127,6 +148,7 @@ pub enum Error {
 
     #[snafu(display("Failed to infer schema from file"))]
     InferSchema {
+        #[snafu(implicit)]
         location: Location,
         #[snafu(source)]
         error: arrow_schema::ArrowError,
@@ -135,16 +157,22 @@ pub enum Error {
     #[snafu(display("Failed to list object in path: {}", path))]
     ListObjects {
         path: String,
+        #[snafu(implicit)]
         location: Location,
         #[snafu(source)]
         error: object_store::Error,
     },
 
     #[snafu(display("Invalid connection: {}", msg))]
-    InvalidConnection { msg: String, location: Location },
+    InvalidConnection {
+        msg: String,
+        #[snafu(implicit)]
+        location: Location,
+    },
 
     #[snafu(display("Failed to join handle"))]
     JoinHandle {
+        #[snafu(implicit)]
         location: Location,
         #[snafu(source)]
         error: tokio::task::JoinError,
@@ -154,6 +182,7 @@ pub enum Error {
     ParseFormat {
         key: &'static str,
         value: String,
+        #[snafu(implicit)]
         location: Location,
     },
 
@@ -161,15 +190,20 @@ pub enum Error {
     MergeSchema {
         #[snafu(source)]
         error: arrow_schema::ArrowError,
+        #[snafu(implicit)]
         location: Location,
     },
 
     #[snafu(display("Buffered writer closed"))]
-    BufferedWriterClosed { location: Location },
+    BufferedWriterClosed {
+        #[snafu(implicit)]
+        location: Location,
+    },
 
     #[snafu(display("Failed to write parquet file, path: {}", path))]
     WriteParquet {
         path: String,
+        #[snafu(implicit)]
         location: Location,
         #[snafu(source)]
         error: parquet::errors::ParquetError,
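
At call sites, these selectors are used through snafu's ResultExt::context, which attaches the extra fields (such as path) and fills the implicit Location automatically. A minimal sketch with an illustrative read_file helper and error variant (not the real datasource API):

use snafu::{Location, ResultExt, Snafu};

#[derive(Debug, Snafu)]
enum Error {
    #[snafu(display("Failed to read object from path: {}", path))]
    ReadObject {
        path: String,
        #[snafu(implicit)]
        location: Location,
        #[snafu(source)]
        error: std::io::Error,
    },
}

fn read_file(path: &str) -> Result<Vec<u8>, Error> {
    // `context` turns the io::Error into Error::ReadObject, recording `path`
    // and the call-site location.
    std::fs::read(path).context(ReadObjectSnafu { path })
}

fn main() {
    match read_file("/definitely/not/there") {
        Ok(bytes) => println!("read {} bytes", bytes.len()),
        Err(err) => println!("{err}"),
    }
}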
@@ -21,9 +21,8 @@ use datafusion::datasource::physical_plan::{FileMeta, FileOpenFuture, FileOpener
 use datafusion::error::{DataFusionError, Result as DfResult};
 use futures::{StreamExt, TryStreamExt};
 use object_store::ObjectStore;
-use orc_rust::arrow_reader::{create_arrow_schema, Cursor};
+use orc_rust::arrow_reader::ArrowReaderBuilder;
 use orc_rust::async_arrow_reader::ArrowStreamReader;
-use orc_rust::reader::Reader;
 use snafu::ResultExt;
 use tokio::io::{AsyncRead, AsyncSeek};
 
@@ -33,28 +32,20 @@ use crate::file_format::FileFormat;
 #[derive(Debug, Clone, Copy, PartialEq, Eq, Default)]
 pub struct OrcFormat;
 
-pub async fn new_orc_cursor<R: AsyncRead + AsyncSeek + Unpin + Send + 'static>(
-    reader: R,
-) -> Result<Cursor<R>> {
-    let reader = Reader::new_async(reader)
-        .await
-        .context(error::OrcReaderSnafu)?;
-    let cursor = Cursor::root(reader).context(error::OrcReaderSnafu)?;
-    Ok(cursor)
-}
-
 pub async fn new_orc_stream_reader<R: AsyncRead + AsyncSeek + Unpin + Send + 'static>(
     reader: R,
 ) -> Result<ArrowStreamReader<R>> {
-    let cursor = new_orc_cursor(reader).await?;
-    Ok(ArrowStreamReader::new(cursor, None))
+    let reader_build = ArrowReaderBuilder::try_new_async(reader)
+        .await
+        .context(error::OrcReaderSnafu)?;
+    Ok(reader_build.build_async())
 }
 
 pub async fn infer_orc_schema<R: AsyncRead + AsyncSeek + Unpin + Send + 'static>(
     reader: R,
 ) -> Result<Schema> {
-    let cursor = new_orc_cursor(reader).await?;
-    Ok(create_arrow_schema(&cursor))
+    let reader = new_orc_stream_reader(reader).await?;
+    Ok(reader.schema().as_ref().clone())
 }
 
 #[async_trait]
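
For orientation, a hedged usage sketch of the new reader path: build the async ORC reader, read the schema, then stream record batches. It uses ArrowReaderBuilder::try_new_async and build_async as shown in the diff, and assumes ArrowStreamReader implements futures::Stream of arrow RecordBatch results, which the TryStreamExt import in that file suggests:

use futures::TryStreamExt;
use orc_rust::arrow_reader::ArrowReaderBuilder;
use tokio::fs::File;

async fn dump_orc(path: &str) -> Result<(), Box<dyn std::error::Error>> {
    // The builder reads the ORC footer, so the schema is available before any data pass.
    let reader = ArrowReaderBuilder::try_new_async(File::open(path).await?)
        .await?
        .build_async();
    println!("schema: {:?}", reader.schema());

    // Pin the stream and pull record batches one by one.
    let mut batches = Box::pin(reader);
    while let Some(batch) = batches.try_next().await? {
        println!("batch with {} rows", batch.num_rows());
    }
    Ok(())
}

#[tokio::main]
async fn main() {
    if let Some(path) = std::env::args().nth(1) {
        if let Err(err) = dump_orc(&path).await {
            eprintln!("failed to read {path}: {err}");
        }
    }
}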
@@ -25,6 +25,7 @@ pub enum Error {
     #[snafu(display("Decimal out of range, decimal value: {}", value))]
     BigDecimalOutOfRange {
         value: BigDecimal,
+        #[snafu(implicit)]
         location: Location,
     },
 
@@ -43,7 +44,11 @@ pub enum Error {
     },
 
     #[snafu(display("Invalid precision or scale, resion: {}", reason))]
-    InvalidPrecisionOrScale { reason: String, location: Location },
+    InvalidPrecisionOrScale {
+        reason: String,
+        #[snafu(implicit)]
+        location: Location,
+    },
 }
 
 impl ErrorExt for Error {
Some files were not shown because too many files have changed in this diff.