Mirror of https://github.com/GreptimeTeam/greptimedb.git, synced 2026-01-09 06:42:57 +00:00

Compare commits: flow_fix_f ... v0.8.2 (67 commits)
| SHA1 |
|---|
| bf3ad44584 |
| 11a903f193 |
| acdfaabfa5 |
| 54ca06ba08 |
| 1f315e300f |
| 573e25a40f |
| f8ec46493f |
| 14a2d83594 |
| 65f8b72d34 |
| 9473daab8b |
| 5a6021e34f |
| 1b00526de5 |
| 5533bd9293 |
| 587e99d806 |
| 9cae15bd1b |
| d8b51cfaba |
| e142ca40d7 |
| e982d2e55c |
| 09e0e1b246 |
| 9c42825f5d |
| 4719569e4f |
| b03cb3860e |
| 2ade511f26 |
| 16b85b06b6 |
| 03cacf9948 |
| c23f8ad113 |
| e0a2c5a581 |
| 417ab3b779 |
| 1850fe2956 |
| dd06e107f9 |
| 98c19ed0fa |
| c0aed1d267 |
| 0a07130931 |
| a6269397c8 |
| a80059b47f |
| b3a4362626 |
| 51e2b6e728 |
| d1838fb28d |
| cd97a39904 |
| 4e5dd1ebb0 |
| 88cdefa41e |
| c2218f8be8 |
| 45fee948e9 |
| ea49f8a5c4 |
| 43afea1a9d |
| fcfcf86385 |
| 26b112ab57 |
| 24612f62dd |
| 85a231850d |
| f024054ed3 |
| 05751084e7 |
| 8b6596faa0 |
| eab309ff7e |
| 7de336f087 |
| 6e9a9dc333 |
| 848bd7e553 |
| f0effd2680 |
| aafb468547 |
| 4aa756c896 |
| d3860671a8 |
| 9dd6e033a7 |
| 097f62f459 |
| 048368fd87 |
| f9db5ff0d6 |
| 20ce7d428d |
| 75bddc0bf5 |
| c78043d526 |
.github/actions/build-greptime-binary/action.yml (vendored) — 16 changed lines

```diff
@@ -24,6 +24,14 @@ inputs:
     description: Build android artifacts
     required: false
     default: 'false'
+  image-namespace:
+    description: Image Namespace
+    required: false
+    default: 'greptime'
+  image-registry:
+    description: Image Registry
+    required: false
+    default: 'docker.io'
 runs:
   using: composite
   steps:
@@ -35,7 +43,9 @@ runs:
         make build-by-dev-builder \
           CARGO_PROFILE=${{ inputs.cargo-profile }} \
           FEATURES=${{ inputs.features }} \
-          BASE_IMAGE=${{ inputs.base-image }}
+          BASE_IMAGE=${{ inputs.base-image }} \
+          IMAGE_NAMESPACE=${{ inputs.image-namespace }} \
+          IMAGE_REGISTRY=${{ inputs.image-registry }}
 
     - name: Upload artifacts
       uses: ./.github/actions/upload-artifacts
@@ -53,7 +63,9 @@ runs:
       shell: bash
       if: ${{ inputs.build-android-artifacts == 'true' }}
       run: |
-        cd ${{ inputs.working-dir }} && make strip-android-bin
+        cd ${{ inputs.working-dir }} && make strip-android-bin \
+          IMAGE_NAMESPACE=${{ inputs.image-namespace }} \
+          IMAGE_REGISTRY=${{ inputs.image-registry }}
 
     - name: Upload android artifacts
       uses: ./.github/actions/upload-artifacts
```
.github/actions/build-linux-artifacts/action.yml (vendored) — 12 changed lines

```diff
@@ -30,7 +30,9 @@ runs:
       # NOTE: If the BUILD_JOBS > 4, it's always OOM in EC2 instance.
       run: |
         cd ${{ inputs.working-dir }} && \
-        make run-it-in-container BUILD_JOBS=4
+        make run-it-in-container BUILD_JOBS=4 \
+          IMAGE_NAMESPACE=i8k6a5e1/greptime \
+          IMAGE_REGISTRY=public.ecr.aws
 
     - name: Upload sqlness logs
       if: ${{ failure() && inputs.disable-run-tests == 'false' }} # Only upload logs when the integration tests failed.
@@ -49,6 +51,8 @@ runs:
         artifacts-dir: greptime-linux-${{ inputs.arch }}-pyo3-${{ inputs.version }}
         version: ${{ inputs.version }}
         working-dir: ${{ inputs.working-dir }}
+        image-registry: public.ecr.aws
+        image-namespace: i8k6a5e1/greptime
 
     - name: Build greptime without pyo3
       if: ${{ inputs.dev-mode == 'false' }}
@@ -60,6 +64,8 @@ runs:
         artifacts-dir: greptime-linux-${{ inputs.arch }}-${{ inputs.version }}
         version: ${{ inputs.version }}
         working-dir: ${{ inputs.working-dir }}
+        image-registry: public.ecr.aws
+        image-namespace: i8k6a5e1/greptime
 
     - name: Clean up the target directory # Clean up the target directory for the centos7 base image, or it will still use the objects of last build.
       shell: bash
@@ -76,6 +82,8 @@ runs:
         artifacts-dir: greptime-linux-${{ inputs.arch }}-centos-${{ inputs.version }}
         version: ${{ inputs.version }}
         working-dir: ${{ inputs.working-dir }}
+        image-registry: public.ecr.aws
+        image-namespace: i8k6a5e1/greptime
 
     - name: Build greptime on android base image
       uses: ./.github/actions/build-greptime-binary
@@ -86,3 +94,5 @@ runs:
         version: ${{ inputs.version }}
         working-dir: ${{ inputs.working-dir }}
         build-android-artifacts: true
+        image-registry: public.ecr.aws
+        image-namespace: i8k6a5e1/greptime
```
(file name not captured in the mirror)

```diff
@@ -59,9 +59,15 @@ runs:
       if: ${{ inputs.disable-run-tests == 'false' }}
       uses: taiki-e/install-action@nextest
 
+    # Get proper backtraces in mac Sonoma. Currently there's an issue with the new
+    # linker that prevents backtraces from getting printed correctly.
+    #
+    # <https://github.com/rust-lang/rust/issues/113783>
     - name: Run integration tests
       if: ${{ inputs.disable-run-tests == 'false' }}
       shell: bash
+      env:
+        CARGO_BUILD_RUSTFLAGS: "-Clink-arg=-Wl,-ld_classic"
      run: |
        make test sqlness-test
 
@@ -75,6 +81,8 @@ runs:
 
     - name: Build greptime binary
       shell: bash
+      env:
+        CARGO_BUILD_RUSTFLAGS: "-Clink-arg=-Wl,-ld_classic"
       run: |
         make build \
           CARGO_PROFILE=${{ inputs.cargo-profile }} \
```
(file name not captured in the mirror)

```diff
@@ -22,6 +22,9 @@ inputs:
   etcd-endpoints:
     default: "etcd.etcd-cluster.svc.cluster.local:2379"
     description: "Etcd endpoints"
+  values-filename:
+    default: "with-minio.yaml"
+
 
 runs:
   using: composite
@@ -57,6 +60,7 @@ runs:
           greptime/greptimedb-cluster \
           --create-namespace \
           -n my-greptimedb \
+          --values ./.github/actions/setup-greptimedb-cluster/${{ inputs.values-filename }} \
           --wait \
           --wait-for-jobs
     - name: Wait for GreptimeDB
```
.github/actions/setup-greptimedb-cluster/with-disk.yaml (vendored) — new file, 18 lines

```yaml
meta:
  config: |-
    [runtime]
    read_rt_size = 8
    write_rt_size = 8
    bg_rt_size = 8
datanode:
  config: |-
    [runtime]
    read_rt_size = 8
    write_rt_size = 8
    bg_rt_size = 8
frontend:
  config: |-
    [runtime]
    read_rt_size = 8
    write_rt_size = 8
    bg_rt_size = 8
```
.github/actions/setup-greptimedb-cluster/with-minio-and-cache.yaml (vendored) — new file, 38 lines

```yaml
meta:
  config: |-
    [runtime]
    read_rt_size = 8
    write_rt_size = 8
    bg_rt_size = 8

    [datanode]
    [datanode.client]
    timeout = "60s"
datanode:
  config: |-
    [runtime]
    read_rt_size = 8
    write_rt_size = 8
    bg_rt_size = 8

    [storage]
    cache_path = "/data/greptimedb/s3cache"
    cache_capacity = "256MB"
frontend:
  config: |-
    [runtime]
    read_rt_size = 8
    write_rt_size = 8
    bg_rt_size = 8

    [meta_client]
    ddl_timeout = "60s"
objectStorage:
  s3:
    bucket: default
    region: us-west-2
    root: test-root
    endpoint: http://minio.minio.svc.cluster.local
    credentials:
      accessKeyId: rootuser
      secretAccessKey: rootpass123
```
.github/actions/setup-greptimedb-cluster/with-minio.yaml (vendored) — new file, 34 lines

```yaml
meta:
  config: |-
    [runtime]
    read_rt_size = 8
    write_rt_size = 8
    bg_rt_size = 8

    [datanode]
    [datanode.client]
    timeout = "60s"
datanode:
  config: |-
    [runtime]
    read_rt_size = 8
    write_rt_size = 8
    bg_rt_size = 8
frontend:
  config: |-
    [runtime]
    read_rt_size = 8
    write_rt_size = 8
    bg_rt_size = 8

    [meta_client]
    ddl_timeout = "60s"
objectStorage:
  s3:
    bucket: default
    region: us-west-2
    root: test-root
    endpoint: http://minio.minio.svc.cluster.local
    credentials:
      accessKeyId: rootuser
      secretAccessKey: rootpass123
```
.github/actions/setup-greptimedb-cluster/with-remote-wal.yaml (vendored) — new file, 45 lines

```yaml
meta:
  config: |-
    [runtime]
    read_rt_size = 8
    write_rt_size = 8
    bg_rt_size = 8

    [wal]
    provider = "kafka"
    broker_endpoints = ["kafka.kafka-cluster.svc.cluster.local:9092"]
    num_topics = 3


    [datanode]
    [datanode.client]
    timeout = "60s"
datanode:
  config: |-
    [runtime]
    read_rt_size = 8
    write_rt_size = 8
    bg_rt_size = 8

    [wal]
    provider = "kafka"
    broker_endpoints = ["kafka.kafka-cluster.svc.cluster.local:9092"]
    linger = "2ms"
frontend:
  config: |-
    [runtime]
    read_rt_size = 8
    write_rt_size = 8
    bg_rt_size = 8

    [meta_client]
    ddl_timeout = "60s"
objectStorage:
  s3:
    bucket: default
    region: us-west-2
    root: test-root
    endpoint: http://minio.minio.svc.cluster.local
    credentials:
      accessKeyId: rootuser
      secretAccessKey: rootpass123
```
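These values files are consumed by the setup-greptimedb-cluster action shown earlier, which forwards the selected file to Helm. A minimal sketch of the equivalent manual invocation: the flags and namespace come from that action's hunk, while the `helm upgrade --install` prefix and release name are assumptions, since the start of the command is not visible in this diff.

```bash
# Sketch only: deploy the chart with the "Remote WAL" values used by CI.
helm upgrade --install my-greptimedb greptime/greptimedb-cluster \
  --create-namespace \
  -n my-greptimedb \
  --values ./.github/actions/setup-greptimedb-cluster/with-remote-wal.yaml \
  --wait \
  --wait-for-jobs
```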
.github/actions/setup-kafka-cluster/action.yml (vendored) — new file, 24 lines

```yaml
name: Setup Kafka cluster
description: Deploy Kafka cluster on Kubernetes
inputs:
  controller-replicas:
    default: 3
    description: "Kafka controller replicas"
  namespace:
    default: "kafka-cluster"

runs:
  using: composite
  steps:
    - name: Install Kafka cluster
      shell: bash
      run: |
        helm upgrade \
          --install kafka oci://registry-1.docker.io/bitnamicharts/kafka \
          --set controller.replicaCount=${{ inputs.controller-replicas }} \
          --set controller.resources.requests.cpu=50m \
          --set controller.resources.requests.memory=128Mi \
          --set listeners.controller.protocol=PLAINTEXT \
          --set listeners.client.protocol=PLAINTEXT \
          --create-namespace \
          -n ${{ inputs.namespace }}
```
.github/actions/setup-minio/action.yml (vendored) — new file, 24 lines

```yaml
name: Setup Minio cluster
description: Deploy Minio cluster on Kubernetes
inputs:
  replicas:
    default: 1
    description: "replicas"

runs:
  using: composite
  steps:
    - name: Install Etcd cluster
      shell: bash
      run: |
        helm repo add minio https://charts.min.io/
        helm upgrade --install minio \
          --set resources.requests.memory=128Mi \
          --set replicas=${{ inputs.replicas }} \
          --set mode=standalone \
          --set rootUser=rootuser,rootPassword=rootpass123 \
          --set buckets[0].name=default \
          --set service.port=80,service.targetPort=9000 \
          minio/minio \
          --create-namespace \
          -n minio
```
.github/workflows/develop.yml (vendored) — 138 changed lines

```diff
@@ -160,14 +160,16 @@ jobs:
         run: |
           sudo apt-get install -y libfuzzer-14-dev
           rustup install nightly
-          cargo +nightly install cargo-fuzz
+          cargo +nightly install cargo-fuzz cargo-gc-bin
       - name: Download pre-built binaries
         uses: actions/download-artifact@v4
         with:
           name: bins
           path: .
       - name: Unzip binaries
-        run: tar -xvf ./bins.tar.gz
+        run: |
+          tar -xvf ./bins.tar.gz
+          rm ./bins.tar.gz
       - name: Run GreptimeDB
         run: |
           ./bins/greptime standalone start&
@@ -182,7 +184,7 @@ jobs:
 
   unstable-fuzztest:
     name: Unstable Fuzz Test
-    needs: build
+    needs: build-greptime-ci
     runs-on: ubuntu-latest
     strategy:
       matrix:
@@ -204,20 +206,22 @@ jobs:
         shell: bash
         run: |
           sudo apt update && sudo apt install -y libfuzzer-14-dev
-          cargo install cargo-fuzz
-      - name: Download pre-built binaries
+          cargo install cargo-fuzz cargo-gc-bin
+      - name: Download pre-built binariy
         uses: actions/download-artifact@v4
         with:
-          name: bins
+          name: bin
           path: .
-      - name: Unzip binaries
-        run: tar -xvf ./bins.tar.gz
-      - name: Fuzz Test
+      - name: Unzip bianry
+        run: |
+          tar -xvf ./bin.tar.gz
+          rm ./bin.tar.gz
+      - name: Run Fuzz Test
         uses: ./.github/actions/fuzz-test
         env:
           CUSTOM_LIBFUZZER_PATH: /usr/lib/llvm-14/lib/libFuzzer.a
           GT_MYSQL_ADDR: 127.0.0.1:4002
-          GT_FUZZ_BINARY_PATH: ./bins/greptime
+          GT_FUZZ_BINARY_PATH: ./bin/greptime
           GT_FUZZ_INSTANCE_ROOT_DIR: /tmp/unstable-greptime/
         with:
           target: ${{ matrix.target }}
@@ -256,7 +260,7 @@ jobs:
       - name: Build greptime bianry
         shell: bash
         # `cargo gc` will invoke `cargo build` with specified args
-        run: cargo build --bin greptime --profile ci
+        run: cargo gc --profile ci -- --bin greptime
       - name: Pack greptime binary
         shell: bash
         run: |
@@ -271,16 +275,39 @@ jobs:
           version: current
 
   distributed-fuzztest:
-    name: Fuzz Test (Distributed, Disk)
+    name: Fuzz Test (Distributed, ${{ matrix.mode.name }}, ${{ matrix.target }})
     runs-on: ubuntu-latest
     needs: build-greptime-ci
     strategy:
       matrix:
         target: [ "fuzz_create_table", "fuzz_alter_table", "fuzz_create_database", "fuzz_create_logical_table", "fuzz_alter_logical_table", "fuzz_insert", "fuzz_insert_logical_table" ]
+        mode:
+          - name: "Disk"
+            minio: false
+            kafka: false
+            values: "with-disk.yaml"
+          - name: "Minio"
+            minio: true
+            kafka: false
+            values: "with-minio.yaml"
+          - name: "Minio with Cache"
+            minio: true
+            kafka: false
+            values: "with-minio-and-cache.yaml"
+          - name: "Remote WAL"
+            minio: true
+            kafka: true
+            values: "with-remote-wal.yaml"
     steps:
       - uses: actions/checkout@v4
       - name: Setup Kind
         uses: ./.github/actions/setup-kind
+      - if: matrix.mode.minio
+        name: Setup Minio
+        uses: ./.github/actions/setup-minio
+      - if: matrix.mode.kafka
+        name: Setup Kafka cluser
+        uses: ./.github/actions/setup-kafka-cluster
       - name: Setup Etcd cluser
         uses: ./.github/actions/setup-etcd-cluster
       # Prepares for fuzz tests
@@ -300,7 +327,7 @@ jobs:
         run: |
           sudo apt-get install -y libfuzzer-14-dev
           rustup install nightly
-          cargo +nightly install cargo-fuzz
+          cargo +nightly install cargo-fuzz cargo-gc-bin
       # Downloads ci image
       - name: Download pre-built binariy
         uses: actions/download-artifact@v4
@@ -308,7 +335,9 @@ jobs:
           name: bin
           path: .
       - name: Unzip binary
-        run: tar -xvf ./bin.tar.gz
+        run: |
+          tar -xvf ./bin.tar.gz
+          rm ./bin.tar.gz
       - name: Build and push GreptimeDB image
         uses: ./.github/actions/build-and-push-ci-image
       - name: Wait for etcd
@@ -318,6 +347,22 @@ jobs:
           pod -l app.kubernetes.io/instance=etcd \
           --timeout=120s \
           -n etcd-cluster
+      - if: matrix.mode.minio
+        name: Wait for minio
+        run: |
+          kubectl wait \
+            --for=condition=Ready \
+            pod -l app=minio \
+            --timeout=120s \
+            -n minio
+      - if: matrix.mode.kafka
+        name: Wait for kafka
+        run: |
+          kubectl wait \
+            --for=condition=Ready \
+            pod -l app.kubernetes.io/instance=kafka \
+            --timeout=120s \
+            -n kafka-cluster
       - name: Print etcd info
         shell: bash
         run: kubectl get all --show-labels -n etcd-cluster
@@ -326,6 +371,7 @@ jobs:
         uses: ./.github/actions/setup-greptimedb-cluster
         with:
           image-registry: localhost:5001
+          values-filename: ${{ matrix.mode.values }}
       - name: Port forward (mysql)
         run: |
           kubectl port-forward service/my-greptimedb-frontend 4002:4002 -n my-greptimedb&
@@ -351,18 +397,32 @@ jobs:
         if: failure()
         uses: actions/upload-artifact@v4
         with:
-          name: fuzz-tests-kind-logs-${{ matrix.target }}
+          name: fuzz-tests-kind-logs-${{ matrix.mode.name }}-${{ matrix.target }}
           path: /tmp/kind
           retention-days: 3
+      - name: Delete cluster
+        if: success()
+        shell: bash
+        run: |
+          kind delete cluster
+          docker stop $(docker ps -a -q)
+          docker rm $(docker ps -a -q)
+          docker system prune -f
 
   sqlness:
-    name: Sqlness Test
+    name: Sqlness Test (${{ matrix.mode.name }})
     needs: build
     runs-on: ${{ matrix.os }}
     strategy:
       matrix:
         os: [ ubuntu-20.04 ]
+        mode:
+          - name: "Basic"
+            opts: ""
+            kafka: false
+          - name: "Remote WAL"
+            opts: "-w kafka -k 127.0.0.1:9092"
+            kafka: true
     timeout-minutes: 60
     steps:
       - uses: actions/checkout@v4
@@ -373,43 +433,17 @@ jobs:
           path: .
       - name: Unzip binaries
         run: tar -xvf ./bins.tar.gz
-      - name: Run sqlness
-        run: RUST_BACKTRACE=1 ./bins/sqlness-runner -c ./tests/cases --bins-dir ./bins --preserve-state
-      - name: Upload sqlness logs
-        if: always()
-        uses: actions/upload-artifact@v4
-        with:
-          name: sqlness-logs
-          path: /tmp/sqlness*
-          retention-days: 3
-
-  sqlness-kafka-wal:
-    name: Sqlness Test with Kafka Wal
-    needs: build
-    runs-on: ${{ matrix.os }}
-    strategy:
-      matrix:
-        os: [ ubuntu-20.04 ]
-    timeout-minutes: 60
-    steps:
-      - uses: actions/checkout@v4
-      - name: Download pre-built binaries
-        uses: actions/download-artifact@v4
-        with:
-          name: bins
-          path: .
-      - name: Unzip binaries
-        run: tar -xvf ./bins.tar.gz
-      - name: Setup kafka server
+      - if: matrix.mode.kafka
+        name: Setup kafka server
         working-directory: tests-integration/fixtures/kafka
         run: docker compose -f docker-compose-standalone.yml up -d --wait
       - name: Run sqlness
-        run: RUST_BACKTRACE=1 ./bins/sqlness-runner -w kafka -k 127.0.0.1:9092 -c ./tests/cases --bins-dir ./bins --preserve-state
+        run: RUST_BACKTRACE=1 ./bins/sqlness-runner ${{ matrix.mode.opts }} -c ./tests/cases --bins-dir ./bins --preserve-state
       - name: Upload sqlness logs
-        if: always()
+        if: failure()
        uses: actions/upload-artifact@v4
         with:
-          name: sqlness-logs-with-kafka-wal
+          name: sqlness-logs-${{ matrix.mode.name }}
           path: /tmp/sqlness*
           retention-days: 3
 
@@ -498,6 +532,9 @@ jobs:
       - name: Setup kafka server
         working-directory: tests-integration/fixtures/kafka
         run: docker compose -f docker-compose-standalone.yml up -d --wait
+      - name: Setup minio
+        working-directory: tests-integration/fixtures/minio
+        run: docker compose -f docker-compose-standalone.yml up -d --wait
       - name: Run nextest cases
         run: cargo llvm-cov nextest --workspace --lcov --output-path lcov.info -F pyo3_backend -F dashboard
         env:
@@ -508,6 +545,11 @@ jobs:
           GT_S3_ACCESS_KEY_ID: ${{ secrets.AWS_CI_TEST_ACCESS_KEY_ID }}
           GT_S3_ACCESS_KEY: ${{ secrets.AWS_CI_TEST_SECRET_ACCESS_KEY }}
           GT_S3_REGION: ${{ vars.AWS_CI_TEST_BUCKET_REGION }}
+          GT_MINIO_BUCKET: greptime
+          GT_MINIO_ACCESS_KEY_ID: superpower_ci_user
+          GT_MINIO_ACCESS_KEY: superpower_password
+          GT_MINIO_REGION: us-west-2
+          GT_MINIO_ENDPOINT_URL: http://127.0.0.1:9000
           GT_ETCD_ENDPOINTS: http://127.0.0.1:2379
           GT_KAFKA_ENDPOINTS: 127.0.0.1:9092
           UNITTEST_LOG_DIR: "__unittest_logs"
```
Cargo.lock (generated) — 1641 changed lines; file diff suppressed because it is too large.
Cargo.toml — 36 changed lines

```diff
@@ -64,7 +64,7 @@ members = [
 resolver = "2"
 
 [workspace.package]
-version = "0.8.0"
+version = "0.8.2"
 edition = "2021"
 license = "Apache-2.0"
 
@@ -104,15 +104,15 @@ clap = { version = "4.4", features = ["derive"] }
 config = "0.13.0"
 crossbeam-utils = "0.8"
 dashmap = "5.4"
-datafusion = { git = "https://github.com/apache/arrow-datafusion.git", rev = "34eda15b73a9e278af8844b30ed2f1c21c10359c" }
-datafusion-common = { git = "https://github.com/apache/arrow-datafusion.git", rev = "34eda15b73a9e278af8844b30ed2f1c21c10359c" }
-datafusion-expr = { git = "https://github.com/apache/arrow-datafusion.git", rev = "34eda15b73a9e278af8844b30ed2f1c21c10359c" }
-datafusion-functions = { git = "https://github.com/apache/arrow-datafusion.git", rev = "34eda15b73a9e278af8844b30ed2f1c21c10359c" }
-datafusion-optimizer = { git = "https://github.com/apache/arrow-datafusion.git", rev = "34eda15b73a9e278af8844b30ed2f1c21c10359c" }
-datafusion-physical-expr = { git = "https://github.com/apache/arrow-datafusion.git", rev = "34eda15b73a9e278af8844b30ed2f1c21c10359c" }
-datafusion-physical-plan = { git = "https://github.com/apache/arrow-datafusion.git", rev = "34eda15b73a9e278af8844b30ed2f1c21c10359c" }
-datafusion-sql = { git = "https://github.com/apache/arrow-datafusion.git", rev = "34eda15b73a9e278af8844b30ed2f1c21c10359c" }
-datafusion-substrait = { git = "https://github.com/apache/arrow-datafusion.git", rev = "34eda15b73a9e278af8844b30ed2f1c21c10359c" }
+datafusion = { git = "https://github.com/apache/datafusion.git", rev = "08e19f4956d32164be6fc66eb5a4c080eb0023d1" }
+datafusion-common = { git = "https://github.com/apache/datafusion.git", rev = "08e19f4956d32164be6fc66eb5a4c080eb0023d1" }
+datafusion-expr = { git = "https://github.com/apache/datafusion.git", rev = "08e19f4956d32164be6fc66eb5a4c080eb0023d1" }
+datafusion-functions = { git = "https://github.com/apache/datafusion.git", rev = "08e19f4956d32164be6fc66eb5a4c080eb0023d1" }
+datafusion-optimizer = { git = "https://github.com/apache/datafusion.git", rev = "08e19f4956d32164be6fc66eb5a4c080eb0023d1" }
+datafusion-physical-expr = { git = "https://github.com/apache/datafusion.git", rev = "08e19f4956d32164be6fc66eb5a4c080eb0023d1" }
+datafusion-physical-plan = { git = "https://github.com/apache/datafusion.git", rev = "08e19f4956d32164be6fc66eb5a4c080eb0023d1" }
+datafusion-sql = { git = "https://github.com/apache/datafusion.git", rev = "08e19f4956d32164be6fc66eb5a4c080eb0023d1" }
+datafusion-substrait = { git = "https://github.com/apache/datafusion.git", rev = "08e19f4956d32164be6fc66eb5a4c080eb0023d1" }
 derive_builder = "0.12"
 dotenv = "0.15"
 # TODO(LFC): Wait for https://github.com/etcdv3/etcd-client/pull/76
@@ -120,7 +120,7 @@ etcd-client = { git = "https://github.com/MichaelScofield/etcd-client.git", rev
 fst = "0.4.7"
 futures = "0.3"
 futures-util = "0.3"
-greptime-proto = { git = "https://github.com/GreptimeTeam/greptime-proto.git", rev = "902f75fdd170c572e90b1f640161d90995f20218" }
+greptime-proto = { git = "https://github.com/GreptimeTeam/greptime-proto.git", rev = "ae26136accd82fbdf8be540cd502f2e94951077e" }
 humantime = "2.1"
 humantime-serde = "1.1"
 itertools = "0.10"
@@ -146,13 +146,15 @@ raft-engine = { version = "0.4.1", default-features = false }
 rand = "0.8"
 regex = "1.8"
 regex-automata = { version = "0.4" }
-reqwest = { version = "0.11", default-features = false, features = [
+reqwest = { version = "0.12", default-features = false, features = [
   "json",
   "rustls-tls-native-roots",
   "stream",
   "multipart",
 ] }
 rskafka = "0.5"
+rstest = "0.21"
+rstest_reuse = "0.7"
 rust_decimal = "1.33"
 schemars = "0.8"
 serde = { version = "1.0", features = ["derive"] }
@@ -162,7 +164,7 @@ smallvec = { version = "1", features = ["serde"] }
 snafu = "0.8"
 sysinfo = "0.30"
 # on branch v0.44.x
-sqlparser = { git = "https://github.com/GreptimeTeam/sqlparser-rs.git", rev = "e4e496b8d62416ad50ce70a1b460c7313610cf5d", features = [
+sqlparser = { git = "https://github.com/GreptimeTeam/sqlparser-rs.git", rev = "54a267ac89c09b11c0c88934690530807185d3e7", features = [
   "visitor",
 ] }
 strum = { version = "0.25", features = ["derive"] }
@@ -172,6 +174,7 @@ tokio-stream = { version = "0.1" }
 tokio-util = { version = "0.7", features = ["io-util", "compat"] }
 toml = "0.8.8"
 tonic = { version = "0.11", features = ["tls", "gzip", "zstd"] }
+tower = { version = "0.4" }
 uuid = { version = "1.7", features = ["serde", "v4", "fast-rng"] }
 zstd = "0.13"
 
@@ -232,8 +235,6 @@ sql = { path = "src/sql" }
 store-api = { path = "src/store-api" }
 substrait = { path = "src/common/substrait" }
 table = { path = "src/table" }
-# TODO some code depends on this
-tests-integration = { path = "tests-integration" }
 
 [workspace.dependencies.meter-macros]
 git = "https://github.com/GreptimeTeam/greptime-meter.git"
@@ -251,9 +252,12 @@ incremental = false
 
 [profile.ci]
 inherits = "dev"
-debug = false
 strip = true
 
 [profile.dev.package.sqlness-runner]
 debug = false
 strip = true
+
+[profile.dev.package.tests-fuzz]
+debug = false
+strip = true
```
Makefile — 11 changed lines

```diff
@@ -163,6 +163,13 @@ nextest: ## Install nextest tools.
 sqlness-test: ## Run sqlness test.
 	cargo sqlness
 
+# Run fuzz test ${FUZZ_TARGET}.
+RUNS ?= 1
+FUZZ_TARGET ?= fuzz_alter_table
+.PHONY: fuzz
+fuzz:
+	cargo fuzz run ${FUZZ_TARGET} --fuzz-dir tests-fuzz -D -s none -- -runs=${RUNS}
+
 .PHONY: check
 check: ## Cargo check all the targets.
 	cargo check --workspace --all-targets --all-features
@@ -194,6 +201,10 @@ run-it-in-container: start-etcd ## Run integration tests in dev-builder.
 	-w /greptimedb ${IMAGE_REGISTRY}/${IMAGE_NAMESPACE}/dev-builder-${BASE_IMAGE}:latest \
 	make test sqlness-test BUILD_JOBS=${BUILD_JOBS}
 
+.PHONY: run-cluster-with-etcd
+run-cluster-with-etcd: ## Run greptime cluster with etcd in docker-compose.
+	docker compose -f ./docker/docker-compose/cluster-with-etcd.yaml up
+
 ##@ Docs
 config-docs: ## Generate configuration documentation from toml files.
 	docker run --rm \
```
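For local use, the new targets can be invoked directly. A small usage sketch: the target and variable names come from the Makefile hunks above, while the specific fuzz target and run count chosen here are just illustrative values.

```bash
# Run one fuzz target ten times (defaults are FUZZ_TARGET=fuzz_alter_table, RUNS=1).
make fuzz FUZZ_TARGET=fuzz_insert RUNS=10

# Bring up a local etcd + metasrv + datanode + frontend cluster via docker-compose.
make run-cluster-with-etcd
```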
(file name not captured in the mirror)

```diff
@@ -12,7 +12,6 @@ api.workspace = true
 arrow.workspace = true
 chrono.workspace = true
 clap.workspace = true
-client.workspace = true
 common-base.workspace = true
 common-telemetry.workspace = true
 common-wal.workspace = true
@@ -33,8 +32,6 @@ rand.workspace = true
 rskafka.workspace = true
 serde.workspace = true
 store-api.workspace = true
-# TODO depend `Database` client
-tests-integration.workspace = true
 tokio.workspace = true
 toml.workspace = true
 uuid.workspace = true
```
(file name not captured in the mirror)

```diff
@@ -28,6 +28,7 @@ use rand::distributions::{Alphanumeric, DistString, Uniform};
 use rand::rngs::SmallRng;
 use rand::{Rng, SeedableRng};
 use serde::{Deserialize, Serialize};
+use store_api::logstore::provider::Provider;
 use store_api::logstore::LogStore;
 use store_api::storage::RegionId;
 
@@ -210,7 +211,7 @@ impl From<Args> for Config {
 pub struct Region {
     id: RegionId,
     schema: Vec<ColumnSchema>,
-    wal_options: WalOptions,
+    provider: Provider,
     next_sequence: AtomicU64,
     next_entry_id: AtomicU64,
     next_timestamp: AtomicI64,
@@ -227,10 +228,14 @@ impl Region {
         num_rows: u32,
         rng_seed: u64,
     ) -> Self {
+        let provider = match wal_options {
+            WalOptions::RaftEngine => Provider::raft_engine_provider(id.as_u64()),
+            WalOptions::Kafka(opts) => Provider::kafka_provider(opts.topic),
+        };
         Self {
             id,
             schema,
-            wal_options,
+            provider,
             next_sequence: AtomicU64::new(1),
             next_entry_id: AtomicU64::new(1),
             next_timestamp: AtomicI64::new(1655276557000),
@@ -258,14 +263,14 @@ impl Region {
                 self.id,
                 self.next_entry_id.fetch_add(1, Ordering::Relaxed),
                 &entry,
-                &self.wal_options,
+                &self.provider,
             )
             .unwrap();
     }
 
     /// Replays the region.
     pub async fn replay<S: LogStore>(&self, wal: &Arc<Wal<S>>) {
-        let mut wal_stream = wal.scan(self.id, 0, &self.wal_options).unwrap();
+        let mut wal_stream = wal.scan(self.id, 0, &self.provider).unwrap();
         while let Some(res) = wal_stream.next().await {
             let (_, entry) = res.unwrap();
             metrics::METRIC_WAL_READ_BYTES_TOTAL.inc_by(Self::entry_estimated_size(&entry) as u64);
```
(file name not captured in the mirror)

```diff
@@ -13,6 +13,10 @@
 | `mode` | String | `standalone` | The running mode of the datanode. It can be `standalone` or `distributed`. |
 | `enable_telemetry` | Bool | `true` | Enable telemetry to collect anonymous usage data. |
 | `default_timezone` | String | `None` | The default timezone of the server. |
+| `runtime` | -- | -- | The runtime options. |
+| `runtime.read_rt_size` | Integer | `8` | The number of threads to execute the runtime for global read operations. |
+| `runtime.write_rt_size` | Integer | `8` | The number of threads to execute the runtime for global write operations. |
+| `runtime.bg_rt_size` | Integer | `8` | The number of threads to execute the runtime for global background operations. |
 | `http` | -- | -- | The HTTP server options. |
 | `http.addr` | String | `127.0.0.1:4000` | The address to bind the HTTP server. |
 | `http.timeout` | String | `30s` | HTTP request timeout. |
@@ -154,6 +158,10 @@
 | --- | -----| ------- | ----------- |
 | `mode` | String | `standalone` | The running mode of the datanode. It can be `standalone` or `distributed`. |
 | `default_timezone` | String | `None` | The default timezone of the server. |
+| `runtime` | -- | -- | The runtime options. |
+| `runtime.read_rt_size` | Integer | `8` | The number of threads to execute the runtime for global read operations. |
+| `runtime.write_rt_size` | Integer | `8` | The number of threads to execute the runtime for global write operations. |
+| `runtime.bg_rt_size` | Integer | `8` | The number of threads to execute the runtime for global background operations. |
 | `heartbeat` | -- | -- | The heartbeat options. |
 | `heartbeat.interval` | String | `18s` | Interval for sending heartbeat messages to the metasrv. |
 | `heartbeat.retry_interval` | String | `3s` | Interval for retrying to send heartbeat messages to the metasrv. |
@@ -240,6 +248,10 @@
 | `use_memory_store` | Bool | `false` | Store data in memory. |
 | `enable_telemetry` | Bool | `true` | Whether to enable greptimedb telemetry. |
 | `store_key_prefix` | String | `""` | If it's not empty, the metasrv will store all data with this key prefix. |
+| `runtime` | -- | -- | The runtime options. |
+| `runtime.read_rt_size` | Integer | `8` | The number of threads to execute the runtime for global read operations. |
+| `runtime.write_rt_size` | Integer | `8` | The number of threads to execute the runtime for global write operations. |
+| `runtime.bg_rt_size` | Integer | `8` | The number of threads to execute the runtime for global background operations. |
 | `procedure` | -- | -- | Procedure storage options. |
 | `procedure.max_retry_times` | Integer | `12` | Procedure max retry time. |
 | `procedure.retry_delay` | String | `500ms` | Initial retry delay of procedures, increases exponentially |
@@ -294,12 +306,17 @@
 | `node_id` | Integer | `None` | The datanode identifier and should be unique in the cluster. |
 | `require_lease_before_startup` | Bool | `false` | Start services after regions have obtained leases.<br/>It will block the datanode start if it can't receive leases in the heartbeat from metasrv. |
 | `init_regions_in_background` | Bool | `false` | Initialize all regions in the background during the startup.<br/>By default, it provides services after all regions have been initialized. |
+| `init_regions_parallelism` | Integer | `16` | Parallelism of initializing regions. |
 | `rpc_addr` | String | `127.0.0.1:3001` | The gRPC address of the datanode. |
 | `rpc_hostname` | String | `None` | The hostname of the datanode. |
 | `rpc_runtime_size` | Integer | `8` | The number of gRPC server worker threads. |
 | `rpc_max_recv_message_size` | String | `512MB` | The maximum receive message size for gRPC server. |
 | `rpc_max_send_message_size` | String | `512MB` | The maximum send message size for gRPC server. |
 | `enable_telemetry` | Bool | `true` | Enable telemetry to collect anonymous usage data. |
+| `runtime` | -- | -- | The runtime options. |
+| `runtime.read_rt_size` | Integer | `8` | The number of threads to execute the runtime for global read operations. |
+| `runtime.write_rt_size` | Integer | `8` | The number of threads to execute the runtime for global write operations. |
+| `runtime.bg_rt_size` | Integer | `8` | The number of threads to execute the runtime for global background operations. |
 | `heartbeat` | -- | -- | The heartbeat options. |
 | `heartbeat.interval` | String | `3s` | Interval for sending heartbeat messages to the metasrv. |
 | `heartbeat.retry_interval` | String | `3s` | Interval for retrying to send heartbeat messages to the metasrv. |
```
(file name not captured in the mirror)

```diff
@@ -13,6 +13,9 @@ require_lease_before_startup = false
 ## By default, it provides services after all regions have been initialized.
 init_regions_in_background = false
 
+## Parallelism of initializing regions.
+init_regions_parallelism = 16
+
 ## The gRPC address of the datanode.
 rpc_addr = "127.0.0.1:3001"
 
@@ -32,6 +35,15 @@ rpc_max_send_message_size = "512MB"
 ## Enable telemetry to collect anonymous usage data.
 enable_telemetry = true
 
+## The runtime options.
+[runtime]
+## The number of threads to execute the runtime for global read operations.
+read_rt_size = 8
+## The number of threads to execute the runtime for global write operations.
+write_rt_size = 8
+## The number of threads to execute the runtime for global background operations.
+bg_rt_size = 8
+
 ## The heartbeat options.
 [heartbeat]
 ## Interval for sending heartbeat messages to the metasrv.
```
(file name not captured in the mirror)

```diff
@@ -5,6 +5,15 @@ mode = "standalone"
 ## +toml2docs:none-default
 default_timezone = "UTC"
 
+## The runtime options.
+[runtime]
+## The number of threads to execute the runtime for global read operations.
+read_rt_size = 8
+## The number of threads to execute the runtime for global write operations.
+write_rt_size = 8
+## The number of threads to execute the runtime for global background operations.
+bg_rt_size = 8
+
 ## The heartbeat options.
 [heartbeat]
 ## Interval for sending heartbeat messages to the metasrv.
```
(file name not captured in the mirror)

```diff
@@ -25,6 +25,15 @@ enable_telemetry = true
 ## If it's not empty, the metasrv will store all data with this key prefix.
 store_key_prefix = ""
 
+## The runtime options.
+[runtime]
+## The number of threads to execute the runtime for global read operations.
+read_rt_size = 8
+## The number of threads to execute the runtime for global write operations.
+write_rt_size = 8
+## The number of threads to execute the runtime for global background operations.
+bg_rt_size = 8
+
 ## Procedure storage options.
 [procedure]
 
```
(file name not captured in the mirror)

```diff
@@ -8,6 +8,15 @@ enable_telemetry = true
 ## +toml2docs:none-default
 default_timezone = "UTC"
 
+## The runtime options.
+[runtime]
+## The number of threads to execute the runtime for global read operations.
+read_rt_size = 8
+## The number of threads to execute the runtime for global write operations.
+write_rt_size = 8
+## The number of threads to execute the runtime for global background operations.
+bg_rt_size = 8
+
 ## The HTTP server options.
 [http]
 ## The address to bind the HTTP server.
```
docker/docker-compose/cluster-with-etcd.yaml — new file, 102 lines

```yaml
x-custom:
  initial_cluster_token: &initial_cluster_token "--initial-cluster-token=etcd-cluster"
  common_settings: &common_settings
    image: quay.io/coreos/etcd:v3.5.10
    entrypoint: /usr/local/bin/etcd

services:
  etcd0:
    <<: *common_settings
    container_name: etcd0
    ports:
      - 2379:2379
      - 2380:2380
    command:
      - --name=etcd0
      - --data-dir=/var/lib/etcd
      - --initial-advertise-peer-urls=http://etcd0:2380
      - --listen-peer-urls=http://0.0.0.0:2380
      - --listen-client-urls=http://0.0.0.0:2379
      - --advertise-client-urls=http://etcd0:2379
      - --heartbeat-interval=250
      - --election-timeout=1250
      - --initial-cluster=etcd0=http://etcd0:2380
      - --initial-cluster-state=new
      - *initial_cluster_token
    volumes:
      - /tmp/greptimedb-cluster-docker-compose/etcd0:/var/lib/etcd
    healthcheck:
      test: [ "CMD", "etcdctl", "--endpoints=http://etcd0:2379", "endpoint", "health" ]
      interval: 5s
      timeout: 3s
      retries: 5
    networks:
      - greptimedb

  metasrv:
    image: docker.io/greptime/greptimedb:latest
    container_name: metasrv
    ports:
      - 3002:3002
    command:
      - metasrv
      - start
      - --bind-addr=0.0.0.0:3002
      - --server-addr=metasrv:3002
      - --store-addrs=etcd0:2379
    healthcheck:
      test: [ "CMD", "curl", "-f", "http://metasrv:3002/health" ]
      interval: 5s
      timeout: 3s
      retries: 5
    depends_on:
      etcd0:
        condition: service_healthy
    networks:
      - greptimedb

  datanode0:
    image: docker.io/greptime/greptimedb:latest
    container_name: datanode0
    ports:
      - 3001:3001
    command:
      - datanode
      - start
      - --node-id=0
      - --rpc-addr=0.0.0.0:3001
      - --rpc-hostname=datanode0:3001
      - --metasrv-addr=metasrv:3002
    volumes:
      - /tmp/greptimedb-cluster-docker-compose/datanode0:/tmp/greptimedb
    depends_on:
      metasrv:
        condition: service_healthy
    networks:
      - greptimedb

  frontend0:
    image: docker.io/greptime/greptimedb:latest
    container_name: frontend0
    ports:
      - 4000:4000
      - 4001:4001
      - 4002:4002
      - 4003:4003
    command:
      - frontend
      - start
      - --metasrv-addrs=metasrv:3002
      - --http-addr=0.0.0.0:4000
      - --rpc-addr=0.0.0.0:4001
      - --mysql-addr=0.0.0.0:4002
      - --postgres-addr=0.0.0.0:4003
    depends_on:
      metasrv:
        condition: service_healthy
    networks:
      - greptimedb

networks:
  greptimedb:
    name: greptimedb
```
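The Makefile's `run-cluster-with-etcd` target wraps this compose file. A quick usage sketch: the compose command mirrors that target, while the `mysql` connectivity check is an illustrative example based on the frontend's exposed MySQL port above.

```bash
# Start the cluster defined in the compose file (same as `make run-cluster-with-etcd`).
docker compose -f ./docker/docker-compose/cluster-with-etcd.yaml up

# Example check: connect to the frontend's MySQL endpoint on the published port.
mysql -h 127.0.0.1 -P 4002
```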
src/cache/Cargo.toml (vendored) — 1 changed line

```diff
@@ -11,3 +11,4 @@ common-macro.workspace = true
 common-meta.workspace = true
 moka.workspace = true
 snafu.workspace = true
+substrait.workspace = true
```
15
src/cache/src/lib.rs
vendored
15
src/cache/src/lib.rs
vendored
@@ -20,7 +20,8 @@ use std::time::Duration;
 use catalog::kvbackend::new_table_cache;
 use common_meta::cache::{
     new_table_flownode_set_cache, new_table_info_cache, new_table_name_cache,
-    new_table_route_cache, CacheRegistry, CacheRegistryBuilder, LayeredCacheRegistryBuilder,
+    new_table_route_cache, new_view_info_cache, CacheRegistry, CacheRegistryBuilder,
+    LayeredCacheRegistryBuilder,
 };
 use common_meta::kv_backend::KvBackendRef;
 use moka::future::CacheBuilder;
@@ -33,6 +34,7 @@ const DEFAULT_CACHE_TTL: Duration = Duration::from_secs(10 * 60);
 const DEFAULT_CACHE_TTI: Duration = Duration::from_secs(5 * 60);

 pub const TABLE_INFO_CACHE_NAME: &str = "table_info_cache";
+pub const VIEW_INFO_CACHE_NAME: &str = "view_info_cache";
 pub const TABLE_NAME_CACHE_NAME: &str = "table_name_cache";
 pub const TABLE_CACHE_NAME: &str = "table_cache";
 pub const TABLE_FLOWNODE_SET_CACHE_NAME: &str = "table_flownode_set_cache";
@@ -82,11 +84,22 @@ pub fn build_fundamental_cache_registry(kv_backend: KvBackendRef) -> CacheRegist
         cache,
         kv_backend.clone(),
     ));
+    // Builds the view info cache
+    let cache = CacheBuilder::new(DEFAULT_CACHE_MAX_CAPACITY)
+        .time_to_live(DEFAULT_CACHE_TTL)
+        .time_to_idle(DEFAULT_CACHE_TTI)
+        .build();
+    let view_info_cache = Arc::new(new_view_info_cache(
+        VIEW_INFO_CACHE_NAME.to_string(),
+        cache,
+        kv_backend.clone(),
+    ));

     CacheRegistryBuilder::default()
         .add_cache(table_info_cache)
         .add_cache(table_name_cache)
         .add_cache(table_route_cache)
+        .add_cache(view_info_cache)
         .add_cache(table_flownode_set_cache)
         .build()
 }
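A minimal sketch, not part of the change above, of how the fundamental cache registry could be assembled and the new view info cache read back out of it. The layered-registry helpers and the in-memory backend mirror the `test_resolve_view` test later in this compare, so treat the exact wiring as an assumption:

// Sketch only: mirrors the registry wiring used by the catalog test further below.
use std::sync::Arc;

use cache::{build_fundamental_cache_registry, with_default_composite_cache_registry};
use common_meta::cache::{CacheRegistryBuilder, LayeredCacheRegistryBuilder, ViewInfoCacheRef};
use common_meta::kv_backend::memory::MemoryKvBackend;

fn view_info_cache_from_registry() -> Option<ViewInfoCacheRef> {
    // An in-memory backend stands in for the real metadata store here.
    let backend = Arc::new(MemoryKvBackend::default());

    // Fundamental caches: table info/name/route, flownode set, and now view info.
    let fundamental = build_fundamental_cache_registry(backend);

    // Layer it the same way the catalog manager expects to consume it.
    let layered = with_default_composite_cache_registry(
        LayeredCacheRegistryBuilder::default()
            .add_cache_registry(CacheRegistryBuilder::default().build())
            .add_cache_registry(fundamental),
    )
    .unwrap()
    .build();

    // Typed lookup; `None` means the cache was never registered.
    let view_info_cache: Option<ViewInfoCacheRef> = layered.get();
    view_info_cache
}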
@@ -16,6 +16,7 @@ arrow.workspace = true
 arrow-schema.workspace = true
 async-stream.workspace = true
 async-trait = "0.1"
+bytes.workspace = true
 common-catalog.workspace = true
 common-config.workspace = true
 common-error.workspace = true
@@ -48,8 +49,11 @@ table.workspace = true
 tokio.workspace = true

 [dev-dependencies]
+cache.workspace = true
 catalog = { workspace = true, features = ["testing"] }
 chrono.workspace = true
+common-meta = { workspace = true, features = ["testing"] }
+common-query = { workspace = true, features = ["testing"] }
 common-test-util.workspace = true
 log-store.workspace = true
 object-store.workspace = true
@@ -19,10 +19,7 @@ use common_error::ext::{BoxedError, ErrorExt};
 use common_error::status_code::StatusCode;
 use common_macro::stack_trace_debug;
 use datafusion::error::DataFusionError;
-use datatypes::prelude::ConcreteDataType;
 use snafu::{Location, Snafu};
-use table::metadata::TableId;
-use tokio::task::JoinError;

 #[derive(Snafu)]
 #[snafu(visibility(pub))]
@@ -65,19 +62,6 @@ pub enum Error {
         location: Location,
         source: BoxedError,
     },

-    #[snafu(display("Failed to open system catalog table"))]
-    OpenSystemCatalog {
-        #[snafu(implicit)]
-        location: Location,
-        source: table::error::Error,
-    },
-
-    #[snafu(display("Failed to create system catalog table"))]
-    CreateSystemCatalog {
-        #[snafu(implicit)]
-        location: Location,
-        source: table::error::Error,
-    },
-
     #[snafu(display("Failed to create table, table info: {}", table_info))]
     CreateTable {
@@ -94,52 +78,6 @@ pub enum Error {
         location: Location,
     },

-    #[snafu(display(
-        "System catalog table type mismatch, expected: binary, found: {:?}",
-        data_type,
-    ))]
-    SystemCatalogTypeMismatch {
-        data_type: ConcreteDataType,
-        #[snafu(implicit)]
-        location: Location,
-    },
-
-    #[snafu(display("Invalid system catalog entry type: {:?}", entry_type))]
-    InvalidEntryType {
-        entry_type: Option<u8>,
-        #[snafu(implicit)]
-        location: Location,
-    },
-
-    #[snafu(display("Invalid system catalog key: {:?}", key))]
-    InvalidKey {
-        key: Option<String>,
-        #[snafu(implicit)]
-        location: Location,
-    },
-
-    #[snafu(display("Catalog value is not present"))]
-    EmptyValue {
-        #[snafu(implicit)]
-        location: Location,
-    },
-
-    #[snafu(display("Failed to deserialize value"))]
-    ValueDeserialize {
-        #[snafu(source)]
-        error: serde_json::error::Error,
-        #[snafu(implicit)]
-        location: Location,
-    },
-
-    #[snafu(display("Table engine not found: {}", engine_name))]
-    TableEngineNotFound {
-        engine_name: String,
-        #[snafu(implicit)]
-        location: Location,
-        source: table::error::Error,
-    },
-
     #[snafu(display("Cannot find catalog by name: {}", catalog_name))]
     CatalogNotFound {
         catalog_name: String,
@@ -169,44 +107,9 @@ pub enum Error {
         location: Location,
     },

-    #[snafu(display("Schema {} already exists", schema))]
-    SchemaExists {
-        schema: String,
-        #[snafu(implicit)]
-        location: Location,
-    },
-
-    #[snafu(display("Operation {} not implemented yet", operation))]
-    Unimplemented {
-        operation: String,
-        #[snafu(implicit)]
-        location: Location,
-    },
-
-    #[snafu(display("Operation {} not supported", op))]
-    NotSupported {
-        op: String,
-        #[snafu(implicit)]
-        location: Location,
-    },
-
-    #[snafu(display("Failed to open table {table_id}"))]
-    OpenTable {
-        table_id: TableId,
-        #[snafu(implicit)]
-        location: Location,
-        source: table::error::Error,
-    },
-
-    #[snafu(display("Failed to open table in parallel"))]
-    ParallelOpenTable {
-        #[snafu(source)]
-        error: JoinError,
-    },
-
-    #[snafu(display("Table not found while opening table, table info: {}", table_info))]
-    TableNotFound {
-        table_info: String,
+    #[snafu(display("View info not found: {}", name))]
+    ViewInfoNotFound {
+        name: String,
         #[snafu(implicit)]
         location: Location,
     },
@@ -217,13 +120,6 @@ pub enum Error {
     #[snafu(display("Failed to find region routes"))]
     FindRegionRoutes { source: partition::error::Error },

-    #[snafu(display("Failed to read system catalog table records"))]
-    ReadSystemCatalog {
-        #[snafu(implicit)]
-        location: Location,
-        source: common_recordbatch::error::Error,
-    },
-
     #[snafu(display("Failed to create recordbatch"))]
     CreateRecordBatch {
         #[snafu(implicit)]
@@ -231,20 +127,6 @@ pub enum Error {
         source: common_recordbatch::error::Error,
     },

-    #[snafu(display("Failed to insert table creation record to system catalog"))]
-    InsertCatalogRecord {
-        #[snafu(implicit)]
-        location: Location,
-        source: table::error::Error,
-    },
-
-    #[snafu(display("Failed to scan system catalog table"))]
-    SystemCatalogTableScan {
-        #[snafu(implicit)]
-        location: Location,
-        source: table::error::Error,
-    },
-
     #[snafu(display("Internal error"))]
     Internal {
         #[snafu(implicit)]
@@ -258,20 +140,14 @@ pub enum Error {
         location: Location,
     },

-    #[snafu(display("Failed to execute system catalog table scan"))]
-    SystemCatalogTableScanExec {
+    #[snafu(display("Failed to decode logical plan for view: {}", name))]
+    DecodePlan {
+        name: String,
         #[snafu(implicit)]
         location: Location,
         source: common_query::error::Error,
     },

-    #[snafu(display("Cannot parse catalog value"))]
-    InvalidCatalogValue {
-        #[snafu(implicit)]
-        location: Location,
-        source: common_catalog::error::Error,
-    },
-
     #[snafu(display("Failed to perform metasrv operation"))]
     Metasrv {
         #[snafu(implicit)]
@@ -297,20 +173,6 @@ pub enum Error {
         location: Location,
     },

-    #[snafu(display("Table schema mismatch"))]
-    TableSchemaMismatch {
-        #[snafu(implicit)]
-        location: Location,
-        source: table::error::Error,
-    },
-
-    #[snafu(display("A generic error has occurred, msg: {}", msg))]
-    Generic {
-        msg: String,
-        #[snafu(implicit)]
-        location: Location,
-    },
-
     #[snafu(display("Table metadata manager error"))]
     TableMetadataManager {
         source: common_meta::error::Error,
@@ -324,6 +186,26 @@ pub enum Error {
         #[snafu(implicit)]
         location: Location,
     },
+
+    #[snafu(display("Failed to get view info from cache"))]
+    GetViewCache {
+        source: common_meta::error::Error,
+        #[snafu(implicit)]
+        location: Location,
+    },
+
+    #[snafu(display("Cache not found: {name}"))]
+    CacheNotFound {
+        name: String,
+        #[snafu(implicit)]
+        location: Location,
+    },
+
+    #[snafu(display("Failed to cast the catalog manager"))]
+    CastManager {
+        #[snafu(implicit)]
+        location: Location,
+    },
 }

 pub type Result<T> = std::result::Result<T, Error>;
@@ -331,61 +213,43 @@ pub type Result<T> = std::result::Result<T, Error>;
 impl ErrorExt for Error {
     fn status_code(&self) -> StatusCode {
         match self {
-            Error::InvalidKey { .. }
-            | Error::SchemaNotFound { .. }
+            Error::SchemaNotFound { .. }
             | Error::CatalogNotFound { .. }
             | Error::FindPartitions { .. }
             | Error::FindRegionRoutes { .. }
-            | Error::InvalidEntryType { .. }
-            | Error::ParallelOpenTable { .. } => StatusCode::Unexpected,
+            | Error::CacheNotFound { .. }
+            | Error::CastManager { .. } => StatusCode::Unexpected,

-            Error::TableNotFound { .. } => StatusCode::TableNotFound,
+            Error::ViewInfoNotFound { .. } => StatusCode::TableNotFound,

-            Error::SystemCatalog { .. }
-            | Error::EmptyValue { .. }
-            | Error::ValueDeserialize { .. } => StatusCode::StorageUnavailable,
+            Error::SystemCatalog { .. } => StatusCode::StorageUnavailable,

-            Error::Generic { .. }
-            | Error::SystemCatalogTypeMismatch { .. }
-            | Error::UpgradeWeakCatalogManagerRef { .. } => StatusCode::Internal,
+            Error::UpgradeWeakCatalogManagerRef { .. } => StatusCode::Internal,

-            Error::ReadSystemCatalog { source, .. } | Error::CreateRecordBatch { source, .. } => {
-                source.status_code()
-            }
-            Error::InvalidCatalogValue { source, .. } => source.status_code(),
-
+            Error::CreateRecordBatch { source, .. } => source.status_code(),
             Error::TableExists { .. } => StatusCode::TableAlreadyExists,
             Error::TableNotExist { .. } => StatusCode::TableNotFound,
-            Error::SchemaExists { .. } | Error::TableEngineNotFound { .. } => {
-                StatusCode::InvalidArguments
-            }

             Error::ListCatalogs { source, .. }
             | Error::ListNodes { source, .. }
             | Error::ListSchemas { source, .. }
             | Error::ListTables { source, .. } => source.status_code(),

-            Error::OpenSystemCatalog { source, .. }
-            | Error::CreateSystemCatalog { source, .. }
-            | Error::InsertCatalogRecord { source, .. }
-            | Error::OpenTable { source, .. }
-            | Error::CreateTable { source, .. }
-            | Error::TableSchemaMismatch { source, .. } => source.status_code(),
+            Error::CreateTable { source, .. } => source.status_code(),

             Error::Metasrv { source, .. } => source.status_code(),
-            Error::SystemCatalogTableScan { source, .. } => source.status_code(),
-            Error::SystemCatalogTableScanExec { source, .. } => source.status_code(),
+            Error::DecodePlan { source, .. } => source.status_code(),
             Error::InvalidTableInfoInCatalog { source, .. } => source.status_code(),

             Error::CompileScriptInternal { source, .. } | Error::Internal { source, .. } => {
                 source.status_code()
             }

-            Error::Unimplemented { .. } | Error::NotSupported { .. } => StatusCode::Unsupported,
             Error::QueryAccessDenied { .. } => StatusCode::AccessDenied,
             Error::Datafusion { .. } => StatusCode::EngineExecuteQuery,
             Error::TableMetadataManager { source, .. } => source.status_code(),
-            Error::GetTableCache { .. } => StatusCode::Internal,
+            Error::GetViewCache { source, .. } | Error::GetTableCache { source, .. } => {
+                source.status_code()
+            }
         }
     }
@@ -417,11 +281,6 @@ mod tests {
                 .status_code()
         );

-        assert_eq!(
-            StatusCode::Unexpected,
-            InvalidKeySnafu { key: None }.build().status_code()
-        );
-
         assert_eq!(
             StatusCode::StorageUnavailable,
             Error::SystemCatalog {
@@ -430,19 +289,6 @@ mod tests {
             }
             .status_code()
         );
-
-        assert_eq!(
-            StatusCode::Internal,
-            Error::SystemCatalogTypeMismatch {
-                data_type: ConcreteDataType::binary_datatype(),
-                location: Location::generate(),
-            }
-            .status_code()
-        );
-        assert_eq!(
-            StatusCode::StorageUnavailable,
-            EmptyValueSnafu {}.build().status_code()
-        );
     }

     #[test]
@@ -105,7 +105,9 @@ impl InformationTable for InformationSchemaTables {
                 .make_tables(Some(request))
                 .await
                 .map(|x| x.into_df_record_batch())
-                .map_err(Into::into)
+                .map_err(|err| {
+                    datafusion::error::DataFusionError::External(format!("{err:?}").into())
+                })
             }),
         ));
         Ok(Box::pin(
@@ -22,14 +22,13 @@ use common_catalog::consts::{
 };
 use common_config::Mode;
 use common_error::ext::BoxedError;
-use common_meta::cache::TableRouteCacheRef;
+use common_meta::cache::{LayeredCacheRegistryRef, ViewInfoCacheRef};
 use common_meta::key::catalog_name::CatalogNameKey;
 use common_meta::key::schema_name::SchemaNameKey;
 use common_meta::key::table_info::TableInfoValue;
 use common_meta::key::table_name::TableNameKey;
 use common_meta::key::{TableMetadataManager, TableMetadataManagerRef};
 use common_meta::kv_backend::KvBackendRef;
-use common_meta::table_name::TableName;
 use futures_util::stream::BoxStream;
 use futures_util::{StreamExt, TryStreamExt};
 use meta_client::client::MetaClient;
@@ -38,11 +37,12 @@ use partition::manager::{PartitionRuleManager, PartitionRuleManagerRef};
 use snafu::prelude::*;
 use table::dist_table::DistTable;
 use table::table::numbers::{NumbersTable, NUMBERS_TABLE_NAME};
+use table::table_name::TableName;
 use table::TableRef;

 use crate::error::{
-    GetTableCacheSnafu, InvalidTableInfoInCatalogSnafu, ListCatalogsSnafu, ListSchemasSnafu,
-    ListTablesSnafu, Result, TableMetadataManagerSnafu,
+    CacheNotFoundSnafu, GetTableCacheSnafu, InvalidTableInfoInCatalogSnafu, ListCatalogsSnafu,
+    ListSchemasSnafu, ListTablesSnafu, Result, TableMetadataManagerSnafu,
 };
 use crate::information_schema::InformationSchemaProvider;
 use crate::kvbackend::TableCacheRef;
@@ -61,25 +61,26 @@ pub struct KvBackendCatalogManager {
     table_metadata_manager: TableMetadataManagerRef,
     /// A sub-CatalogManager that handles system tables
     system_catalog: SystemCatalog,
-    table_cache: TableCacheRef,
+    cache_registry: LayeredCacheRegistryRef,
 }

 const CATALOG_CACHE_MAX_CAPACITY: u64 = 128;

 impl KvBackendCatalogManager {
-    pub async fn new(
+    pub fn new(
         mode: Mode,
         meta_client: Option<Arc<MetaClient>>,
         backend: KvBackendRef,
-        table_cache: TableCacheRef,
-        table_route_cache: TableRouteCacheRef,
+        cache_registry: LayeredCacheRegistryRef,
     ) -> Arc<Self> {
         Arc::new_cyclic(|me| Self {
             mode,
             meta_client,
             partition_manager: Arc::new(PartitionRuleManager::new(
                 backend.clone(),
-                table_route_cache,
+                cache_registry
+                    .get()
+                    .expect("Failed to get table_route_cache"),
             )),
             table_metadata_manager: Arc::new(TableMetadataManager::new(backend)),
             system_catalog: SystemCatalog {
@@ -90,7 +91,7 @@ impl KvBackendCatalogManager {
                 me.clone(),
             )),
             },
-            table_cache,
+            cache_registry,
         })
     }

@@ -99,6 +100,12 @@ impl KvBackendCatalogManager {
         &self.mode
     }

+    pub fn view_info_cache(&self) -> Result<ViewInfoCacheRef> {
+        self.cache_registry.get().context(CacheNotFoundSnafu {
+            name: "view_info_cache",
+        })
+    }
+
     /// Returns the `[MetaClient]`.
     pub fn meta_client(&self) -> Option<Arc<MetaClient>> {
         self.meta_client.clone()
@@ -215,7 +222,11 @@ impl CatalogManager for KvBackendCatalogManager {
             return Ok(Some(table));
         }

-        self.table_cache
+        let table_cache: TableCacheRef = self.cache_registry.get().context(CacheNotFoundSnafu {
+            name: "table_cache",
+        })?;
+
+        table_cache
             .get_by_ref(&TableName {
                 catalog_name: catalog_name.to_string(),
                 schema_name: schema_name.to_string(),
@@ -17,11 +17,11 @@ use std::sync::Arc;
 use common_meta::cache::{CacheContainer, Initializer, TableInfoCacheRef, TableNameCacheRef};
 use common_meta::error::{Result as MetaResult, ValueNotExistSnafu};
 use common_meta::instruction::CacheIdent;
-use common_meta::table_name::TableName;
 use futures::future::BoxFuture;
 use moka::future::Cache;
 use snafu::OptionExt;
 use table::dist_table::DistTable;
+use table::table_name::TableName;
 use table::TableRef;

 pub type TableCacheRef = Arc<TableCache>;
@@ -15,15 +15,25 @@
 use std::collections::HashMap;
 use std::sync::Arc;

+use bytes::Bytes;
 use common_catalog::format_full_table_name;
+use common_query::logical_plan::SubstraitPlanDecoderRef;
 use datafusion::common::{ResolvedTableReference, TableReference};
-use datafusion::datasource::provider_as_source;
+use datafusion::datasource::view::ViewTable;
+use datafusion::datasource::{provider_as_source, TableProvider};
 use datafusion::logical_expr::TableSource;
 use session::context::QueryContext;
-use snafu::{ensure, OptionExt};
+use snafu::{ensure, OptionExt, ResultExt};
+use table::metadata::TableType;
 use table::table::adapter::DfTableProviderAdapter;
+
+mod dummy_catalog;
+use dummy_catalog::DummyCatalogList;

-use crate::error::{QueryAccessDeniedSnafu, Result, TableNotExistSnafu};
+use crate::error::{
+    CastManagerSnafu, DatafusionSnafu, DecodePlanSnafu, GetViewCacheSnafu, QueryAccessDeniedSnafu,
+    Result, TableNotExistSnafu, ViewInfoNotFoundSnafu,
+};
+use crate::kvbackend::KvBackendCatalogManager;
 use crate::CatalogManagerRef;

 pub struct DfTableSourceProvider {
@@ -32,6 +42,7 @@ pub struct DfTableSourceProvider {
     disallow_cross_catalog_query: bool,
     default_catalog: String,
     default_schema: String,
+    plan_decoder: SubstraitPlanDecoderRef,
 }

 impl DfTableSourceProvider {
@@ -39,6 +50,7 @@ impl DfTableSourceProvider {
         catalog_manager: CatalogManagerRef,
         disallow_cross_catalog_query: bool,
         query_ctx: &QueryContext,
+        plan_decoder: SubstraitPlanDecoderRef,
     ) -> Self {
         Self {
             catalog_manager,
@@ -46,6 +58,7 @@ impl DfTableSourceProvider {
             resolved_tables: HashMap::new(),
             default_catalog: query_ctx.current_catalog().to_owned(),
             default_schema: query_ctx.current_schema().to_owned(),
+            plan_decoder,
         }
     }

@@ -94,8 +107,39 @@ impl DfTableSourceProvider {
                 table: format_full_table_name(catalog_name, schema_name, table_name),
             })?;

-        let provider = DfTableProviderAdapter::new(table);
-        let source = provider_as_source(Arc::new(provider));
+        let provider: Arc<dyn TableProvider> = if table.table_info().table_type == TableType::View {
+            let catalog_manager = self
+                .catalog_manager
+                .as_any()
+                .downcast_ref::<KvBackendCatalogManager>()
+                .context(CastManagerSnafu)?;
+
+            let view_info = catalog_manager
+                .view_info_cache()?
+                .get(table.table_info().ident.table_id)
+                .await
+                .context(GetViewCacheSnafu)?
+                .context(ViewInfoNotFoundSnafu {
+                    name: &table.table_info().name,
+                })?;
+
+            // Build the catalog list provider for deserialization.
+            let catalog_list = Arc::new(DummyCatalogList::new(self.catalog_manager.clone()));
+            let logical_plan = self
+                .plan_decoder
+                .decode(Bytes::from(view_info.view_info.clone()), catalog_list, true)
+                .await
+                .context(DecodePlanSnafu {
+                    name: &table.table_info().name,
+                })?;
+
+            Arc::new(ViewTable::try_new(logical_plan, None).context(DatafusionSnafu)?)
+        } else {
+            Arc::new(DfTableProviderAdapter::new(table))
+        };
+
+        let source = provider_as_source(provider);
+
         let _ = self.resolved_tables.insert(resolved_name, source.clone());
         Ok(source)
     }
@@ -103,6 +147,7 @@ impl DfTableSourceProvider {

 #[cfg(test)]
 mod tests {
+    use common_query::test_util::DummyDecoder;
     use session::context::QueryContext;

     use super::*;
@@ -112,8 +157,12 @@ mod tests {
     fn test_validate_table_ref() {
         let query_ctx = &QueryContext::with("greptime", "public");

-        let table_provider =
-            DfTableSourceProvider::new(MemoryCatalogManager::with_default_setup(), true, query_ctx);
+        let table_provider = DfTableSourceProvider::new(
+            MemoryCatalogManager::with_default_setup(),
+            true,
+            query_ctx,
+            DummyDecoder::arc(),
+        );

         let table_ref = TableReference::bare("table_name");
         let result = table_provider.resolve_table_ref(table_ref);
@@ -148,4 +197,99 @@ mod tests {
         let table_ref = TableReference::full("greptime", "greptime_private", "columns");
         assert!(table_provider.resolve_table_ref(table_ref).is_ok());
     }

+    use std::collections::HashSet;
+
+    use arrow::datatypes::{DataType, Field, Schema, SchemaRef};
+    use cache::{build_fundamental_cache_registry, with_default_composite_cache_registry};
+    use common_config::Mode;
+    use common_meta::cache::{CacheRegistryBuilder, LayeredCacheRegistryBuilder};
+    use common_meta::key::TableMetadataManager;
+    use common_meta::kv_backend::memory::MemoryKvBackend;
+    use common_query::error::Result as QueryResult;
+    use common_query::logical_plan::SubstraitPlanDecoder;
+    use datafusion::catalog::CatalogProviderList;
+    use datafusion::logical_expr::builder::LogicalTableSource;
+    use datafusion::logical_expr::{col, lit, LogicalPlan, LogicalPlanBuilder};
+
+    struct MockDecoder;
+    impl MockDecoder {
+        pub fn arc() -> Arc<Self> {
+            Arc::new(MockDecoder)
+        }
+    }
+
+    #[async_trait::async_trait]
+    impl SubstraitPlanDecoder for MockDecoder {
+        async fn decode(
+            &self,
+            _message: bytes::Bytes,
+            _catalog_list: Arc<dyn CatalogProviderList>,
+            _optimize: bool,
+        ) -> QueryResult<LogicalPlan> {
+            Ok(mock_plan())
+        }
+    }
+
+    fn mock_plan() -> LogicalPlan {
+        let schema = Schema::new(vec![
+            Field::new("id", DataType::Int32, true),
+            Field::new("name", DataType::Utf8, true),
+        ]);
+        let table_source = LogicalTableSource::new(SchemaRef::new(schema));
+
+        let projection = None;
+
+        let builder =
+            LogicalPlanBuilder::scan("person", Arc::new(table_source), projection).unwrap();
+
+        builder
+            .filter(col("id").gt(lit(500)))
+            .unwrap()
+            .build()
+            .unwrap()
+    }
+
+    #[tokio::test]
+    async fn test_resolve_view() {
+        let query_ctx = &QueryContext::with("greptime", "public");
+        let backend = Arc::new(MemoryKvBackend::default());
+        let layered_cache_builder = LayeredCacheRegistryBuilder::default()
+            .add_cache_registry(CacheRegistryBuilder::default().build());
+        let fundamental_cache_registry = build_fundamental_cache_registry(backend.clone());
+        let layered_cache_registry = Arc::new(
+            with_default_composite_cache_registry(
+                layered_cache_builder.add_cache_registry(fundamental_cache_registry),
+            )
+            .unwrap()
+            .build(),
+        );
+
+        let catalog_manager = KvBackendCatalogManager::new(
+            Mode::Standalone,
+            None,
+            backend.clone(),
+            layered_cache_registry,
+        );
+        let table_metadata_manager = TableMetadataManager::new(backend);
+        let mut view_info = common_meta::key::test_utils::new_test_table_info(1024, vec![]);
+        view_info.table_type = TableType::View;
+        let logical_plan = vec![1, 2, 3];
+        // Create view metadata
+        table_metadata_manager
+            .create_view_metadata(view_info.clone().into(), logical_plan, HashSet::new())
+            .await
+            .unwrap();
+
+        let mut table_provider =
+            DfTableSourceProvider::new(catalog_manager, true, query_ctx, MockDecoder::arc());
+
+        // View not found
+        let table_ref = TableReference::bare("not_exists_view");
+        assert!(table_provider.resolve_table(table_ref).await.is_err());
+
+        let table_ref = TableReference::bare(view_info.name);
+        let source = table_provider.resolve_table(table_ref).await.unwrap();
+        assert_eq!(*source.get_logical_plan().unwrap(), mock_plan());
+    }
 }
129
src/catalog/src/table_source/dummy_catalog.rs
Normal file
@@ -0,0 +1,129 @@
// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

//! Dummy catalog for region server.

use std::any::Any;
use std::sync::Arc;

use async_trait::async_trait;
use common_catalog::format_full_table_name;
use datafusion::catalog::schema::SchemaProvider;
use datafusion::catalog::{CatalogProvider, CatalogProviderList};
use datafusion::datasource::TableProvider;
use snafu::OptionExt;
use table::table::adapter::DfTableProviderAdapter;

use crate::error::TableNotExistSnafu;
use crate::CatalogManagerRef;

/// Delegate the resolving requests to the `[CatalogManager]` unconditionally.
#[derive(Clone)]
pub struct DummyCatalogList {
    catalog_manager: CatalogManagerRef,
}

impl DummyCatalogList {
    /// Creates a new catalog list with the given catalog manager.
    pub fn new(catalog_manager: CatalogManagerRef) -> Self {
        Self { catalog_manager }
    }
}

impl CatalogProviderList for DummyCatalogList {
    fn as_any(&self) -> &dyn Any {
        self
    }

    fn register_catalog(
        &self,
        _name: String,
        _catalog: Arc<dyn CatalogProvider>,
    ) -> Option<Arc<dyn CatalogProvider>> {
        None
    }

    fn catalog_names(&self) -> Vec<String> {
        vec![]
    }

    fn catalog(&self, catalog_name: &str) -> Option<Arc<dyn CatalogProvider>> {
        Some(Arc::new(DummyCatalogProvider {
            catalog_name: catalog_name.to_string(),
            catalog_manager: self.catalog_manager.clone(),
        }))
    }
}

/// A dummy catalog provider for [DummyCatalogList].
#[derive(Clone)]
struct DummyCatalogProvider {
    catalog_name: String,
    catalog_manager: CatalogManagerRef,
}

impl CatalogProvider for DummyCatalogProvider {
    fn as_any(&self) -> &dyn Any {
        self
    }

    fn schema_names(&self) -> Vec<String> {
        vec![]
    }

    fn schema(&self, schema_name: &str) -> Option<Arc<dyn SchemaProvider>> {
        Some(Arc::new(DummySchemaProvider {
            catalog_name: self.catalog_name.clone(),
            schema_name: schema_name.to_string(),
            catalog_manager: self.catalog_manager.clone(),
        }))
    }
}

/// A dummy schema provider for [DummyCatalogList].
#[derive(Clone)]
struct DummySchemaProvider {
    catalog_name: String,
    schema_name: String,
    catalog_manager: CatalogManagerRef,
}

#[async_trait]
impl SchemaProvider for DummySchemaProvider {
    fn as_any(&self) -> &dyn Any {
        self
    }

    fn table_names(&self) -> Vec<String> {
        vec![]
    }

    async fn table(&self, name: &str) -> datafusion::error::Result<Option<Arc<dyn TableProvider>>> {
        let table = self
            .catalog_manager
            .table(&self.catalog_name, &self.schema_name, name)
            .await?
            .with_context(|| TableNotExistSnafu {
                table: format_full_table_name(&self.catalog_name, &self.schema_name, name),
            })?;

        let table_provider: Arc<dyn TableProvider> = Arc::new(DfTableProviderAdapter::new(table));

        Ok(Some(table_provider))
    }

    fn table_exist(&self, _name: &str) -> bool {
        true
    }
}
@@ -31,9 +31,11 @@ moka = { workspace = true, features = ["future"] }
 parking_lot = "0.12"
 prometheus.workspace = true
 prost.workspace = true
+query.workspace = true
 rand.workspace = true
 serde_json.workspace = true
 snafu.workspace = true
+substrait.workspace = true
 tokio.workspace = true
 tokio-stream = { workspace = true, features = ["net"] }
 tonic.workspace = true
@@ -42,7 +44,6 @@ tonic.workspace = true
 common-grpc-expr.workspace = true
 datanode.workspace = true
 derive-new = "0.5"
-substrait.workspace = true
 tracing = "0.1"
 tracing-subscriber = { version = "0.3", features = ["env-filter"] }
@@ -173,14 +173,14 @@ impl Client {
         Ok(FlightClient { addr, client })
     }

-    pub(crate) fn raw_region_client(&self) -> Result<PbRegionClient<Channel>> {
-        let (_, channel) = self.find_channel()?;
+    pub(crate) fn raw_region_client(&self) -> Result<(String, PbRegionClient<Channel>)> {
+        let (addr, channel) = self.find_channel()?;
         let client = PbRegionClient::new(channel)
             .max_decoding_message_size(self.max_grpc_recv_message_size())
             .max_encoding_message_size(self.max_grpc_send_message_size())
             .accept_compressed(CompressionEncoding::Zstd)
             .send_compressed(CompressionEncoding::Zstd);
-        Ok(client)
+        Ok((addr, client))
     }

     pub fn make_prometheus_gateway_client(&self) -> Result<PrometheusGatewayClient<Channel>> {
@@ -23,8 +23,6 @@ use api::v1::{
 };
 use arrow_flight::Ticket;
 use async_stream::stream;
-use client::error::{ConvertFlightDataSnafu, Error, IllegalFlightMessagesSnafu, ServerSnafu};
-use client::{from_grpc_response, Client, Result};
 use common_error::ext::{BoxedError, ErrorExt};
 use common_grpc::flight::{FlightDecoder, FlightMessage};
 use common_query::Output;
@@ -37,7 +35,8 @@ use prost::Message;
 use snafu::{ensure, ResultExt};
 use tonic::transport::Channel;

-pub const DEFAULT_LOOKBACK_STRING: &str = "5m";
+use crate::error::{ConvertFlightDataSnafu, Error, IllegalFlightMessagesSnafu, ServerSnafu};
+use crate::{from_grpc_response, Client, Result};

 #[derive(Clone, Debug, Default)]
 pub struct Database {
@@ -105,10 +104,18 @@ impl Database {
         self.catalog = catalog.into();
     }

+    pub fn catalog(&self) -> &String {
+        &self.catalog
+    }
+
     pub fn set_schema(&mut self, schema: impl Into<String>) {
         self.schema = schema.into();
     }

+    pub fn schema(&self) -> &String {
+        &self.schema
+    }
+
     pub fn set_timezone(&mut self, timezone: impl Into<String>) {
         self.timezone = timezone.into();
     }
@@ -156,6 +163,13 @@ impl Database {
         .await
     }

+    pub async fn logical_plan(&self, logical_plan: Vec<u8>) -> Result<Output> {
+        self.do_get(Request::Query(QueryRequest {
+            query: Some(Query::LogicalPlan(logical_plan)),
+        }))
+        .await
+    }
+
     pub async fn create(&self, expr: CreateTableExpr) -> Result<Output> {
         self.do_get(Request::Ddl(DdlRequest {
             expr: Some(DdlExpr::CreateTable(expr)),
@@ -269,17 +283,12 @@ struct FlightContext {

 #[cfg(test)]
 mod tests {
+    use std::assert_matches::assert_matches;
+
     use api::v1::auth_header::AuthScheme;
     use api::v1::{AuthHeader, Basic};
-    use clap::Parser;
-    use client::Client;
-    use cmd::error::Result as CmdResult;
-    use cmd::options::GlobalOptions;
-    use cmd::{cli, standalone, App};
-    use common_catalog::consts::{DEFAULT_CATALOG_NAME, DEFAULT_SCHEMA_NAME};
-    use common_telemetry::logging::LoggingOptions;

-    use super::{Database, FlightContext};
+    use super::*;

     #[test]
     fn test_flight_ctx() {
@@ -295,76 +304,11 @@ mod tests {
             auth_scheme: Some(basic),
         });

-        assert!(matches!(
+        assert_matches!(
             ctx.auth_header,
             Some(AuthHeader {
                 auth_scheme: Some(AuthScheme::Basic(_)),
             })
-        ))
-    }
+        )

-    #[tokio::test(flavor = "multi_thread")]
-    async fn test_export_create_table_with_quoted_names() -> CmdResult<()> {
-        let output_dir = tempfile::tempdir().unwrap();
-
-        let standalone = standalone::Command::parse_from([
-            "standalone",
-            "start",
-            "--data-home",
-            &*output_dir.path().to_string_lossy(),
-        ]);
-
-        let standalone_opts = standalone.load_options(&GlobalOptions::default()).unwrap();
-        let mut instance = standalone.build(standalone_opts).await?;
-        instance.start().await?;
-
-        let client = Client::with_urls(["127.0.0.1:4001"]);
-        let database = Database::new(DEFAULT_CATALOG_NAME, DEFAULT_SCHEMA_NAME, client);
-        database
-            .sql(r#"CREATE DATABASE "cli.export.create_table";"#)
-            .await
-            .unwrap();
-        database
-            .sql(
-                r#"CREATE TABLE "cli.export.create_table"."a.b.c"(
-                    ts TIMESTAMP,
-                    TIME INDEX (ts)
-                ) engine=mito;
-                "#,
-            )
-            .await
-            .unwrap();
-
-        let output_dir = tempfile::tempdir().unwrap();
-        let cli = cli::Command::parse_from([
-            "cli",
-            "export",
-            "--addr",
-            "127.0.0.1:4000",
-            "--output-dir",
-            &*output_dir.path().to_string_lossy(),
-            "--target",
-            "create-table",
-        ]);
-        let mut cli_app = cli.build(LoggingOptions::default()).await?;
-        cli_app.start().await?;
-
-        instance.stop().await?;
-
-        let output_file = output_dir
-            .path()
-            .join("greptime-cli.export.create_table.sql");
-        let res = std::fs::read_to_string(output_file).unwrap();
-        let expect = r#"CREATE TABLE IF NOT EXISTS "a.b.c" (
-  "ts" TIMESTAMP(3) NOT NULL,
-  TIME INDEX ("ts")
-)
-
-ENGINE=mito
-;
-"#;
-        assert_eq!(res.trim(), expect.trim());
-
-        Ok(())
     }
 }
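A hedged usage sketch for the `Database` accessors and the new `logical_plan` method added above. The address, catalog and schema names, and the plan bytes are placeholders, and `Database` is only exported when the client crate's `testing` feature is enabled (see the feature gate in the client lib.rs hunk further below):

// Sketch only: `encoded_plan` is assumed to be an already substrait-encoded logical plan.
use client::{Client, Database};

async fn submit_encoded_plan(encoded_plan: Vec<u8>) -> client::Result<()> {
    let client = Client::with_urls(["127.0.0.1:4001"]);
    let mut db = Database::new("greptime", "public", client);

    db.set_schema("public");
    assert_eq!(db.catalog(), "greptime");
    assert_eq!(db.schema(), "public");

    // `logical_plan` ships the encoded plan directly instead of SQL text.
    let _output = db.logical_plan(encoded_plan).await?;
    Ok(())
}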
@@ -89,8 +89,9 @@ pub enum Error {
         source: common_grpc::error::Error,
     },

-    #[snafu(display("Failed to request RegionServer, code: {}", code))]
+    #[snafu(display("Failed to request RegionServer {}, code: {}", addr, code))]
     RegionServer {
+        addr: String,
         code: Code,
         source: BoxedError,
         #[snafu(implicit)]
@@ -191,6 +192,9 @@ impl Error {
             } | Self::RegionServer {
                 code: Code::Unavailable,
                 ..
+            } | Self::RegionServer {
+                code: Code::Unknown,
+                ..
             }
         )
     }
@@ -12,8 +12,12 @@
 // See the License for the specific language governing permissions and
 // limitations under the License.

+#![feature(assert_matches)]
+
 mod client;
 pub mod client_manager;
+#[cfg(feature = "testing")]
+mod database;
 pub mod error;
 pub mod load_balance;
 mod metrics;
@@ -29,6 +33,8 @@ pub use common_recordbatch::{RecordBatches, SendableRecordBatchStream};
 use snafu::OptionExt;

 pub use self::client::Client;
+#[cfg(feature = "testing")]
+pub use self::database::Database;
 pub use self::error::{Error, Result};
 use crate::error::{IllegalDatabaseResponseSnafu, ServerSnafu};
|
|||||||
@@ -15,7 +15,7 @@
|
|||||||
use std::sync::Arc;
|
use std::sync::Arc;
|
||||||
|
|
||||||
use api::region::RegionResponse;
|
use api::region::RegionResponse;
|
||||||
use api::v1::region::{QueryRequest, RegionRequest};
|
use api::v1::region::RegionRequest;
|
||||||
use api::v1::ResponseHeader;
|
use api::v1::ResponseHeader;
|
||||||
use arc_swap::ArcSwapOption;
|
use arc_swap::ArcSwapOption;
|
||||||
use arrow_flight::Ticket;
|
use arrow_flight::Ticket;
|
||||||
@@ -26,12 +26,15 @@ use common_error::status_code::StatusCode;
|
|||||||
use common_grpc::flight::{FlightDecoder, FlightMessage};
|
use common_grpc::flight::{FlightDecoder, FlightMessage};
|
||||||
use common_meta::error::{self as meta_error, Result as MetaResult};
|
use common_meta::error::{self as meta_error, Result as MetaResult};
|
||||||
use common_meta::node_manager::Datanode;
|
use common_meta::node_manager::Datanode;
|
||||||
|
use common_query::request::QueryRequest;
|
||||||
use common_recordbatch::error::ExternalSnafu;
|
use common_recordbatch::error::ExternalSnafu;
|
||||||
use common_recordbatch::{RecordBatchStreamWrapper, SendableRecordBatchStream};
|
use common_recordbatch::{RecordBatchStreamWrapper, SendableRecordBatchStream};
|
||||||
use common_telemetry::error;
|
use common_telemetry::error;
|
||||||
use common_telemetry::tracing_context::TracingContext;
|
use common_telemetry::tracing_context::TracingContext;
|
||||||
use prost::Message;
|
use prost::Message;
|
||||||
|
use query::query_engine::DefaultSerializer;
|
||||||
use snafu::{location, Location, OptionExt, ResultExt};
|
use snafu::{location, Location, OptionExt, ResultExt};
|
||||||
|
use substrait::{DFLogicalSubstraitConvertor, SubstraitPlan};
|
||||||
use tokio_stream::StreamExt;
|
use tokio_stream::StreamExt;
|
||||||
|
|
||||||
use crate::error::{
|
use crate::error::{
|
||||||
@@ -63,6 +66,17 @@ impl Datanode for RegionRequester {
|
|||||||
}
|
}
|
||||||
|
|
||||||
async fn handle_query(&self, request: QueryRequest) -> MetaResult<SendableRecordBatchStream> {
|
async fn handle_query(&self, request: QueryRequest) -> MetaResult<SendableRecordBatchStream> {
|
||||||
|
let plan = DFLogicalSubstraitConvertor
|
||||||
|
.encode(&request.plan, DefaultSerializer)
|
||||||
|
.map_err(BoxedError::new)
|
||||||
|
.context(meta_error::ExternalSnafu)?
|
||||||
|
.to_vec();
|
||||||
|
let request = api::v1::region::QueryRequest {
|
||||||
|
header: request.header,
|
||||||
|
region_id: request.region_id.as_u64(),
|
||||||
|
plan,
|
||||||
|
};
|
||||||
|
|
||||||
let ticket = Ticket {
|
let ticket = Ticket {
|
||||||
ticket: request.encode_to_vec().into(),
|
ticket: request.encode_to_vec().into(),
|
||||||
};
|
};
|
||||||
@@ -177,7 +191,7 @@ impl RegionRequester {
|
|||||||
.with_label_values(&[request_type.as_str()])
|
.with_label_values(&[request_type.as_str()])
|
||||||
.start_timer();
|
.start_timer();
|
||||||
|
|
||||||
let mut client = self.client.raw_region_client()?;
|
let (addr, mut client) = self.client.raw_region_client()?;
|
||||||
|
|
||||||
let response = client
|
let response = client
|
||||||
.handle(request)
|
.handle(request)
|
||||||
@@ -187,6 +201,7 @@ impl RegionRequester {
|
|||||||
let err: error::Error = e.into();
|
let err: error::Error = e.into();
|
||||||
// Uses `Error::RegionServer` instead of `Error::Server`
|
// Uses `Error::RegionServer` instead of `Error::Server`
|
||||||
error::Error::RegionServer {
|
error::Error::RegionServer {
|
||||||
|
addr,
|
||||||
code,
|
code,
|
||||||
source: BoxedError::new(err),
|
source: BoxedError::new(err),
|
||||||
location: location!(),
|
location: location!(),
|
||||||
|
|||||||
@@ -80,6 +80,7 @@ tracing-appender = "0.2"
 tikv-jemallocator = "0.5"

 [dev-dependencies]
+client = { workspace = true, features = ["testing"] }
 common-test-util.workspace = true
 serde.workspace = true
 temp-env = "0.3"
@@ -22,18 +22,14 @@ mod helper;

 // Wait for https://github.com/GreptimeTeam/greptimedb/issues/2373
 #[allow(unused)]
-// mod repl;
-// TODO(weny): Removes it
-#[allow(deprecated)]
-mod upgrade;
+mod repl;

 use async_trait::async_trait;
 use bench::BenchTableMetadataCommand;
 use clap::Parser;
 use common_telemetry::logging::{LoggingOptions, TracingOptions};
+pub use repl::Repl;
 use tracing_appender::non_blocking::WorkerGuard;
-// pub use repl::Repl;
-use upgrade::UpgradeCommand;

 use self::export::ExportCommand;
 use crate::error::Result;
@@ -116,7 +112,6 @@ impl Command {
 #[derive(Parser)]
 enum SubCommand {
     // Attach(AttachCommand),
-    Upgrade(UpgradeCommand),
     Bench(BenchTableMetadataCommand),
     Export(ExportCommand),
 }
@@ -125,7 +120,6 @@ impl SubCommand {
     async fn build(&self, guard: Vec<WorkerGuard>) -> Result<Instance> {
         match self {
             // SubCommand::Attach(cmd) => cmd.build().await,
-            SubCommand::Upgrade(cmd) => cmd.build(guard).await,
             SubCommand::Bench(cmd) => cmd.build(guard).await,
             SubCommand::Export(cmd) => cmd.build(guard).await,
         }
|||||||
@@ -23,13 +23,13 @@ use common_meta::key::{TableMetadataManager, TableMetadataManagerRef};
|
|||||||
use common_meta::kv_backend::etcd::EtcdStore;
|
use common_meta::kv_backend::etcd::EtcdStore;
|
||||||
use common_meta::peer::Peer;
|
use common_meta::peer::Peer;
|
||||||
use common_meta::rpc::router::{Region, RegionRoute};
|
use common_meta::rpc::router::{Region, RegionRoute};
|
||||||
use common_meta::table_name::TableName;
|
|
||||||
use common_telemetry::info;
|
use common_telemetry::info;
|
||||||
use datatypes::data_type::ConcreteDataType;
|
use datatypes::data_type::ConcreteDataType;
|
||||||
use datatypes::schema::{ColumnSchema, RawSchema};
|
use datatypes::schema::{ColumnSchema, RawSchema};
|
||||||
use rand::Rng;
|
use rand::Rng;
|
||||||
use store_api::storage::RegionNumber;
|
use store_api::storage::RegionNumber;
|
||||||
use table::metadata::{RawTableInfo, RawTableMeta, TableId, TableIdent, TableType};
|
use table::metadata::{RawTableInfo, RawTableMeta, TableId, TableIdent, TableType};
|
||||||
|
use table::table_name::TableName;
|
||||||
use tracing_appender::non_blocking::WorkerGuard;
|
use tracing_appender::non_blocking::WorkerGuard;
|
||||||
|
|
||||||
use self::metadata::TableMetadataBencher;
|
use self::metadata::TableMetadataBencher;
|
||||||
|
|||||||
@@ -16,7 +16,7 @@ use std::time::Instant;

 use common_meta::key::table_route::TableRouteValue;
 use common_meta::key::TableMetadataManagerRef;
-use common_meta::table_name::TableName;
+use table::table_name::TableName;

 use crate::cli::bench::{
     bench_self_recorded, create_region_routes, create_region_wal_options, create_table_info,
@@ -434,3 +434,80 @@ fn split_database(database: &str) -> Result<(String, Option<String>)> {
         Ok((catalog.to_string(), Some(schema.to_string())))
     }
 }
+
+#[cfg(test)]
+mod tests {
+    use clap::Parser;
+    use client::{Client, Database};
+    use common_catalog::consts::{DEFAULT_CATALOG_NAME, DEFAULT_SCHEMA_NAME};
+    use common_telemetry::logging::LoggingOptions;
+
+    use crate::error::Result as CmdResult;
+    use crate::options::GlobalOptions;
+    use crate::{cli, standalone, App};
+
+    #[tokio::test(flavor = "multi_thread")]
+    async fn test_export_create_table_with_quoted_names() -> CmdResult<()> {
+        let output_dir = tempfile::tempdir().unwrap();
+
+        let standalone = standalone::Command::parse_from([
+            "standalone",
+            "start",
+            "--data-home",
+            &*output_dir.path().to_string_lossy(),
+        ]);
+
+        let standalone_opts = standalone.load_options(&GlobalOptions::default()).unwrap();
+        let mut instance = standalone.build(standalone_opts).await?;
+        instance.start().await?;
+
+        let client = Client::with_urls(["127.0.0.1:4001"]);
+        let database = Database::new(DEFAULT_CATALOG_NAME, DEFAULT_SCHEMA_NAME, client);
+        database
+            .sql(r#"CREATE DATABASE "cli.export.create_table";"#)
+            .await
+            .unwrap();
+        database
+            .sql(
+                r#"CREATE TABLE "cli.export.create_table"."a.b.c"(
+                    ts TIMESTAMP,
+                    TIME INDEX (ts)
+                ) engine=mito;
+                "#,
+            )
+            .await
+            .unwrap();
+
+        let output_dir = tempfile::tempdir().unwrap();
+        let cli = cli::Command::parse_from([
+            "cli",
+            "export",
+            "--addr",
+            "127.0.0.1:4000",
+            "--output-dir",
+            &*output_dir.path().to_string_lossy(),
+            "--target",
+            "create-table",
+        ]);
+        let mut cli_app = cli.build(LoggingOptions::default()).await?;
+        cli_app.start().await?;
+
+        instance.stop().await?;
+
+        let output_file = output_dir
+            .path()
+            .join("greptime-cli.export.create_table.sql");
+        let res = std::fs::read_to_string(output_file).unwrap();
+        let expect = r#"CREATE TABLE IF NOT EXISTS "a.b.c" (
+"ts" TIMESTAMP(3) NOT NULL,
+TIME INDEX ("ts")
+)
+
+ENGINE=mito
+;
+"#;
+        assert_eq!(res.trim(), expect.trim());
+
+        Ok(())
+    }
+}
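The new test never shells out: it drives both the standalone server and the exporter in-process by handing an argv-style slice to clap's Parser::parse_from and then building the command. A condensed sketch of that pattern is below; ExportArgs is a hypothetical stand-in, not the project's actual command type, and the snippet assumes the clap crate with its derive feature.

    use clap::Parser;

    #[derive(Parser, Debug)]
    struct ExportArgs {
        #[arg(long)]
        addr: String,
        #[arg(long)]
        output_dir: String,
        #[arg(long)]
        target: String,
    }

    fn main() {
        // Same shape as `cli::Command::parse_from([...])` in the test above.
        let args = ExportArgs::parse_from([
            "export",
            "--addr", "127.0.0.1:4000",
            "--output-dir", "/tmp/export",
            "--target", "create-table",
        ]);
        // The test then starts the command and asserts on the exported CREATE TABLE file,
        // checking that quoted identifiers such as "a.b.c" survive the round trip.
        println!("{args:?}");
    }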
@@ -16,14 +16,18 @@ use std::path::PathBuf;
 use std::sync::Arc;
 use std::time::Instant;

-use catalog::kvbackend::{
-    CachedMetaKvBackend, CachedMetaKvBackendBuilder, KvBackendCatalogManager,
+use cache::{
+    build_fundamental_cache_registry, with_default_composite_cache_registry, TABLE_CACHE_NAME,
+    TABLE_ROUTE_CACHE_NAME,
 };
-use client::{Client, OutputData, DEFAULT_CATALOG_NAME, DEFAULT_SCHEMA_NAME};
+use catalog::kvbackend::{
+    CachedMetaKvBackend, CachedMetaKvBackendBuilder, KvBackendCatalogManager, MetaKvBackend,
+};
+use client::{Client, Database, OutputData, DEFAULT_CATALOG_NAME, DEFAULT_SCHEMA_NAME};
 use common_base::Plugins;
 use common_config::Mode;
 use common_error::ext::ErrorExt;
-use common_meta::cache_invalidator::MultiCacheInvalidator;
+use common_meta::cache::{CacheRegistryBuilder, LayeredCacheRegistryBuilder};
 use common_query::Output;
 use common_recordbatch::RecordBatches;
 use common_telemetry::debug;
@@ -33,17 +37,18 @@ use query::datafusion::DatafusionQueryEngine;
 use query::logical_optimizer::LogicalOptimizer;
 use query::parser::QueryLanguageParser;
 use query::plan::LogicalPlan;
-use query::query_engine::QueryEngineState;
+use query::query_engine::{DefaultSerializer, QueryEngineState};
 use query::QueryEngine;
 use rustyline::error::ReadlineError;
 use rustyline::Editor;
 use session::context::QueryContext;
-use snafu::ResultExt;
+use snafu::{OptionExt, ResultExt};
 use substrait::{DFLogicalSubstraitConvertor, SubstraitPlan};

 use crate::cli::cmd::ReplCommand;
 use crate::cli::helper::RustylineHelper;
 use crate::cli::AttachCommand;
+use crate::error;
 use crate::error::{
     CollectRecordBatchesSnafu, ParseSqlSnafu, PlanStatementSnafu, PrettyPrintRecordBatchesSnafu,
     ReadlineSnafu, ReplCreationSnafu, RequestDatabaseSnafu, Result, StartMetaClientSnafu,
@@ -180,7 +185,7 @@ impl Repl {
             .context(PlanStatementSnafu)?;

         let plan = DFLogicalSubstraitConvertor {}
-            .encode(&plan)
+            .encode(&plan, DefaultSerializer)
            .context(SubstraitEncodeLogicalPlanSnafu)?;

         self.database.logical_plan(plan.to_vec()).await
@@ -257,19 +262,30 @@ async fn create_query_engine(meta_addr: &str) -> Result<DatafusionQueryEngine> {

     let cached_meta_backend =
         Arc::new(CachedMetaKvBackendBuilder::new(meta_client.clone()).build());
-    let multi_cache_invalidator = Arc::new(MultiCacheInvalidator::with_invalidators(vec![
-        cached_meta_backend.clone(),
-    ]));
-    let catalog_list = KvBackendCatalogManager::new(
+    let layered_cache_builder = LayeredCacheRegistryBuilder::default().add_cache_registry(
+        CacheRegistryBuilder::default()
+            .add_cache(cached_meta_backend.clone())
+            .build(),
+    );
+    let fundamental_cache_registry =
+        build_fundamental_cache_registry(Arc::new(MetaKvBackend::new(meta_client.clone())));
+    let layered_cache_registry = Arc::new(
+        with_default_composite_cache_registry(
+            layered_cache_builder.add_cache_registry(fundamental_cache_registry),
+        )
+        .context(error::BuildCacheRegistrySnafu)?
+        .build(),
+    );
+
+    let catalog_manager = KvBackendCatalogManager::new(
         Mode::Distributed,
         Some(meta_client.clone()),
         cached_meta_backend.clone(),
-        multi_cache_invalidator,
-    )
-    .await;
+        layered_cache_registry,
+    );
     let plugins: Plugins = Default::default();
     let state = Arc::new(QueryEngineState::new(
-        catalog_list,
+        catalog_manager,
         None,
         None,
         None,
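The REPL now assembles the same layered cache registry that the frontend uses: type-specific caches are registered once and later fetched by type, so the catalog manager can pull what it needs from one shared registry instead of receiving each cache separately. The std-only analogue below illustrates that registry pattern; it is not the API of common_meta::cache, and the cache types are toy stand-ins.

    use std::any::{Any, TypeId};
    use std::collections::HashMap;
    use std::sync::Arc;

    #[derive(Default)]
    struct Registry {
        caches: HashMap<TypeId, Arc<dyn Any + Send + Sync>>,
    }

    impl Registry {
        // Register a cache under its concrete type.
        fn add<T: Any + Send + Sync>(&mut self, cache: Arc<T>) {
            self.caches.insert(TypeId::of::<T>(), cache);
        }

        // Fetch a previously registered cache by type.
        fn get<T: Any + Send + Sync>(&self) -> Option<Arc<T>> {
            self.caches
                .get(&TypeId::of::<T>())
                .cloned()
                .and_then(|c| c.downcast::<T>().ok())
        }
    }

    // Toy stand-ins for the table and table-route caches.
    struct TableCache(&'static str);
    struct TableRouteCache(&'static str);

    fn main() {
        let mut registry = Registry::default();
        registry.add(Arc::new(TableCache("table cache")));
        registry.add(Arc::new(TableRouteCache("table route cache")));

        // A consumer asks the registry for the cache type it needs.
        let table_cache: Arc<TableCache> = registry.get().expect("table cache registered");
        println!("{}", table_cache.0);
    }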
@@ -1,584 +0,0 @@
(The whole file is deleted in this commit. The 584 removed lines held the deprecated v0.3-to-v0.4 metadata migration tool: the Apache 2.0 license header; the clap-based UpgradeCommand with the flags --etcd-addr, --dryrun, --skip-table-global-keys, --skip-catalog-keys, --skip-schema-keys and --skip-table-route-keys; the MigrateTableMetadata implementation of Tool, which paginated over the old etcd key prefixes "__meta_table_route", "__c", "__s" and "__tg" with PaginationStream (PAGE_SIZE = 1000), rewrote them into the new TableRouteKey, CatalogNameKey, SchemaNameKey, TableNameKey, TableInfoKey/TableRegionKey and DatanodeTableKey entries, honored --dryrun, and batch-deleted the migrated keys; and the #[deprecated] v1_helper module with the old regex-based CatalogKey and SchemaKey parsers, the TableGlobalValue type, and their error definitions.)
@@ -23,7 +23,6 @@ use common_telemetry::info;
 use common_telemetry::logging::TracingOptions;
 use common_version::{short_version, version};
 use common_wal::config::DatanodeWalConfig;
-use datanode::config::DatanodeOptions;
 use datanode::datanode::{Datanode, DatanodeBuilder};
 use datanode::service::DatanodeServiceBuilder;
 use meta_client::MetaClientOptions;
@@ -34,11 +33,13 @@ use tracing_appender::non_blocking::WorkerGuard;
 use crate::error::{
     LoadLayeredConfigSnafu, MissingConfigSnafu, Result, ShutdownDatanodeSnafu, StartDatanodeSnafu,
 };
-use crate::options::GlobalOptions;
+use crate::options::{GlobalOptions, GreptimeOptions};
 use crate::{log_versions, App};

 pub const APP_NAME: &str = "greptime-datanode";

+type DatanodeOptions = GreptimeOptions<datanode::config::DatanodeOptions>;
+
 pub struct Instance {
     datanode: Datanode,
@@ -97,7 +98,9 @@ impl Command {
     }

     pub fn load_options(&self, global_options: &GlobalOptions) -> Result<DatanodeOptions> {
-        self.subcmd.load_options(global_options)
+        match &self.subcmd {
+            SubCommand::Start(cmd) => cmd.load_options(global_options),
+        }
     }
 }
@@ -112,12 +115,6 @@ impl SubCommand {
             SubCommand::Start(cmd) => cmd.build(opts).await,
         }
     }
-
-    fn load_options(&self, global_options: &GlobalOptions) -> Result<DatanodeOptions> {
-        match self {
-            SubCommand::Start(cmd) => cmd.load_options(global_options),
-        }
-    }
 }

 #[derive(Debug, Parser, Default)]
@@ -146,22 +143,25 @@ struct StartCommand {

 impl StartCommand {
     fn load_options(&self, global_options: &GlobalOptions) -> Result<DatanodeOptions> {
-        self.merge_with_cli_options(
-            global_options,
-            DatanodeOptions::load_layered_options(
-                self.config_file.as_deref(),
-                self.env_prefix.as_ref(),
-            )
-            .context(LoadLayeredConfigSnafu)?,
+        let mut opts = DatanodeOptions::load_layered_options(
+            self.config_file.as_deref(),
+            self.env_prefix.as_ref(),
         )
+        .context(LoadLayeredConfigSnafu)?;
+
+        self.merge_with_cli_options(global_options, &mut opts)?;
+
+        Ok(opts)
     }

     // The precedence order is: cli > config file > environment variables > default values.
     fn merge_with_cli_options(
         &self,
         global_options: &GlobalOptions,
-        mut opts: DatanodeOptions,
-    ) -> Result<DatanodeOptions> {
+        opts: &mut DatanodeOptions,
+    ) -> Result<()> {
+        let opts = &mut opts.component;
+
         if let Some(dir) = &global_options.log_dir {
             opts.logging.dir.clone_from(dir);
         }
@@ -231,25 +231,28 @@ impl StartCommand {
         // Disable dashboard in datanode.
         opts.http.disable_dashboard = true;

-        Ok(opts)
+        Ok(())
     }

-    async fn build(&self, mut opts: DatanodeOptions) -> Result<Instance> {
+    async fn build(&self, opts: DatanodeOptions) -> Result<Instance> {
+        common_runtime::init_global_runtimes(&opts.runtime);
+
         let guard = common_telemetry::init_global_logging(
             APP_NAME,
-            &opts.logging,
-            &opts.tracing,
-            opts.node_id.map(|x| x.to_string()),
+            &opts.component.logging,
+            &opts.component.tracing,
+            opts.component.node_id.map(|x| x.to_string()),
         );
         log_versions(version!(), short_version!());

+        info!("Datanode start command: {:#?}", self);
+        info!("Datanode options: {:#?}", opts);
+
+        let mut opts = opts.component;
         let plugins = plugins::setup_datanode_plugins(&mut opts)
             .await
             .context(StartDatanodeSnafu)?;

-        info!("Datanode start command: {:#?}", self);
-        info!("Datanode options: {:#?}", opts);
-
         let node_id = opts
             .node_id
             .context(MissingConfigSnafu { msg: "'node_id'" })?;
@@ -353,7 +356,7 @@ mod tests {
             ..Default::default()
         };

-        let options = cmd.load_options(&GlobalOptions::default()).unwrap();
+        let options = cmd.load_options(&Default::default()).unwrap().component;

         assert_eq!("127.0.0.1:3001".to_string(), options.rpc_addr);
         assert_eq!(Some(42), options.node_id);
@@ -414,7 +417,8 @@ mod tests {
     fn test_try_from_cmd() {
         let opt = StartCommand::default()
             .load_options(&GlobalOptions::default())
-            .unwrap();
+            .unwrap()
+            .component;
         assert_eq!(Mode::Standalone, opt.mode);

         let opt = (StartCommand {
@@ -423,7 +427,8 @@ mod tests {
             ..Default::default()
         })
         .load_options(&GlobalOptions::default())
-        .unwrap();
+        .unwrap()
+        .component;
         assert_eq!(Mode::Distributed, opt.mode);

         assert!((StartCommand {
@@ -454,7 +459,8 @@ mod tests {
                 #[cfg(feature = "tokio-console")]
                 tokio_console_addr: None,
             })
-            .unwrap();
+            .unwrap()
+            .component;

         let logging_opt = options.logging;
         assert_eq!("/tmp/greptimedb/test/logs", logging_opt.dir);
@@ -536,7 +542,7 @@ mod tests {
                 ..Default::default()
             };

-            let opts = command.load_options(&GlobalOptions::default()).unwrap();
+            let opts = command.load_options(&Default::default()).unwrap().component;

             // Should be read from env, env > default values.
             let DatanodeWalConfig::RaftEngine(raft_engine_config) = opts.wal else {
@@ -562,7 +568,10 @@ mod tests {
             assert_eq!(raft_engine_config.dir.unwrap(), "/other/wal/dir");

             // Should be default value.
-            assert_eq!(opts.http.addr, DatanodeOptions::default().http.addr);
+            assert_eq!(
+                opts.http.addr,
+                DatanodeOptions::default().component.http.addr
+            );
         },
     );
 }
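After this change, every component binary loads its options the same way: layer the defaults, environment and config file first, then let CLI flags override the result in place, and only then unwrap the component-specific part. A std-only sketch of that precedence order follows; the field names are illustrative, not the real options structs.

    // Precedence: defaults < environment (< config file, omitted here) < CLI flags.
    #[derive(Debug, Clone)]
    struct Options {
        log_dir: String,
        http_addr: String,
    }

    impl Default for Options {
        fn default() -> Self {
            Self { log_dir: "/tmp/greptimedb/logs".into(), http_addr: "127.0.0.1:4000".into() }
        }
    }

    fn load_layered_options(env_prefix: &str) -> Options {
        let mut opts = Options::default();
        // Environment overrides defaults; a config-file layer would sit between the two.
        if let Ok(dir) = std::env::var(format!("{env_prefix}_LOG_DIR")) {
            opts.log_dir = dir;
        }
        opts
    }

    fn merge_with_cli_options(log_dir_flag: Option<&str>, opts: &mut Options) {
        // CLI flags win last, mirroring `merge_with_cli_options(global_options, &mut opts)?`.
        if let Some(dir) = log_dir_flag {
            opts.log_dir = dir.to_string();
        }
    }

    fn main() {
        let mut opts = load_layered_options("GREPTIMEDB_DATANODE");
        merge_with_cli_options(Some("/data/logs"), &mut opts);
        println!("{opts:?}");
    }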
@@ -163,6 +163,15 @@ pub enum Error {
         location: Location,
     },

+    #[snafu(display("Failed to request database, sql: {sql}"))]
+    RequestDatabase {
+        sql: String,
+        #[snafu(source)]
+        source: client::Error,
+        #[snafu(implicit)]
+        location: Location,
+    },
+
     #[snafu(display("Failed to collect RecordBatches"))]
     CollectRecordBatches {
         #[snafu(implicit)]
@@ -354,6 +363,7 @@ impl ErrorExt for Error {
             Error::ReplCreation { .. } | Error::Readline { .. } | Error::HttpQuerySql { .. } => {
                 StatusCode::Internal
             }
+            Error::RequestDatabase { source, .. } => source.status_code(),
             Error::CollectRecordBatches { source, .. }
             | Error::PrettyPrintRecordBatches { source, .. } => source.status_code(),
             Error::StartMetaClient { source, .. } => source.status_code(),
@@ -365,11 +375,11 @@ impl ErrorExt for Error {

             Error::SerdeJson { .. } | Error::FileIo { .. } => StatusCode::Unexpected,

-            Error::CacheRequired { .. } | Error::BuildCacheRegistry { .. } => StatusCode::Internal,
-
             Error::Other { source, .. } => source.status_code(),

             Error::BuildRuntime { source, .. } => source.status_code(),
+
+            Error::CacheRequired { .. } | Error::BuildCacheRegistry { .. } => StatusCode::Internal,
         }
     }
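The new RequestDatabase variant records the SQL statement that failed and wraps the client error, and the REPL attaches it through the snafu-generated context selector (RequestDatabaseSnafu, imported in the repl.rs hunk above). A small sketch of that call-site pattern is below, assuming the snafu crate; run_sql and its io::Error source are hypothetical stand-ins, and the implicit location field is omitted for brevity.

    use snafu::{ResultExt, Snafu};

    #[derive(Debug, Snafu)]
    enum Error {
        #[snafu(display("Failed to request database, sql: {sql}"))]
        RequestDatabase { sql: String, source: std::io::Error },
    }

    fn run_sql(_sql: &str) -> Result<(), std::io::Error> {
        Err(std::io::Error::new(std::io::ErrorKind::Other, "connection reset"))
    }

    fn execute(sql: &str) -> Result<(), Error> {
        // The generated `RequestDatabaseSnafu` selector fills in `sql` and wraps the source error.
        run_sql(sql).context(RequestDatabaseSnafu { sql })
    }

    fn main() {
        if let Err(e) = execute("SELECT 1") {
            eprintln!("{e}");
        }
    }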
@@ -16,10 +16,7 @@ use std::sync::Arc;
 use std::time::Duration;

 use async_trait::async_trait;
-use cache::{
-    build_fundamental_cache_registry, with_default_composite_cache_registry, TABLE_CACHE_NAME,
-    TABLE_ROUTE_CACHE_NAME,
-};
+use cache::{build_fundamental_cache_registry, with_default_composite_cache_registry};
 use catalog::kvbackend::{CachedMetaKvBackendBuilder, KvBackendCatalogManager, MetaKvBackend};
 use clap::Parser;
 use client::client_manager::DatanodeClients;
@@ -32,7 +29,6 @@ use common_telemetry::info;
 use common_telemetry::logging::TracingOptions;
 use common_time::timezone::set_default_timezone;
 use common_version::{short_version, version};
-use frontend::frontend::FrontendOptions;
 use frontend::heartbeat::handler::invalidate_table_cache::InvalidateTableCacheHandler;
 use frontend::heartbeat::HeartbeatTask;
 use frontend::instance::builder::FrontendBuilder;
@@ -47,9 +43,11 @@ use tracing_appender::non_blocking::WorkerGuard;
 use crate::error::{
     self, InitTimezoneSnafu, LoadLayeredConfigSnafu, MissingConfigSnafu, Result, StartFrontendSnafu,
 };
-use crate::options::GlobalOptions;
+use crate::options::{GlobalOptions, GreptimeOptions};
 use crate::{log_versions, App};

+type FrontendOptions = GreptimeOptions<frontend::frontend::FrontendOptions>;
+
 pub struct Instance {
     frontend: FeInstance,
@@ -167,22 +165,25 @@ pub struct StartCommand {

 impl StartCommand {
     fn load_options(&self, global_options: &GlobalOptions) -> Result<FrontendOptions> {
-        self.merge_with_cli_options(
-            global_options,
-            FrontendOptions::load_layered_options(
-                self.config_file.as_deref(),
-                self.env_prefix.as_ref(),
-            )
-            .context(LoadLayeredConfigSnafu)?,
+        let mut opts = FrontendOptions::load_layered_options(
+            self.config_file.as_deref(),
+            self.env_prefix.as_ref(),
         )
+        .context(LoadLayeredConfigSnafu)?;
+
+        self.merge_with_cli_options(global_options, &mut opts)?;
+
+        Ok(opts)
     }

     // The precedence order is: cli > config file > environment variables > default values.
     fn merge_with_cli_options(
         &self,
         global_options: &GlobalOptions,
-        mut opts: FrontendOptions,
-    ) -> Result<FrontendOptions> {
+        opts: &mut FrontendOptions,
+    ) -> Result<()> {
+        let opts = &mut opts.component;
+
         if let Some(dir) = &global_options.log_dir {
             opts.logging.dir.clone_from(dir);
         }
@@ -245,26 +246,29 @@ impl StartCommand {

         opts.user_provider.clone_from(&self.user_provider);

-        Ok(opts)
+        Ok(())
     }

-    async fn build(&self, mut opts: FrontendOptions) -> Result<Instance> {
+    async fn build(&self, opts: FrontendOptions) -> Result<Instance> {
+        common_runtime::init_global_runtimes(&opts.runtime);
+
         let guard = common_telemetry::init_global_logging(
             APP_NAME,
-            &opts.logging,
-            &opts.tracing,
-            opts.node_id.clone(),
+            &opts.component.logging,
+            &opts.component.tracing,
+            opts.component.node_id.clone(),
         );
         log_versions(version!(), short_version!());

+        info!("Frontend start command: {:#?}", self);
+        info!("Frontend options: {:#?}", opts);
+
+        let mut opts = opts.component;
         #[allow(clippy::unnecessary_mut_passed)]
         let plugins = plugins::setup_frontend_plugins(&mut opts)
             .await
             .context(StartFrontendSnafu)?;

-        info!("Frontend start command: {:#?}", self);
-        info!("Frontend options: {:#?}", opts);
-
         set_default_timezone(opts.default_timezone.as_deref()).context(InitTimezoneSnafu)?;

         let meta_client_options = opts.meta_client.as_ref().context(MissingConfigSnafu {
@@ -302,25 +306,12 @@ impl StartCommand {
                 .build(),
         );

-        let table_cache = layered_cache_registry
-            .get()
-            .context(error::CacheRequiredSnafu {
-                name: TABLE_CACHE_NAME,
-            })?;
-        let table_route_cache = layered_cache_registry
-            .get()
-            .context(error::CacheRequiredSnafu {
-                name: TABLE_ROUTE_CACHE_NAME,
-            })?;
         let catalog_manager = KvBackendCatalogManager::new(
             opts.mode,
             Some(meta_client.clone()),
             cached_meta_backend.clone(),
-            table_cache,
-            table_route_cache,
-        )
-        .await;
+            layered_cache_registry.clone(),
+        );

         let executor = HandlerGroupExecutor::new(vec![
             Arc::new(ParseMailboxMessageHandler),
@@ -396,14 +387,14 @@ mod tests {
             ..Default::default()
         };

-        let opts = command.load_options(&GlobalOptions::default()).unwrap();
+        let opts = command.load_options(&Default::default()).unwrap().component;

         assert_eq!(opts.http.addr, "127.0.0.1:1234");
         assert_eq!(ReadableSize::mb(64), opts.http.body_limit);
         assert_eq!(opts.mysql.addr, "127.0.0.1:5678");
         assert_eq!(opts.postgres.addr, "127.0.0.1:5432");

-        let default_opts = FrontendOptions::default();
+        let default_opts = FrontendOptions::default().component;

         assert_eq!(opts.grpc.addr, default_opts.grpc.addr);
         assert!(opts.mysql.enable);
@@ -444,7 +435,8 @@ mod tests {
             ..Default::default()
         };

-        let fe_opts = command.load_options(&GlobalOptions::default()).unwrap();
+        let fe_opts = command.load_options(&Default::default()).unwrap().component;

         assert_eq!(Mode::Distributed, fe_opts.mode);
         assert_eq!("127.0.0.1:4000".to_string(), fe_opts.http.addr);
         assert_eq!(Duration::from_secs(30), fe_opts.http.timeout);
@@ -458,7 +450,7 @@ mod tests {

     #[tokio::test]
     async fn test_try_from_start_command_to_anymap() {
-        let mut fe_opts = FrontendOptions {
+        let mut fe_opts = frontend::frontend::FrontendOptions {
             http: HttpOptions {
                 disable_dashboard: false,
                 ..Default::default()
@@ -495,7 +487,8 @@ mod tests {
                 #[cfg(feature = "tokio-console")]
                 tokio_console_addr: None,
             })
-            .unwrap();
+            .unwrap()
+            .component;

         let logging_opt = options.logging;
         assert_eq!("/tmp/greptimedb/test/logs", logging_opt.dir);
@@ -573,7 +566,7 @@ mod tests {
             ..Default::default()
         };

-        let fe_opts = command.load_options(&GlobalOptions::default()).unwrap();
+        let fe_opts = command.load_options(&Default::default()).unwrap().component;

         // Should be read from env, env > default values.
         assert_eq!(fe_opts.mysql.runtime_size, 11);
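A design note on the frontend hunk above: KvBackendCatalogManager::new is no longer awaited and no longer takes individually resolved caches, so the CacheRequired lookups disappear from the caller and the whole layered registry is handed over instead. The tiny sketch below only illustrates that shape with stand-in types; it is not the real constructor.

    use std::sync::Arc;

    struct Registry; // stands in for the layered cache registry

    struct CatalogManager {
        _registry: Arc<Registry>,
    }

    impl CatalogManager {
        // Now synchronous: no `.await` at the call site, and the manager resolves the
        // caches it needs from the registry internally.
        fn new(registry: Arc<Registry>) -> Arc<Self> {
            Arc::new(Self { _registry: registry })
        }
    }

    fn main() {
        let registry = Arc::new(Registry);
        let manager = CatalogManager::new(registry.clone());
        let _ = (registry, manager);
    }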
@@ -21,14 +21,15 @@ use common_telemetry::info;
|
|||||||
use common_telemetry::logging::TracingOptions;
|
use common_telemetry::logging::TracingOptions;
|
||||||
use common_version::{short_version, version};
|
use common_version::{short_version, version};
|
||||||
use meta_srv::bootstrap::MetasrvInstance;
|
use meta_srv::bootstrap::MetasrvInstance;
|
||||||
use meta_srv::metasrv::MetasrvOptions;
|
|
||||||
use snafu::ResultExt;
|
use snafu::ResultExt;
|
||||||
use tracing_appender::non_blocking::WorkerGuard;
|
use tracing_appender::non_blocking::WorkerGuard;
|
||||||
|
|
||||||
use crate::error::{self, LoadLayeredConfigSnafu, Result, StartMetaServerSnafu};
|
use crate::error::{self, LoadLayeredConfigSnafu, Result, StartMetaServerSnafu};
|
||||||
use crate::options::GlobalOptions;
|
use crate::options::{GlobalOptions, GreptimeOptions};
|
||||||
use crate::{log_versions, App};
|
use crate::{log_versions, App};
|
||||||
|
|
||||||
|
type MetasrvOptions = GreptimeOptions<meta_srv::metasrv::MetasrvOptions>;
|
||||||
|
|
||||||
pub const APP_NAME: &str = "greptime-metasrv";
|
pub const APP_NAME: &str = "greptime-metasrv";
|
||||||
|
|
||||||
pub struct Instance {
|
pub struct Instance {
|
||||||
@@ -139,22 +140,25 @@ struct StartCommand {
|
|||||||
|
|
||||||
impl StartCommand {
|
impl StartCommand {
|
||||||
fn load_options(&self, global_options: &GlobalOptions) -> Result<MetasrvOptions> {
|
fn load_options(&self, global_options: &GlobalOptions) -> Result<MetasrvOptions> {
|
||||||
self.merge_with_cli_options(
|
let mut opts = MetasrvOptions::load_layered_options(
|
||||||
global_options,
|
self.config_file.as_deref(),
|
||||||
MetasrvOptions::load_layered_options(
|
self.env_prefix.as_ref(),
|
||||||
self.config_file.as_deref(),
|
|
||||||
self.env_prefix.as_ref(),
|
|
||||||
)
|
|
||||||
.context(LoadLayeredConfigSnafu)?,
|
|
||||||
)
|
)
|
||||||
|
.context(LoadLayeredConfigSnafu)?;
|
||||||
|
|
||||||
|
self.merge_with_cli_options(global_options, &mut opts)?;
|
||||||
|
|
||||||
|
Ok(opts)
|
||||||
}
|
}
|
||||||
|
|
||||||
// The precedence order is: cli > config file > environment variables > default values.
|
// The precedence order is: cli > config file > environment variables > default values.
|
||||||
fn merge_with_cli_options(
|
fn merge_with_cli_options(
|
||||||
&self,
|
&self,
|
||||||
global_options: &GlobalOptions,
|
global_options: &GlobalOptions,
|
||||||
mut opts: MetasrvOptions,
|
opts: &mut MetasrvOptions,
|
||||||
) -> Result<MetasrvOptions> {
|
) -> Result<()> {
|
||||||
|
let opts = &mut opts.component;
|
||||||
|
|
||||||
if let Some(dir) = &global_options.log_dir {
|
if let Some(dir) = &global_options.log_dir {
|
||||||
opts.logging.dir.clone_from(dir);
|
opts.logging.dir.clone_from(dir);
|
||||||
}
|
}
|
||||||
@@ -217,21 +221,28 @@ impl StartCommand {
|
|||||||
// Disable dashboard in metasrv.
|
// Disable dashboard in metasrv.
|
||||||
opts.http.disable_dashboard = true;
|
opts.http.disable_dashboard = true;
|
||||||
|
|
||||||
Ok(opts)
|
Ok(())
|
||||||
}
|
}
|
||||||
|
|
||||||
async fn build(&self, mut opts: MetasrvOptions) -> Result<Instance> {
|
async fn build(&self, opts: MetasrvOptions) -> Result<Instance> {
|
||||||
let guard =
|
common_runtime::init_global_runtimes(&opts.runtime);
|
||||||
common_telemetry::init_global_logging(APP_NAME, &opts.logging, &opts.tracing, None);
|
|
||||||
log_versions(version!(), short_version!());
|
|
||||||
|
|
||||||
let plugins = plugins::setup_metasrv_plugins(&mut opts)
|
let guard = common_telemetry::init_global_logging(
|
||||||
.await
|
APP_NAME,
|
||||||
.context(StartMetaServerSnafu)?;
|
&opts.component.logging,
|
||||||
|
&opts.component.tracing,
|
||||||
|
None,
|
||||||
|
);
|
||||||
|
log_versions(version!(), short_version!());
|
||||||
|
|
||||||
info!("Metasrv start command: {:#?}", self);
|
info!("Metasrv start command: {:#?}", self);
|
||||||
info!("Metasrv options: {:#?}", opts);
|
info!("Metasrv options: {:#?}", opts);
|
||||||
|
|
||||||
|
let mut opts = opts.component;
|
||||||
|
let plugins = plugins::setup_metasrv_plugins(&mut opts)
|
||||||
|
.await
|
||||||
|
.context(StartMetaServerSnafu)?;
|
||||||
|
|
||||||
let builder = meta_srv::bootstrap::metasrv_builder(&opts, plugins.clone(), None)
|
let builder = meta_srv::bootstrap::metasrv_builder(&opts, plugins.clone(), None)
|
||||||
.await
|
.await
|
||||||
.context(error::BuildMetaServerSnafu)?;
|
.context(error::BuildMetaServerSnafu)?;
|
||||||
@@ -266,7 +277,7 @@ mod tests {
|
|||||||
..Default::default()
|
..Default::default()
|
||||||
};
|
};
|
||||||
|
|
||||||
let options = cmd.load_options(&GlobalOptions::default()).unwrap();
|
let options = cmd.load_options(&Default::default()).unwrap().component;
|
||||||
assert_eq!("127.0.0.1:3002".to_string(), options.bind_addr);
|
assert_eq!("127.0.0.1:3002".to_string(), options.bind_addr);
|
||||||
assert_eq!(vec!["127.0.0.1:2380".to_string()], options.store_addrs);
|
assert_eq!(vec!["127.0.0.1:2380".to_string()], options.store_addrs);
|
||||||
assert_eq!(SelectorType::LoadBased, options.selector);
|
assert_eq!(SelectorType::LoadBased, options.selector);
|
||||||
@@ -299,7 +310,7 @@ mod tests {
|
|||||||
..Default::default()
|
..Default::default()
|
||||||
};
|
};
|
||||||
|
|
||||||
let options = cmd.load_options(&GlobalOptions::default()).unwrap();
|
let options = cmd.load_options(&Default::default()).unwrap().component;
|
||||||
assert_eq!("127.0.0.1:3002".to_string(), options.bind_addr);
|
assert_eq!("127.0.0.1:3002".to_string(), options.bind_addr);
|
||||||
assert_eq!("127.0.0.1:3002".to_string(), options.server_addr);
|
assert_eq!("127.0.0.1:3002".to_string(), options.server_addr);
|
||||||
assert_eq!(vec!["127.0.0.1:2379".to_string()], options.store_addrs);
|
assert_eq!(vec!["127.0.0.1:2379".to_string()], options.store_addrs);
|
||||||
@@ -349,7 +360,8 @@ mod tests {
|
|||||||
#[cfg(feature = "tokio-console")]
|
#[cfg(feature = "tokio-console")]
|
||||||
tokio_console_addr: None,
|
tokio_console_addr: None,
|
||||||
})
|
})
|
||||||
.unwrap();
|
.unwrap()
|
||||||
|
.component;
|
||||||
|
|
||||||
let logging_opt = options.logging;
|
let logging_opt = options.logging;
|
||||||
assert_eq!("/tmp/greptimedb/test/logs", logging_opt.dir);
|
assert_eq!("/tmp/greptimedb/test/logs", logging_opt.dir);
|
||||||
@@ -406,7 +418,7 @@ mod tests {
|
|||||||
..Default::default()
|
..Default::default()
|
||||||
};
|
};
|
||||||
|
|
||||||
let opts = command.load_options(&GlobalOptions::default()).unwrap();
|
let opts = command.load_options(&Default::default()).unwrap().component;
|
||||||
|
|
||||||
// Should be read from env, env > default values.
|
// Should be read from env, env > default values.
|
||||||
assert_eq!(opts.bind_addr, "127.0.0.1:14002");
|
assert_eq!(opts.bind_addr, "127.0.0.1:14002");
|
||||||
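The metasrv hunks above invert the old flow: the layered options (config file, then environment, then defaults) are loaded first, and merge_with_cli_options then overlays the CLI flags onto a mutable borrow. Below is a self-contained sketch of that ordering with simplified stand-in structs rather than the real MetasrvOptions and GlobalOptions; the snafu error handling is omitted.

#[derive(Debug, Default)]
struct Options {
    log_dir: String,
    bind_addr: String,
}

#[derive(Debug, Default)]
struct CliFlags {
    log_dir: Option<String>,
    bind_addr: Option<String>,
}

fn load_options(cli: &CliFlags) -> Options {
    // Step 1: config file + environment + defaults (plain defaults stand in here).
    let mut opts = Options::default();
    // Step 2: CLI flags win, mutating the already-loaded options in place,
    // mirroring merge_with_cli_options(global_options, &mut opts).
    merge_with_cli_options(cli, &mut opts);
    opts
}

fn merge_with_cli_options(cli: &CliFlags, opts: &mut Options) {
    if let Some(dir) = &cli.log_dir {
        opts.log_dir.clone_from(dir);
    }
    if let Some(addr) = &cli.bind_addr {
        opts.bind_addr.clone_from(addr);
    }
}

fn main() {
    let cli = CliFlags {
        bind_addr: Some("127.0.0.1:3002".to_string()),
        ..Default::default()
    };
    println!("{:?}", load_options(&cli));
}

Returning Result<()> from the merge step (as the real code does) keeps the precedence chain readable: load once, then apply overrides in place.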
|
|||||||
@@ -13,6 +13,9 @@
|
|||||||
// limitations under the License.
|
// limitations under the License.
|
||||||
|
|
||||||
use clap::Parser;
|
use clap::Parser;
|
||||||
|
use common_config::Configurable;
|
||||||
|
use common_runtime::global::RuntimeOptions;
|
||||||
|
use serde::{Deserialize, Serialize};
|
||||||
|
|
||||||
#[derive(Parser, Default, Debug, Clone)]
|
#[derive(Parser, Default, Debug, Clone)]
|
||||||
pub struct GlobalOptions {
|
pub struct GlobalOptions {
|
||||||
@@ -29,3 +32,22 @@ pub struct GlobalOptions {
|
|||||||
#[arg(global = true)]
|
#[arg(global = true)]
|
||||||
pub tokio_console_addr: Option<String>,
|
pub tokio_console_addr: Option<String>,
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// TODO(LFC): Move logging and tracing options into global options, like the runtime options.
|
||||||
|
/// All the options of GreptimeDB.
|
||||||
|
#[derive(Clone, Debug, Default, Serialize, Deserialize, PartialEq)]
|
||||||
|
#[serde(default)]
|
||||||
|
pub struct GreptimeOptions<T> {
|
||||||
|
/// The runtime options.
|
||||||
|
pub runtime: RuntimeOptions,
|
||||||
|
|
||||||
|
/// The options of each component (like Datanode or Standalone) of GreptimeDB.
|
||||||
|
#[serde(flatten)]
|
||||||
|
pub component: T,
|
||||||
|
}
|
||||||
|
|
||||||
|
impl<T: Configurable> Configurable for GreptimeOptions<T> {
|
||||||
|
fn env_list_keys() -> Option<&'static [&'static str]> {
|
||||||
|
T::env_list_keys()
|
||||||
|
}
|
||||||
|
}
|
||||||
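GreptimeOptions<T> pairs each component's options with the shared RuntimeOptions, and #[serde(flatten)] keeps the component keys at the top level of the TOML while runtime settings sit under a [runtime] table. A minimal sketch of that layout, assuming the serde and toml crates and using a made-up concrete component in place of the generic parameter:

use serde::Deserialize;

#[derive(Debug, Default, Deserialize)]
#[serde(default)]
struct RuntimeOptions {
    read_rt_size: usize,
    write_rt_size: usize,
}

#[derive(Debug, Default, Deserialize)]
#[serde(default)]
struct ComponentOptions {
    default_timezone: Option<String>,
    bind_addr: String,
}

// Concrete stand-in for the generic GreptimeOptions<T> defined above.
#[derive(Debug, Deserialize)]
struct Wrapper {
    runtime: RuntimeOptions,
    #[serde(flatten)]
    component: ComponentOptions,
}

fn main() {
    let cfg = r#"
        default_timezone = "UTC"
        bind_addr = "127.0.0.1:4000"

        [runtime]
        read_rt_size = 8
        write_rt_size = 8
    "#;
    let opts: Wrapper = toml::from_str(cfg).unwrap();
    println!("{opts:?}");
}

Because of the flatten, existing component config files keep working unchanged; only the new [runtime] section is an addition.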
|
|||||||
@@ -16,10 +16,7 @@ use std::sync::Arc;
|
|||||||
use std::{fs, path};
|
use std::{fs, path};
|
||||||
|
|
||||||
use async_trait::async_trait;
|
use async_trait::async_trait;
|
||||||
use cache::{
|
use cache::{build_fundamental_cache_registry, with_default_composite_cache_registry};
|
||||||
build_fundamental_cache_registry, with_default_composite_cache_registry, TABLE_CACHE_NAME,
|
|
||||||
TABLE_ROUTE_CACHE_NAME,
|
|
||||||
};
|
|
||||||
use catalog::kvbackend::KvBackendCatalogManager;
|
use catalog::kvbackend::KvBackendCatalogManager;
|
||||||
use clap::Parser;
|
use clap::Parser;
|
||||||
use common_catalog::consts::{MIN_USER_FLOW_ID, MIN_USER_TABLE_ID};
|
use common_catalog::consts::{MIN_USER_FLOW_ID, MIN_USER_TABLE_ID};
|
||||||
@@ -61,16 +58,16 @@ use servers::export_metrics::ExportMetricsOption;
|
|||||||
use servers::http::HttpOptions;
|
use servers::http::HttpOptions;
|
||||||
use servers::tls::{TlsMode, TlsOption};
|
use servers::tls::{TlsMode, TlsOption};
|
||||||
use servers::Mode;
|
use servers::Mode;
|
||||||
use snafu::{OptionExt, ResultExt};
|
use snafu::ResultExt;
|
||||||
use tracing_appender::non_blocking::WorkerGuard;
|
use tracing_appender::non_blocking::WorkerGuard;
|
||||||
|
|
||||||
use crate::error::{
|
use crate::error::{
|
||||||
BuildCacheRegistrySnafu, CacheRequiredSnafu, CreateDirSnafu, IllegalConfigSnafu,
|
BuildCacheRegistrySnafu, CreateDirSnafu, IllegalConfigSnafu, InitDdlManagerSnafu,
|
||||||
InitDdlManagerSnafu, InitMetadataSnafu, InitTimezoneSnafu, LoadLayeredConfigSnafu, Result,
|
InitMetadataSnafu, InitTimezoneSnafu, LoadLayeredConfigSnafu, Result, ShutdownDatanodeSnafu,
|
||||||
ShutdownDatanodeSnafu, ShutdownFrontendSnafu, StartDatanodeSnafu, StartFrontendSnafu,
|
ShutdownFrontendSnafu, StartDatanodeSnafu, StartFrontendSnafu, StartProcedureManagerSnafu,
|
||||||
StartProcedureManagerSnafu, StartWalOptionsAllocatorSnafu, StopProcedureManagerSnafu,
|
StartWalOptionsAllocatorSnafu, StopProcedureManagerSnafu,
|
||||||
};
|
};
|
||||||
use crate::options::GlobalOptions;
|
use crate::options::{GlobalOptions, GreptimeOptions};
|
||||||
use crate::{log_versions, App};
|
use crate::{log_versions, App};
|
||||||
|
|
||||||
pub const APP_NAME: &str = "greptime-standalone";
|
pub const APP_NAME: &str = "greptime-standalone";
|
||||||
@@ -82,11 +79,14 @@ pub struct Command {
|
|||||||
}
|
}
|
||||||
|
|
||||||
impl Command {
|
impl Command {
|
||||||
pub async fn build(&self, opts: StandaloneOptions) -> Result<Instance> {
|
pub async fn build(&self, opts: GreptimeOptions<StandaloneOptions>) -> Result<Instance> {
|
||||||
self.subcmd.build(opts).await
|
self.subcmd.build(opts).await
|
||||||
}
|
}
|
||||||
|
|
||||||
pub fn load_options(&self, global_options: &GlobalOptions) -> Result<StandaloneOptions> {
|
pub fn load_options(
|
||||||
|
&self,
|
||||||
|
global_options: &GlobalOptions,
|
||||||
|
) -> Result<GreptimeOptions<StandaloneOptions>> {
|
||||||
self.subcmd.load_options(global_options)
|
self.subcmd.load_options(global_options)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
@@ -97,20 +97,23 @@ enum SubCommand {
|
|||||||
}
|
}
|
||||||
|
|
||||||
impl SubCommand {
|
impl SubCommand {
|
||||||
async fn build(&self, opts: StandaloneOptions) -> Result<Instance> {
|
async fn build(&self, opts: GreptimeOptions<StandaloneOptions>) -> Result<Instance> {
|
||||||
match self {
|
match self {
|
||||||
SubCommand::Start(cmd) => cmd.build(opts).await,
|
SubCommand::Start(cmd) => cmd.build(opts).await,
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
fn load_options(&self, global_options: &GlobalOptions) -> Result<StandaloneOptions> {
|
fn load_options(
|
||||||
|
&self,
|
||||||
|
global_options: &GlobalOptions,
|
||||||
|
) -> Result<GreptimeOptions<StandaloneOptions>> {
|
||||||
match self {
|
match self {
|
||||||
SubCommand::Start(cmd) => cmd.load_options(global_options),
|
SubCommand::Start(cmd) => cmd.load_options(global_options),
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
#[derive(Clone, Debug, Serialize, Deserialize)]
|
#[derive(Clone, Debug, Serialize, Deserialize, PartialEq)]
|
||||||
#[serde(default)]
|
#[serde(default)]
|
||||||
pub struct StandaloneOptions {
|
pub struct StandaloneOptions {
|
||||||
pub mode: Mode,
|
pub mode: Mode,
|
||||||
@@ -164,7 +167,7 @@ impl Default for StandaloneOptions {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
impl Configurable<'_> for StandaloneOptions {
|
impl Configurable for StandaloneOptions {
|
||||||
fn env_list_keys() -> Option<&'static [&'static str]> {
|
fn env_list_keys() -> Option<&'static [&'static str]> {
|
||||||
Some(&["wal.broker_endpoints"])
|
Some(&["wal.broker_endpoints"])
|
||||||
}
|
}
|
||||||
@@ -294,23 +297,27 @@ pub struct StartCommand {
|
|||||||
}
|
}
|
||||||
|
|
||||||
impl StartCommand {
|
impl StartCommand {
|
||||||
fn load_options(&self, global_options: &GlobalOptions) -> Result<StandaloneOptions> {
|
fn load_options(
|
||||||
self.merge_with_cli_options(
|
&self,
|
||||||
global_options,
|
global_options: &GlobalOptions,
|
||||||
StandaloneOptions::load_layered_options(
|
) -> Result<GreptimeOptions<StandaloneOptions>> {
|
||||||
self.config_file.as_deref(),
|
let mut opts = GreptimeOptions::<StandaloneOptions>::load_layered_options(
|
||||||
self.env_prefix.as_ref(),
|
self.config_file.as_deref(),
|
||||||
)
|
self.env_prefix.as_ref(),
|
||||||
.context(LoadLayeredConfigSnafu)?,
|
|
||||||
)
|
)
|
||||||
|
.context(LoadLayeredConfigSnafu)?;
|
||||||
|
|
||||||
|
self.merge_with_cli_options(global_options, &mut opts.component)?;
|
||||||
|
|
||||||
|
Ok(opts)
|
||||||
}
|
}
|
||||||
|
|
||||||
// The precedence order is: cli > config file > environment variables > default values.
|
// The precedence order is: cli > config file > environment variables > default values.
|
||||||
pub fn merge_with_cli_options(
|
pub fn merge_with_cli_options(
|
||||||
&self,
|
&self,
|
||||||
global_options: &GlobalOptions,
|
global_options: &GlobalOptions,
|
||||||
mut opts: StandaloneOptions,
|
opts: &mut StandaloneOptions,
|
||||||
) -> Result<StandaloneOptions> {
|
) -> Result<()> {
|
||||||
// Should always be standalone mode.
|
// Should always be standalone mode.
|
||||||
opts.mode = Mode::Standalone;
|
opts.mode = Mode::Standalone;
|
||||||
|
|
||||||
@@ -372,20 +379,27 @@ impl StartCommand {
|
|||||||
|
|
||||||
opts.user_provider.clone_from(&self.user_provider);
|
opts.user_provider.clone_from(&self.user_provider);
|
||||||
|
|
||||||
Ok(opts)
|
Ok(())
|
||||||
}
|
}
|
||||||
|
|
||||||
#[allow(unreachable_code)]
|
#[allow(unreachable_code)]
|
||||||
#[allow(unused_variables)]
|
#[allow(unused_variables)]
|
||||||
#[allow(clippy::diverging_sub_expression)]
|
#[allow(clippy::diverging_sub_expression)]
|
||||||
async fn build(&self, opts: StandaloneOptions) -> Result<Instance> {
|
async fn build(&self, opts: GreptimeOptions<StandaloneOptions>) -> Result<Instance> {
|
||||||
let guard =
|
common_runtime::init_global_runtimes(&opts.runtime);
|
||||||
common_telemetry::init_global_logging(APP_NAME, &opts.logging, &opts.tracing, None);
|
|
||||||
|
let guard = common_telemetry::init_global_logging(
|
||||||
|
APP_NAME,
|
||||||
|
&opts.component.logging,
|
||||||
|
&opts.component.tracing,
|
||||||
|
None,
|
||||||
|
);
|
||||||
log_versions(version!(), short_version!());
|
log_versions(version!(), short_version!());
|
||||||
|
|
||||||
info!("Standalone start command: {:#?}", self);
|
info!("Standalone start command: {:#?}", self);
|
||||||
info!("Building standalone instance with {opts:#?}");
|
info!("Standalone options: {opts:#?}");
|
||||||
|
|
||||||
|
let opts = opts.component;
|
||||||
let mut fe_opts = opts.frontend_options();
|
let mut fe_opts = opts.frontend_options();
|
||||||
#[allow(clippy::unnecessary_mut_passed)]
|
#[allow(clippy::unnecessary_mut_passed)]
|
||||||
let fe_plugins = plugins::setup_frontend_plugins(&mut fe_opts) // mut ref is MUST, DO NOT change it
|
let fe_plugins = plugins::setup_frontend_plugins(&mut fe_opts) // mut ref is MUST, DO NOT change it
|
||||||
@@ -421,20 +435,12 @@ impl StartCommand {
|
|||||||
.build(),
|
.build(),
|
||||||
);
|
);
|
||||||
|
|
||||||
let table_cache = layered_cache_registry.get().context(CacheRequiredSnafu {
|
|
||||||
name: TABLE_CACHE_NAME,
|
|
||||||
})?;
|
|
||||||
let table_route_cache = layered_cache_registry.get().context(CacheRequiredSnafu {
|
|
||||||
name: TABLE_ROUTE_CACHE_NAME,
|
|
||||||
})?;
|
|
||||||
let catalog_manager = KvBackendCatalogManager::new(
|
let catalog_manager = KvBackendCatalogManager::new(
|
||||||
dn_opts.mode,
|
dn_opts.mode,
|
||||||
None,
|
None,
|
||||||
kv_backend.clone(),
|
kv_backend.clone(),
|
||||||
table_cache,
|
layered_cache_registry.clone(),
|
||||||
table_route_cache,
|
);
|
||||||
)
|
|
||||||
.await;
|
|
||||||
|
|
||||||
let table_metadata_manager =
|
let table_metadata_manager =
|
||||||
Self::create_table_metadata_manager(kv_backend.clone()).await?;
|
Self::create_table_metadata_manager(kv_backend.clone()).await?;
|
||||||
@@ -448,9 +454,11 @@ impl StartCommand {
|
|||||||
);
|
);
|
||||||
let flownode = Arc::new(flow_builder.build().await);
|
let flownode = Arc::new(flow_builder.build().await);
|
||||||
|
|
||||||
let builder =
|
let datanode = DatanodeBuilder::new(dn_opts, fe_plugins.clone())
|
||||||
DatanodeBuilder::new(dn_opts, fe_plugins.clone()).with_kv_backend(kv_backend.clone());
|
.with_kv_backend(kv_backend.clone())
|
||||||
let datanode = builder.build().await.context(StartDatanodeSnafu)?;
|
.build()
|
||||||
|
.await
|
||||||
|
.context(StartDatanodeSnafu)?;
|
||||||
|
|
||||||
let node_manager = Arc::new(StandaloneDatanodeManager {
|
let node_manager = Arc::new(StandaloneDatanodeManager {
|
||||||
region_server: datanode.region_server(),
|
region_server: datanode.region_server(),
|
||||||
@@ -675,7 +683,10 @@ mod tests {
|
|||||||
..Default::default()
|
..Default::default()
|
||||||
};
|
};
|
||||||
|
|
||||||
let options = cmd.load_options(&GlobalOptions::default()).unwrap();
|
let options = cmd
|
||||||
|
.load_options(&GlobalOptions::default())
|
||||||
|
.unwrap()
|
||||||
|
.component;
|
||||||
let fe_opts = options.frontend_options();
|
let fe_opts = options.frontend_options();
|
||||||
let dn_opts = options.datanode_options();
|
let dn_opts = options.datanode_options();
|
||||||
let logging_opts = options.logging;
|
let logging_opts = options.logging;
|
||||||
@@ -736,7 +747,8 @@ mod tests {
|
|||||||
#[cfg(feature = "tokio-console")]
|
#[cfg(feature = "tokio-console")]
|
||||||
tokio_console_addr: None,
|
tokio_console_addr: None,
|
||||||
})
|
})
|
||||||
.unwrap();
|
.unwrap()
|
||||||
|
.component;
|
||||||
|
|
||||||
assert_eq!("/tmp/greptimedb/test/logs", opts.logging.dir);
|
assert_eq!("/tmp/greptimedb/test/logs", opts.logging.dir);
|
||||||
assert_eq!("debug", opts.logging.level.unwrap());
|
assert_eq!("debug", opts.logging.level.unwrap());
|
||||||
@@ -798,7 +810,7 @@ mod tests {
|
|||||||
..Default::default()
|
..Default::default()
|
||||||
};
|
};
|
||||||
|
|
||||||
let opts = command.load_options(&GlobalOptions::default()).unwrap();
|
let opts = command.load_options(&Default::default()).unwrap().component;
|
||||||
|
|
||||||
// Should be read from env, env > default values.
|
// Should be read from env, env > default values.
|
||||||
assert_eq!(opts.logging.dir, "/other/log/dir");
|
assert_eq!(opts.logging.dir, "/other/log/dir");
|
||||||
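As in metasrv, build() now receives the whole wrapper, wires up the global runtimes and logging from it, and only afterwards moves `component` out for the frontend and datanode plumbing. A stand-alone sketch of that hand-off with simplified local types (not the real cmd structs):

#[derive(Debug, Default)]
struct RuntimeOptions {
    read_rt_size: usize,
}

#[derive(Debug, Default)]
struct StandaloneOptions {
    default_timezone: Option<String>,
}

#[derive(Debug, Default)]
struct GreptimeOptions {
    runtime: RuntimeOptions,
    component: StandaloneOptions,
}

fn build(opts: GreptimeOptions) {
    // Global concerns (runtimes, logging) are driven by the wrapper first...
    println!("init global runtimes with {:?}", opts.runtime);
    // ...then the component options are moved out, mirroring
    // `let opts = opts.component;` in the hunk above.
    let component = opts.component;
    println!("building standalone instance with {:?}", component);
}

fn main() {
    build(GreptimeOptions::default());
}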
|
|||||||
src/cmd/tests/load_config_test.rs (new file, 231 lines)
@@ -0,0 +1,231 @@
|
|||||||
|
// Copyright 2023 Greptime Team
|
||||||
|
//
|
||||||
|
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
// you may not use this file except in compliance with the License.
|
||||||
|
// You may obtain a copy of the License at
|
||||||
|
//
|
||||||
|
// http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
//
|
||||||
|
// Unless required by applicable law or agreed to in writing, software
|
||||||
|
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
// See the License for the specific language governing permissions and
|
||||||
|
// limitations under the License.
|
||||||
|
|
||||||
|
use std::time::Duration;
|
||||||
|
|
||||||
|
use cmd::options::GreptimeOptions;
|
||||||
|
use cmd::standalone::StandaloneOptions;
|
||||||
|
use common_base::readable_size::ReadableSize;
|
||||||
|
use common_config::Configurable;
|
||||||
|
use common_runtime::global::RuntimeOptions;
|
||||||
|
use common_telemetry::logging::LoggingOptions;
|
||||||
|
use common_wal::config::raft_engine::RaftEngineConfig;
|
||||||
|
use common_wal::config::{DatanodeWalConfig, StandaloneWalConfig};
|
||||||
|
use datanode::config::{DatanodeOptions, RegionEngineConfig, StorageConfig};
|
||||||
|
use frontend::frontend::FrontendOptions;
|
||||||
|
use frontend::service_config::datanode::DatanodeClientOptions;
|
||||||
|
use meta_client::MetaClientOptions;
|
||||||
|
use meta_srv::metasrv::MetasrvOptions;
|
||||||
|
use meta_srv::selector::SelectorType;
|
||||||
|
use mito2::config::MitoConfig;
|
||||||
|
use servers::export_metrics::ExportMetricsOption;
|
||||||
|
|
||||||
|
#[test]
|
||||||
|
fn test_load_datanode_example_config() {
|
||||||
|
let example_config = common_test_util::find_workspace_path("config/datanode.example.toml");
|
||||||
|
let options =
|
||||||
|
GreptimeOptions::<DatanodeOptions>::load_layered_options(example_config.to_str(), "")
|
||||||
|
.unwrap();
|
||||||
|
|
||||||
|
let expected = GreptimeOptions::<DatanodeOptions> {
|
||||||
|
runtime: RuntimeOptions {
|
||||||
|
read_rt_size: 8,
|
||||||
|
write_rt_size: 8,
|
||||||
|
bg_rt_size: 8,
|
||||||
|
},
|
||||||
|
component: DatanodeOptions {
|
||||||
|
node_id: Some(42),
|
||||||
|
rpc_hostname: Some("127.0.0.1".to_string()),
|
||||||
|
meta_client: Some(MetaClientOptions {
|
||||||
|
metasrv_addrs: vec!["127.0.0.1:3002".to_string()],
|
||||||
|
timeout: Duration::from_secs(3),
|
||||||
|
heartbeat_timeout: Duration::from_millis(500),
|
||||||
|
ddl_timeout: Duration::from_secs(10),
|
||||||
|
connect_timeout: Duration::from_secs(1),
|
||||||
|
tcp_nodelay: true,
|
||||||
|
metadata_cache_max_capacity: 100000,
|
||||||
|
metadata_cache_ttl: Duration::from_secs(600),
|
||||||
|
metadata_cache_tti: Duration::from_secs(300),
|
||||||
|
}),
|
||||||
|
wal: DatanodeWalConfig::RaftEngine(RaftEngineConfig {
|
||||||
|
dir: Some("/tmp/greptimedb/wal".to_string()),
|
||||||
|
sync_period: Some(Duration::from_secs(10)),
|
||||||
|
..Default::default()
|
||||||
|
}),
|
||||||
|
storage: StorageConfig {
|
||||||
|
data_home: "/tmp/greptimedb/".to_string(),
|
||||||
|
..Default::default()
|
||||||
|
},
|
||||||
|
region_engine: vec![RegionEngineConfig::Mito(MitoConfig {
|
||||||
|
num_workers: 8,
|
||||||
|
auto_flush_interval: Duration::from_secs(3600),
|
||||||
|
scan_parallelism: 0,
|
||||||
|
global_write_buffer_size: ReadableSize::gb(1),
|
||||||
|
global_write_buffer_reject_size: ReadableSize::gb(2),
|
||||||
|
sst_meta_cache_size: ReadableSize::mb(128),
|
||||||
|
vector_cache_size: ReadableSize::mb(512),
|
||||||
|
page_cache_size: ReadableSize::mb(512),
|
||||||
|
max_background_jobs: 4,
|
||||||
|
..Default::default()
|
||||||
|
})],
|
||||||
|
logging: LoggingOptions {
|
||||||
|
level: Some("info".to_string()),
|
||||||
|
otlp_endpoint: Some("".to_string()),
|
||||||
|
tracing_sample_ratio: Some(Default::default()),
|
||||||
|
..Default::default()
|
||||||
|
},
|
||||||
|
export_metrics: ExportMetricsOption {
|
||||||
|
self_import: Some(Default::default()),
|
||||||
|
remote_write: Some(Default::default()),
|
||||||
|
..Default::default()
|
||||||
|
},
|
||||||
|
..Default::default()
|
||||||
|
},
|
||||||
|
};
|
||||||
|
|
||||||
|
assert_eq!(options, expected);
|
||||||
|
}
|
||||||
|
|
||||||
|
#[test]
|
||||||
|
fn test_load_frontend_example_config() {
|
||||||
|
let example_config = common_test_util::find_workspace_path("config/frontend.example.toml");
|
||||||
|
let options =
|
||||||
|
GreptimeOptions::<FrontendOptions>::load_layered_options(example_config.to_str(), "")
|
||||||
|
.unwrap();
|
||||||
|
let expected = GreptimeOptions::<FrontendOptions> {
|
||||||
|
runtime: RuntimeOptions {
|
||||||
|
read_rt_size: 8,
|
||||||
|
write_rt_size: 8,
|
||||||
|
bg_rt_size: 8,
|
||||||
|
},
|
||||||
|
component: FrontendOptions {
|
||||||
|
default_timezone: Some("UTC".to_string()),
|
||||||
|
meta_client: Some(MetaClientOptions {
|
||||||
|
metasrv_addrs: vec!["127.0.0.1:3002".to_string()],
|
||||||
|
timeout: Duration::from_secs(3),
|
||||||
|
heartbeat_timeout: Duration::from_millis(500),
|
||||||
|
ddl_timeout: Duration::from_secs(10),
|
||||||
|
connect_timeout: Duration::from_secs(1),
|
||||||
|
tcp_nodelay: true,
|
||||||
|
metadata_cache_max_capacity: 100000,
|
||||||
|
metadata_cache_ttl: Duration::from_secs(600),
|
||||||
|
metadata_cache_tti: Duration::from_secs(300),
|
||||||
|
}),
|
||||||
|
logging: LoggingOptions {
|
||||||
|
level: Some("info".to_string()),
|
||||||
|
otlp_endpoint: Some("".to_string()),
|
||||||
|
tracing_sample_ratio: Some(Default::default()),
|
||||||
|
..Default::default()
|
||||||
|
},
|
||||||
|
datanode: frontend::service_config::DatanodeOptions {
|
||||||
|
client: DatanodeClientOptions {
|
||||||
|
connect_timeout: Duration::from_secs(10),
|
||||||
|
tcp_nodelay: true,
|
||||||
|
},
|
||||||
|
},
|
||||||
|
export_metrics: ExportMetricsOption {
|
||||||
|
self_import: Some(Default::default()),
|
||||||
|
remote_write: Some(Default::default()),
|
||||||
|
..Default::default()
|
||||||
|
},
|
||||||
|
..Default::default()
|
||||||
|
},
|
||||||
|
};
|
||||||
|
assert_eq!(options, expected);
|
||||||
|
}
|
||||||
|
|
||||||
|
#[test]
|
||||||
|
fn test_load_metasrv_example_config() {
|
||||||
|
let example_config = common_test_util::find_workspace_path("config/metasrv.example.toml");
|
||||||
|
let options =
|
||||||
|
GreptimeOptions::<MetasrvOptions>::load_layered_options(example_config.to_str(), "")
|
||||||
|
.unwrap();
|
||||||
|
let expected = GreptimeOptions::<MetasrvOptions> {
|
||||||
|
runtime: RuntimeOptions {
|
||||||
|
read_rt_size: 8,
|
||||||
|
write_rt_size: 8,
|
||||||
|
bg_rt_size: 8,
|
||||||
|
},
|
||||||
|
component: MetasrvOptions {
|
||||||
|
selector: SelectorType::LeaseBased,
|
||||||
|
data_home: "/tmp/metasrv/".to_string(),
|
||||||
|
logging: LoggingOptions {
|
||||||
|
dir: "/tmp/greptimedb/logs".to_string(),
|
||||||
|
level: Some("info".to_string()),
|
||||||
|
otlp_endpoint: Some("".to_string()),
|
||||||
|
tracing_sample_ratio: Some(Default::default()),
|
||||||
|
..Default::default()
|
||||||
|
},
|
||||||
|
export_metrics: ExportMetricsOption {
|
||||||
|
self_import: Some(Default::default()),
|
||||||
|
remote_write: Some(Default::default()),
|
||||||
|
..Default::default()
|
||||||
|
},
|
||||||
|
..Default::default()
|
||||||
|
},
|
||||||
|
};
|
||||||
|
assert_eq!(options, expected);
|
||||||
|
}
|
||||||
|
|
||||||
|
#[test]
|
||||||
|
fn test_load_standalone_example_config() {
|
||||||
|
let example_config = common_test_util::find_workspace_path("config/standalone.example.toml");
|
||||||
|
let options =
|
||||||
|
GreptimeOptions::<StandaloneOptions>::load_layered_options(example_config.to_str(), "")
|
||||||
|
.unwrap();
|
||||||
|
let expected = GreptimeOptions::<StandaloneOptions> {
|
||||||
|
runtime: RuntimeOptions {
|
||||||
|
read_rt_size: 8,
|
||||||
|
write_rt_size: 8,
|
||||||
|
bg_rt_size: 8,
|
||||||
|
},
|
||||||
|
component: StandaloneOptions {
|
||||||
|
default_timezone: Some("UTC".to_string()),
|
||||||
|
wal: StandaloneWalConfig::RaftEngine(RaftEngineConfig {
|
||||||
|
dir: Some("/tmp/greptimedb/wal".to_string()),
|
||||||
|
sync_period: Some(Duration::from_secs(10)),
|
||||||
|
..Default::default()
|
||||||
|
}),
|
||||||
|
region_engine: vec![RegionEngineConfig::Mito(MitoConfig {
|
||||||
|
num_workers: 8,
|
||||||
|
auto_flush_interval: Duration::from_secs(3600),
|
||||||
|
scan_parallelism: 0,
|
||||||
|
global_write_buffer_size: ReadableSize::gb(1),
|
||||||
|
global_write_buffer_reject_size: ReadableSize::gb(2),
|
||||||
|
sst_meta_cache_size: ReadableSize::mb(128),
|
||||||
|
vector_cache_size: ReadableSize::mb(512),
|
||||||
|
page_cache_size: ReadableSize::mb(512),
|
||||||
|
max_background_jobs: 4,
|
||||||
|
..Default::default()
|
||||||
|
})],
|
||||||
|
storage: StorageConfig {
|
||||||
|
data_home: "/tmp/greptimedb/".to_string(),
|
||||||
|
..Default::default()
|
||||||
|
},
|
||||||
|
logging: LoggingOptions {
|
||||||
|
level: Some("info".to_string()),
|
||||||
|
otlp_endpoint: Some("".to_string()),
|
||||||
|
tracing_sample_ratio: Some(Default::default()),
|
||||||
|
..Default::default()
|
||||||
|
},
|
||||||
|
export_metrics: ExportMetricsOption {
|
||||||
|
self_import: Some(Default::default()),
|
||||||
|
remote_write: Some(Default::default()),
|
||||||
|
..Default::default()
|
||||||
|
},
|
||||||
|
..Default::default()
|
||||||
|
},
|
||||||
|
};
|
||||||
|
assert_eq!(options, expected);
|
||||||
|
}
|
||||||
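The new tests above deserialize the shipped example TOML files into GreptimeOptions<...> and compare against fully spelled-out expected values. The same shape in miniature, with an inline TOML string and a made-up wrapper so it runs without the workspace config files (serde and toml assumed):

use serde::Deserialize;

#[derive(Debug, Default, Deserialize, PartialEq)]
#[serde(default)]
struct RuntimeOptions {
    read_rt_size: usize,
    write_rt_size: usize,
    bg_rt_size: usize,
}

#[derive(Debug, Default, Deserialize, PartialEq)]
#[serde(default)]
struct Wrapper {
    runtime: RuntimeOptions,
}

#[test]
fn loads_runtime_section() {
    let cfg = r#"
        [runtime]
        read_rt_size = 8
        write_rt_size = 8
        bg_rt_size = 8
    "#;
    let parsed: Wrapper = toml::from_str(cfg).unwrap();
    let expected = Wrapper {
        runtime: RuntimeOptions {
            read_rt_size: 8,
            write_rt_size: 8,
            bg_rt_size: 8,
        },
    };
    assert_eq!(parsed, expected);
}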
@@ -13,7 +13,8 @@
|
|||||||
// limitations under the License.
|
// limitations under the License.
|
||||||
|
|
||||||
use config::{Environment, File, FileFormat};
|
use config::{Environment, File, FileFormat};
|
||||||
use serde::{Deserialize, Serialize};
|
use serde::de::DeserializeOwned;
|
||||||
|
use serde::Serialize;
|
||||||
use snafu::ResultExt;
|
use snafu::ResultExt;
|
||||||
|
|
||||||
use crate::error::{LoadLayeredConfigSnafu, Result, SerdeJsonSnafu, TomlFormatSnafu};
|
use crate::error::{LoadLayeredConfigSnafu, Result, SerdeJsonSnafu, TomlFormatSnafu};
|
||||||
@@ -25,7 +26,7 @@ pub const ENV_VAR_SEP: &str = "__";
|
|||||||
pub const ENV_LIST_SEP: &str = ",";
|
pub const ENV_LIST_SEP: &str = ",";
|
||||||
|
|
||||||
/// Configuration trait defines the common interface for configuration that can be loaded from multiple sources and serialized to TOML.
|
/// Configuration trait defines the common interface for configuration that can be loaded from multiple sources and serialized to TOML.
|
||||||
pub trait Configurable<'de>: Serialize + Deserialize<'de> + Default + Sized {
|
pub trait Configurable: Serialize + DeserializeOwned + Default + Sized {
|
||||||
/// Load the configuration from multiple sources and merge them.
|
/// Load the configuration from multiple sources and merge them.
|
||||||
/// The precedence order is: config file > environment variables > default values.
|
/// The precedence order is: config file > environment variables > default values.
|
||||||
/// `env_prefix` is the prefix of environment variables, e.g. "FRONTEND__xxx".
|
/// `env_prefix` is the prefix of environment variables, e.g. "FRONTEND__xxx".
|
||||||
@@ -128,7 +129,7 @@ mod tests {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
impl Configurable<'_> for TestDatanodeConfig {
|
impl Configurable for TestDatanodeConfig {
|
||||||
fn env_list_keys() -> Option<&'static [&'static str]> {
|
fn env_list_keys() -> Option<&'static [&'static str]> {
|
||||||
Some(&["meta_client.metasrv_addrs"])
|
Some(&["meta_client.metasrv_addrs"])
|
||||||
}
|
}
|
||||||
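Dropping the 'de parameter works because DeserializeOwned already expresses "deserializable from data of any lifetime", so the trait itself needs no lifetime of its own. A reduced sketch of the reworked trait and one implementor; load_layered_options and the snafu error plumbing of the real Configurable are omitted:

use serde::de::DeserializeOwned;
use serde::{Deserialize, Serialize};

trait Configurable: Serialize + DeserializeOwned + Default + Sized {
    fn env_list_keys() -> Option<&'static [&'static str]> {
        None
    }
}

#[derive(Debug, Default, Serialize, Deserialize)]
struct TestDatanodeConfig {
    metasrv_addrs: Vec<String>,
}

impl Configurable for TestDatanodeConfig {
    // Keys named here are split on ENV_LIST_SEP (",") when read from env vars.
    fn env_list_keys() -> Option<&'static [&'static str]> {
        Some(&["meta_client.metasrv_addrs"])
    }
}

fn main() {
    let cfg = TestDatanodeConfig {
        metasrv_addrs: "127.0.0.1:3001,127.0.0.1:3002"
            .split(',')
            .map(str::to_string)
            .collect(),
    };
    println!("{:?} {:?}", TestDatanodeConfig::env_list_keys(), cfg);
}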
|
|||||||
@@ -20,6 +20,7 @@ async-compression = { version = "0.3", features = [
|
|||||||
] }
|
] }
|
||||||
async-trait.workspace = true
|
async-trait.workspace = true
|
||||||
bytes.workspace = true
|
bytes.workspace = true
|
||||||
|
common-base.workspace = true
|
||||||
common-error.workspace = true
|
common-error.workspace = true
|
||||||
common-macro.workspace = true
|
common-macro.workspace = true
|
||||||
common-recordbatch.workspace = true
|
common-recordbatch.workspace = true
|
||||||
@@ -33,6 +34,7 @@ object-store.workspace = true
|
|||||||
orc-rust = { git = "https://github.com/datafusion-contrib/datafusion-orc.git", rev = "502217315726314c4008808fe169764529640599" }
|
orc-rust = { git = "https://github.com/datafusion-contrib/datafusion-orc.git", rev = "502217315726314c4008808fe169764529640599" }
|
||||||
parquet.workspace = true
|
parquet.workspace = true
|
||||||
paste = "1.0"
|
paste = "1.0"
|
||||||
|
rand.workspace = true
|
||||||
regex = "1.7"
|
regex = "1.7"
|
||||||
serde.workspace = true
|
serde.workspace = true
|
||||||
snafu.workspace = true
|
snafu.workspace = true
|
||||||
@@ -42,4 +44,7 @@ tokio-util.workspace = true
|
|||||||
url = "2.3"
|
url = "2.3"
|
||||||
|
|
||||||
[dev-dependencies]
|
[dev-dependencies]
|
||||||
|
common-telemetry.workspace = true
|
||||||
common-test-util.workspace = true
|
common-test-util.workspace = true
|
||||||
|
dotenv.workspace = true
|
||||||
|
uuid.workspace = true
|
||||||
|
|||||||
@@ -92,34 +92,44 @@ impl CompressionType {
|
|||||||
macro_rules! impl_compression_type {
|
macro_rules! impl_compression_type {
|
||||||
($(($enum_item:ident, $prefix:ident)),*) => {
|
($(($enum_item:ident, $prefix:ident)),*) => {
|
||||||
paste::item! {
|
paste::item! {
|
||||||
|
use bytes::{Buf, BufMut, BytesMut};
|
||||||
|
|
||||||
impl CompressionType {
|
impl CompressionType {
|
||||||
pub async fn encode(&self, content: impl AsRef<[u8]>) -> io::Result<Vec<u8>> {
|
pub async fn encode<B: Buf>(&self, mut content: B) -> io::Result<Vec<u8>> {
|
||||||
match self {
|
match self {
|
||||||
$(
|
$(
|
||||||
CompressionType::$enum_item => {
|
CompressionType::$enum_item => {
|
||||||
let mut buffer = Vec::with_capacity(content.as_ref().len());
|
let mut buffer = Vec::with_capacity(content.remaining());
|
||||||
let mut encoder = write::[<$prefix Encoder>]::new(&mut buffer);
|
let mut encoder = write::[<$prefix Encoder>]::new(&mut buffer);
|
||||||
encoder.write_all(content.as_ref()).await?;
|
encoder.write_all_buf(&mut content).await?;
|
||||||
encoder.shutdown().await?;
|
encoder.shutdown().await?;
|
||||||
Ok(buffer)
|
Ok(buffer)
|
||||||
}
|
}
|
||||||
)*
|
)*
|
||||||
CompressionType::Uncompressed => Ok(content.as_ref().to_vec()),
|
CompressionType::Uncompressed => {
|
||||||
|
let mut bs = BytesMut::with_capacity(content.remaining());
|
||||||
|
bs.put(content);
|
||||||
|
Ok(bs.to_vec())
|
||||||
|
},
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
pub async fn decode(&self, content: impl AsRef<[u8]>) -> io::Result<Vec<u8>> {
|
pub async fn decode<B: Buf>(&self, mut content: B) -> io::Result<Vec<u8>> {
|
||||||
match self {
|
match self {
|
||||||
$(
|
$(
|
||||||
CompressionType::$enum_item => {
|
CompressionType::$enum_item => {
|
||||||
let mut buffer = Vec::with_capacity(content.as_ref().len() * 2);
|
let mut buffer = Vec::with_capacity(content.remaining() * 2);
|
||||||
let mut encoder = write::[<$prefix Decoder>]::new(&mut buffer);
|
let mut encoder = write::[<$prefix Decoder>]::new(&mut buffer);
|
||||||
encoder.write_all(content.as_ref()).await?;
|
encoder.write_all_buf(&mut content).await?;
|
||||||
encoder.shutdown().await?;
|
encoder.shutdown().await?;
|
||||||
Ok(buffer)
|
Ok(buffer)
|
||||||
}
|
}
|
||||||
)*
|
)*
|
||||||
CompressionType::Uncompressed => Ok(content.as_ref().to_vec()),
|
CompressionType::Uncompressed => {
|
||||||
|
let mut bs = BytesMut::with_capacity(content.remaining());
|
||||||
|
bs.put(content);
|
||||||
|
Ok(bs.to_vec())
|
||||||
|
},
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -151,13 +161,13 @@ macro_rules! impl_compression_type {
|
|||||||
$(
|
$(
|
||||||
#[tokio::test]
|
#[tokio::test]
|
||||||
async fn [<test_ $enum_item:lower _compression>]() {
|
async fn [<test_ $enum_item:lower _compression>]() {
|
||||||
let string = "foo_bar".as_bytes().to_vec();
|
let string = "foo_bar".as_bytes();
|
||||||
let compress = CompressionType::$enum_item
|
let compress = CompressionType::$enum_item
|
||||||
.encode(&string)
|
.encode(string)
|
||||||
.await
|
.await
|
||||||
.unwrap();
|
.unwrap();
|
||||||
let decompress = CompressionType::$enum_item
|
let decompress = CompressionType::$enum_item
|
||||||
.decode(&compress)
|
.decode(compress.as_slice())
|
||||||
.await
|
.await
|
||||||
.unwrap();
|
.unwrap();
|
||||||
assert_eq!(decompress, string);
|
assert_eq!(decompress, string);
|
||||||
@@ -165,13 +175,13 @@ macro_rules! impl_compression_type {
|
|||||||
|
|
||||||
#[tokio::test]
|
#[tokio::test]
|
||||||
async fn test_uncompression() {
|
async fn test_uncompression() {
|
||||||
let string = "foo_bar".as_bytes().to_vec();
|
let string = "foo_bar".as_bytes();
|
||||||
let compress = CompressionType::Uncompressed
|
let compress = CompressionType::Uncompressed
|
||||||
.encode(&string)
|
.encode(string)
|
||||||
.await
|
.await
|
||||||
.unwrap();
|
.unwrap();
|
||||||
let decompress = CompressionType::Uncompressed
|
let decompress = CompressionType::Uncompressed
|
||||||
.decode(&compress)
|
.decode(compress.as_slice())
|
||||||
.await
|
.await
|
||||||
.unwrap();
|
.unwrap();
|
||||||
assert_eq!(decompress, string);
|
assert_eq!(decompress, string);
|
||||||
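With the Buf-based signatures, callers can hand Bytes, &[u8] or BytesMut to encode/decode directly instead of materializing a Vec first. A usage sketch, assuming the common-datasource crate and a tokio runtime; Uncompressed is used so the example does not depend on a particular codec variant, but the generated variants take the same arguments:

use bytes::Bytes;
use common_datasource::compression::CompressionType;

#[tokio::main]
async fn main() -> std::io::Result<()> {
    let payload = Bytes::from_static(b"foo_bar");
    // Bytes implements Buf, so it can be passed without an intermediate Vec.
    let encoded = CompressionType::Uncompressed.encode(payload.clone()).await?;
    // &[u8] implements Buf as well, so the encoded buffer can be decoded as a slice.
    let decoded = CompressionType::Uncompressed.decode(encoded.as_slice()).await?;
    assert_eq!(decoded, payload.to_vec());
    Ok(())
}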
|
|||||||
@@ -36,6 +36,7 @@ use datafusion::physical_plan::SendableRecordBatchStream;
|
|||||||
use futures::StreamExt;
|
use futures::StreamExt;
|
||||||
use object_store::ObjectStore;
|
use object_store::ObjectStore;
|
||||||
use snafu::ResultExt;
|
use snafu::ResultExt;
|
||||||
|
use tokio_util::compat::FuturesAsyncWriteCompatExt;
|
||||||
|
|
||||||
use self::csv::CsvFormat;
|
use self::csv::CsvFormat;
|
||||||
use self::json::JsonFormat;
|
use self::json::JsonFormat;
|
||||||
@@ -45,6 +46,7 @@ use crate::buffered_writer::{DfRecordBatchEncoder, LazyBufferedWriter};
|
|||||||
use crate::compression::CompressionType;
|
use crate::compression::CompressionType;
|
||||||
use crate::error::{self, Result};
|
use crate::error::{self, Result};
|
||||||
use crate::share_buffer::SharedBuffer;
|
use crate::share_buffer::SharedBuffer;
|
||||||
|
use crate::DEFAULT_WRITE_BUFFER_SIZE;
|
||||||
|
|
||||||
pub const FORMAT_COMPRESSION_TYPE: &str = "compression_type";
|
pub const FORMAT_COMPRESSION_TYPE: &str = "compression_type";
|
||||||
pub const FORMAT_DELIMITER: &str = "delimiter";
|
pub const FORMAT_DELIMITER: &str = "delimiter";
|
||||||
@@ -146,7 +148,8 @@ pub fn open_with_decoder<T: ArrowDecoder, F: Fn() -> DataFusionResult<T>>(
|
|||||||
let reader = object_store
|
let reader = object_store
|
||||||
.reader(&path)
|
.reader(&path)
|
||||||
.await
|
.await
|
||||||
.map_err(|e| DataFusionError::External(Box::new(e)))?;
|
.map_err(|e| DataFusionError::External(Box::new(e)))?
|
||||||
|
.into_bytes_stream(..);
|
||||||
|
|
||||||
let mut upstream = compression_type.convert_stream(reader).fuse();
|
let mut upstream = compression_type.convert_stream(reader).fuse();
|
||||||
|
|
||||||
@@ -202,7 +205,9 @@ pub async fn stream_to_file<T: DfRecordBatchEncoder, U: Fn(SharedBuffer) -> T>(
|
|||||||
store
|
store
|
||||||
.writer_with(&path)
|
.writer_with(&path)
|
||||||
.concurrent(concurrency)
|
.concurrent(concurrency)
|
||||||
|
.chunk(DEFAULT_WRITE_BUFFER_SIZE.as_bytes() as usize)
|
||||||
.await
|
.await
|
||||||
|
.map(|v| v.into_futures_async_write().compat_write())
|
||||||
.context(error::WriteObjectSnafu { path })
|
.context(error::WriteObjectSnafu { path })
|
||||||
});
|
});
|
||||||
|
|
||||||
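The write path above now configures the OpenDAL writer with a chunk size and adapts it to tokio via compat_write(). A sketch of that chain in isolation, assuming object_store re-exports the OpenDAL operator as these files use it; the path and buffer values are illustrative only:

use object_store::ObjectStore; // assumed re-export of the OpenDAL operator, as used above
use tokio::io::AsyncWriteExt;
use tokio_util::compat::FuturesAsyncWriteCompatExt;

async fn write_example(store: &ObjectStore) -> Result<(), Box<dyn std::error::Error>> {
    let mut writer = store
        .writer_with("demo/output.csv")
        .concurrent(8)
        .chunk(8 * 1024 * 1024) // mirrors DEFAULT_WRITE_BUFFER_SIZE (8 MiB)
        .await?
        .into_futures_async_write()
        .compat_write();
    writer.write_all(b"a,b,c\n").await?;
    writer.shutdown().await?;
    Ok(())
}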
|
|||||||
@@ -29,6 +29,7 @@ use datafusion::physical_plan::SendableRecordBatchStream;
|
|||||||
use derive_builder::Builder;
|
use derive_builder::Builder;
|
||||||
use object_store::ObjectStore;
|
use object_store::ObjectStore;
|
||||||
use snafu::ResultExt;
|
use snafu::ResultExt;
|
||||||
|
use tokio_util::compat::FuturesAsyncReadCompatExt;
|
||||||
use tokio_util::io::SyncIoBridge;
|
use tokio_util::io::SyncIoBridge;
|
||||||
|
|
||||||
use super::stream_to_file;
|
use super::stream_to_file;
|
||||||
@@ -164,10 +165,16 @@ impl FileOpener for CsvOpener {
|
|||||||
#[async_trait]
|
#[async_trait]
|
||||||
impl FileFormat for CsvFormat {
|
impl FileFormat for CsvFormat {
|
||||||
async fn infer_schema(&self, store: &ObjectStore, path: &str) -> Result<Schema> {
|
async fn infer_schema(&self, store: &ObjectStore, path: &str) -> Result<Schema> {
|
||||||
|
let meta = store
|
||||||
|
.stat(path)
|
||||||
|
.await
|
||||||
|
.context(error::ReadObjectSnafu { path })?;
|
||||||
let reader = store
|
let reader = store
|
||||||
.reader(path)
|
.reader(path)
|
||||||
.await
|
.await
|
||||||
.context(error::ReadObjectSnafu { path })?;
|
.context(error::ReadObjectSnafu { path })?
|
||||||
|
.into_futures_async_read(0..meta.content_length())
|
||||||
|
.compat();
|
||||||
|
|
||||||
let decoded = self.compression_type.convert_async_read(reader);
|
let decoded = self.compression_type.convert_async_read(reader);
|
||||||
|
|
||||||
|
|||||||
@@ -31,6 +31,7 @@ use datafusion::error::{DataFusionError, Result as DataFusionResult};
|
|||||||
use datafusion::physical_plan::SendableRecordBatchStream;
|
use datafusion::physical_plan::SendableRecordBatchStream;
|
||||||
use object_store::ObjectStore;
|
use object_store::ObjectStore;
|
||||||
use snafu::ResultExt;
|
use snafu::ResultExt;
|
||||||
|
use tokio_util::compat::FuturesAsyncReadCompatExt;
|
||||||
use tokio_util::io::SyncIoBridge;
|
use tokio_util::io::SyncIoBridge;
|
||||||
|
|
||||||
use super::stream_to_file;
|
use super::stream_to_file;
|
||||||
@@ -82,10 +83,16 @@ impl Default for JsonFormat {
|
|||||||
#[async_trait]
|
#[async_trait]
|
||||||
impl FileFormat for JsonFormat {
|
impl FileFormat for JsonFormat {
|
||||||
async fn infer_schema(&self, store: &ObjectStore, path: &str) -> Result<Schema> {
|
async fn infer_schema(&self, store: &ObjectStore, path: &str) -> Result<Schema> {
|
||||||
|
let meta = store
|
||||||
|
.stat(path)
|
||||||
|
.await
|
||||||
|
.context(error::ReadObjectSnafu { path })?;
|
||||||
let reader = store
|
let reader = store
|
||||||
.reader(path)
|
.reader(path)
|
||||||
.await
|
.await
|
||||||
.context(error::ReadObjectSnafu { path })?;
|
.context(error::ReadObjectSnafu { path })?
|
||||||
|
.into_futures_async_read(0..meta.content_length())
|
||||||
|
.compat();
|
||||||
|
|
||||||
let decoded = self.compression_type.convert_async_read(reader);
|
let decoded = self.compression_type.convert_async_read(reader);
|
||||||
|
|
||||||
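The CSV and JSON schema inference above share one new read pattern: stat() supplies the object length, into_futures_async_read(range) turns the reader into an AsyncRead, and compat() adapts it for tokio consumers. A sketch of that pattern on its own; the helper name is hypothetical:

use object_store::ObjectStore; // assumed re-export of the OpenDAL operator, as used above
use tokio::io::AsyncReadExt;
use tokio_util::compat::FuturesAsyncReadCompatExt;

// Hypothetical helper: read a whole object through the new range-based reader.
async fn read_all(store: &ObjectStore, path: &str) -> Result<Vec<u8>, Box<dyn std::error::Error>> {
    let meta = store.stat(path).await?;
    let mut reader = store
        .reader(path)
        .await?
        .into_futures_async_read(0..meta.content_length())
        .compat();
    let mut buf = Vec::with_capacity(meta.content_length() as usize);
    reader.read_to_end(&mut buf).await?;
    Ok(buf)
}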
|
|||||||
@@ -16,15 +16,17 @@ use std::sync::Arc;
|
|||||||
|
|
||||||
use arrow_schema::{ArrowError, Schema, SchemaRef};
|
use arrow_schema::{ArrowError, Schema, SchemaRef};
|
||||||
use async_trait::async_trait;
|
use async_trait::async_trait;
|
||||||
|
use bytes::Bytes;
|
||||||
use common_recordbatch::adapter::RecordBatchStreamTypeAdapter;
|
use common_recordbatch::adapter::RecordBatchStreamTypeAdapter;
|
||||||
use datafusion::datasource::physical_plan::{FileMeta, FileOpenFuture, FileOpener};
|
use datafusion::datasource::physical_plan::{FileMeta, FileOpenFuture, FileOpener};
|
||||||
use datafusion::error::{DataFusionError, Result as DfResult};
|
use datafusion::error::{DataFusionError, Result as DfResult};
|
||||||
use futures::{StreamExt, TryStreamExt};
|
use futures::future::BoxFuture;
|
||||||
|
use futures::{FutureExt, StreamExt, TryStreamExt};
|
||||||
use object_store::ObjectStore;
|
use object_store::ObjectStore;
|
||||||
use orc_rust::arrow_reader::ArrowReaderBuilder;
|
use orc_rust::arrow_reader::ArrowReaderBuilder;
|
||||||
use orc_rust::async_arrow_reader::ArrowStreamReader;
|
use orc_rust::async_arrow_reader::ArrowStreamReader;
|
||||||
|
use orc_rust::reader::AsyncChunkReader;
|
||||||
use snafu::ResultExt;
|
use snafu::ResultExt;
|
||||||
use tokio::io::{AsyncRead, AsyncSeek};
|
|
||||||
|
|
||||||
use crate::error::{self, Result};
|
use crate::error::{self, Result};
|
||||||
use crate::file_format::FileFormat;
|
use crate::file_format::FileFormat;
|
||||||
@@ -32,18 +34,49 @@ use crate::file_format::FileFormat;
|
|||||||
#[derive(Debug, Clone, Copy, PartialEq, Eq, Default)]
|
#[derive(Debug, Clone, Copy, PartialEq, Eq, Default)]
|
||||||
pub struct OrcFormat;
|
pub struct OrcFormat;
|
||||||
|
|
||||||
pub async fn new_orc_stream_reader<R: AsyncRead + AsyncSeek + Unpin + Send + 'static>(
|
#[derive(Clone)]
|
||||||
reader: R,
|
pub struct ReaderAdapter {
|
||||||
) -> Result<ArrowStreamReader<R>> {
|
reader: object_store::Reader,
|
||||||
|
len: u64,
|
||||||
|
}
|
||||||
|
|
||||||
|
impl ReaderAdapter {
|
||||||
|
pub fn new(reader: object_store::Reader, len: u64) -> Self {
|
||||||
|
Self { reader, len }
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
impl AsyncChunkReader for ReaderAdapter {
|
||||||
|
fn len(&mut self) -> BoxFuture<'_, std::io::Result<u64>> {
|
||||||
|
async move { Ok(self.len) }.boxed()
|
||||||
|
}
|
||||||
|
|
||||||
|
fn get_bytes(
|
||||||
|
&mut self,
|
||||||
|
offset_from_start: u64,
|
||||||
|
length: u64,
|
||||||
|
) -> BoxFuture<'_, std::io::Result<Bytes>> {
|
||||||
|
async move {
|
||||||
|
let bytes = self
|
||||||
|
.reader
|
||||||
|
.read(offset_from_start..offset_from_start + length)
|
||||||
|
.await?;
|
||||||
|
Ok(bytes.to_bytes())
|
||||||
|
}
|
||||||
|
.boxed()
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
pub async fn new_orc_stream_reader(
|
||||||
|
reader: ReaderAdapter,
|
||||||
|
) -> Result<ArrowStreamReader<ReaderAdapter>> {
|
||||||
let reader_build = ArrowReaderBuilder::try_new_async(reader)
|
let reader_build = ArrowReaderBuilder::try_new_async(reader)
|
||||||
.await
|
.await
|
||||||
.context(error::OrcReaderSnafu)?;
|
.context(error::OrcReaderSnafu)?;
|
||||||
Ok(reader_build.build_async())
|
Ok(reader_build.build_async())
|
||||||
}
|
}
|
||||||
|
|
||||||
pub async fn infer_orc_schema<R: AsyncRead + AsyncSeek + Unpin + Send + 'static>(
|
pub async fn infer_orc_schema(reader: ReaderAdapter) -> Result<Schema> {
|
||||||
reader: R,
|
|
||||||
) -> Result<Schema> {
|
|
||||||
let reader = new_orc_stream_reader(reader).await?;
|
let reader = new_orc_stream_reader(reader).await?;
|
||||||
Ok(reader.schema().as_ref().clone())
|
Ok(reader.schema().as_ref().clone())
|
||||||
}
|
}
|
||||||
@@ -51,13 +84,15 @@ pub async fn infer_orc_schema<R: AsyncRead + AsyncSeek + Unpin + Send + 'static>
|
|||||||
#[async_trait]
|
#[async_trait]
|
||||||
impl FileFormat for OrcFormat {
|
impl FileFormat for OrcFormat {
|
||||||
async fn infer_schema(&self, store: &ObjectStore, path: &str) -> Result<Schema> {
|
async fn infer_schema(&self, store: &ObjectStore, path: &str) -> Result<Schema> {
|
||||||
|
let meta = store
|
||||||
|
.stat(path)
|
||||||
|
.await
|
||||||
|
.context(error::ReadObjectSnafu { path })?;
|
||||||
let reader = store
|
let reader = store
|
||||||
.reader(path)
|
.reader(path)
|
||||||
.await
|
.await
|
||||||
.context(error::ReadObjectSnafu { path })?;
|
.context(error::ReadObjectSnafu { path })?;
|
||||||
|
let schema = infer_orc_schema(ReaderAdapter::new(reader, meta.content_length())).await?;
|
||||||
let schema = infer_orc_schema(reader).await?;
|
|
||||||
|
|
||||||
Ok(schema)
|
Ok(schema)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
@@ -97,15 +132,23 @@ impl FileOpener for OrcOpener {
|
|||||||
};
|
};
|
||||||
let projection = self.projection.clone();
|
let projection = self.projection.clone();
|
||||||
Ok(Box::pin(async move {
|
Ok(Box::pin(async move {
|
||||||
let reader = object_store
|
let path = meta.location().to_string();
|
||||||
.reader(meta.location().to_string().as_str())
|
|
||||||
|
let meta = object_store
|
||||||
|
.stat(&path)
|
||||||
.await
|
.await
|
||||||
.map_err(|e| DataFusionError::External(Box::new(e)))?;
|
.map_err(|e| DataFusionError::External(Box::new(e)))?;
|
||||||
|
|
||||||
let stream_reader = new_orc_stream_reader(reader)
|
let reader = object_store
|
||||||
|
.reader(&path)
|
||||||
.await
|
.await
|
||||||
.map_err(|e| DataFusionError::External(Box::new(e)))?;
|
.map_err(|e| DataFusionError::External(Box::new(e)))?;
|
||||||
|
|
||||||
|
let stream_reader =
|
||||||
|
new_orc_stream_reader(ReaderAdapter::new(reader, meta.content_length()))
|
||||||
|
.await
|
||||||
|
.map_err(|e| DataFusionError::External(Box::new(e)))?;
|
||||||
|
|
||||||
let stream =
|
let stream =
|
||||||
RecordBatchStreamTypeAdapter::new(projected_schema, stream_reader, projection);
|
RecordBatchStreamTypeAdapter::new(projected_schema, stream_reader, projection);
|
||||||
|
|
||||||
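ReaderAdapter above implements orc-rust's AsyncChunkReader over an object-store reader plus a known length. The same contract backed by an in-memory Bytes buffer makes the two methods easier to see and is handy for unit tests; this assumes the pinned datafusion-orc revision used by the workspace:

use bytes::Bytes;
use futures::future::BoxFuture;
use futures::FutureExt;
use orc_rust::reader::AsyncChunkReader;

// Hypothetical in-memory chunk reader; `data` holds a complete ORC file.
struct InMemoryChunkReader {
    data: Bytes,
}

impl AsyncChunkReader for InMemoryChunkReader {
    fn len(&mut self) -> BoxFuture<'_, std::io::Result<u64>> {
        async move { Ok(self.data.len() as u64) }.boxed()
    }

    fn get_bytes(
        &mut self,
        offset_from_start: u64,
        length: u64,
    ) -> BoxFuture<'_, std::io::Result<Bytes>> {
        async move {
            let end = (offset_from_start + length).min(self.data.len() as u64) as usize;
            let start = (offset_from_start as usize).min(end);
            Ok(self.data.slice(start..end))
        }
        .boxed()
    }
}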
|
|||||||
@@ -16,7 +16,7 @@ use std::result;
|
|||||||
use std::sync::Arc;
|
use std::sync::Arc;
|
||||||
|
|
||||||
use arrow::record_batch::RecordBatch;
|
use arrow::record_batch::RecordBatch;
|
||||||
use arrow_schema::{Schema, SchemaRef};
|
use arrow_schema::Schema;
|
||||||
use async_trait::async_trait;
|
use async_trait::async_trait;
|
||||||
use datafusion::datasource::physical_plan::{FileMeta, ParquetFileReaderFactory};
|
use datafusion::datasource::physical_plan::{FileMeta, ParquetFileReaderFactory};
|
||||||
use datafusion::error::Result as DatafusionResult;
|
use datafusion::error::Result as DatafusionResult;
|
||||||
@@ -29,15 +29,18 @@ use datafusion::physical_plan::metrics::ExecutionPlanMetricsSet;
|
|||||||
use datafusion::physical_plan::SendableRecordBatchStream;
|
use datafusion::physical_plan::SendableRecordBatchStream;
|
||||||
use futures::future::BoxFuture;
|
use futures::future::BoxFuture;
|
||||||
use futures::StreamExt;
|
use futures::StreamExt;
|
||||||
use object_store::{ObjectStore, Reader, Writer};
|
use object_store::{FuturesAsyncReader, ObjectStore};
|
||||||
|
use parquet::arrow::AsyncArrowWriter;
|
||||||
use parquet::basic::{Compression, ZstdLevel};
|
use parquet::basic::{Compression, ZstdLevel};
|
||||||
use parquet::file::properties::WriterProperties;
|
use parquet::file::properties::WriterProperties;
|
||||||
use snafu::ResultExt;
|
use snafu::ResultExt;
|
||||||
|
use tokio_util::compat::{Compat, FuturesAsyncReadCompatExt, FuturesAsyncWriteCompatExt};
|
||||||
|
|
||||||
use crate::buffered_writer::{ArrowWriterCloser, DfRecordBatchEncoder, LazyBufferedWriter};
|
use crate::buffered_writer::{ArrowWriterCloser, DfRecordBatchEncoder};
|
||||||
use crate::error::{self, Result};
|
use crate::error::{self, Result, WriteObjectSnafu, WriteParquetSnafu};
|
||||||
use crate::file_format::FileFormat;
|
use crate::file_format::FileFormat;
|
||||||
use crate::share_buffer::SharedBuffer;
|
use crate::share_buffer::SharedBuffer;
|
||||||
|
use crate::DEFAULT_WRITE_BUFFER_SIZE;
|
||||||
|
|
||||||
#[derive(Debug, Clone, Copy, PartialEq, Eq, Default)]
|
#[derive(Debug, Clone, Copy, PartialEq, Eq, Default)]
|
||||||
pub struct ParquetFormat {}
|
pub struct ParquetFormat {}
|
||||||
@@ -45,10 +48,16 @@ pub struct ParquetFormat {}
|
|||||||
#[async_trait]
|
#[async_trait]
|
||||||
impl FileFormat for ParquetFormat {
|
impl FileFormat for ParquetFormat {
|
||||||
async fn infer_schema(&self, store: &ObjectStore, path: &str) -> Result<Schema> {
|
async fn infer_schema(&self, store: &ObjectStore, path: &str) -> Result<Schema> {
|
||||||
|
let meta = store
|
||||||
|
.stat(path)
|
||||||
|
.await
|
||||||
|
.context(error::ReadObjectSnafu { path })?;
|
||||||
let mut reader = store
|
let mut reader = store
|
||||||
.reader(path)
|
.reader(path)
|
||||||
.await
|
.await
|
||||||
.context(error::ReadObjectSnafu { path })?;
|
.context(error::ReadObjectSnafu { path })?
|
||||||
|
.into_futures_async_read(0..meta.content_length())
|
||||||
|
.compat();
|
||||||
|
|
||||||
let metadata = reader
|
let metadata = reader
|
||||||
.get_metadata()
|
.get_metadata()
|
||||||
@@ -98,7 +107,7 @@ impl ParquetFileReaderFactory for DefaultParquetFileReaderFactory {
|
|||||||
|
|
||||||
pub struct LazyParquetFileReader {
|
pub struct LazyParquetFileReader {
|
||||||
object_store: ObjectStore,
|
object_store: ObjectStore,
|
||||||
reader: Option<Reader>,
|
reader: Option<Compat<FuturesAsyncReader>>,
|
||||||
path: String,
|
path: String,
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -114,7 +123,13 @@ impl LazyParquetFileReader {
|
|||||||
/// Must initialize the reader, or throw an error from the future.
|
/// Must initialize the reader, or throw an error from the future.
|
||||||
async fn maybe_initialize(&mut self) -> result::Result<(), object_store::Error> {
|
async fn maybe_initialize(&mut self) -> result::Result<(), object_store::Error> {
|
||||||
if self.reader.is_none() {
|
if self.reader.is_none() {
|
||||||
let reader = self.object_store.reader(&self.path).await?;
|
let meta = self.object_store.stat(&self.path).await?;
|
||||||
|
let reader = self
|
||||||
|
.object_store
|
||||||
|
.reader(&self.path)
|
||||||
|
.await?
|
||||||
|
.into_futures_async_read(0..meta.content_length())
|
||||||
|
.compat();
|
||||||
self.reader = Some(reader);
|
self.reader = Some(reader);
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -160,72 +175,6 @@ impl ArrowWriterCloser for ArrowWriter<SharedBuffer> {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
/// Parquet writer that buffers row groups in memory and writes buffered data to an underlying
|
|
||||||
/// storage by chunks to reduce memory consumption.
|
|
||||||
pub struct BufferedWriter {
|
|
||||||
inner: InnerBufferedWriter,
|
|
||||||
}
|
|
||||||
|
|
||||||
type InnerBufferedWriter = LazyBufferedWriter<
|
|
||||||
object_store::Writer,
|
|
||||||
ArrowWriter<SharedBuffer>,
|
|
||||||
impl Fn(String) -> BoxFuture<'static, Result<Writer>>,
|
|
||||||
>;
|
|
||||||
|
|
||||||
impl BufferedWriter {
|
|
||||||
fn make_write_factory(
|
|
||||||
store: ObjectStore,
|
|
||||||
concurrency: usize,
|
|
||||||
) -> impl Fn(String) -> BoxFuture<'static, Result<Writer>> {
|
|
||||||
move |path| {
|
|
||||||
let store = store.clone();
|
|
||||||
Box::pin(async move {
|
|
||||||
store
|
|
||||||
.writer_with(&path)
|
|
||||||
.concurrent(concurrency)
|
|
||||||
.await
|
|
||||||
.context(error::WriteObjectSnafu { path })
|
|
||||||
})
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
pub async fn try_new(
|
|
||||||
path: String,
|
|
||||||
store: ObjectStore,
|
|
||||||
arrow_schema: SchemaRef,
|
|
||||||
props: Option<WriterProperties>,
|
|
||||||
buffer_threshold: usize,
|
|
||||||
concurrency: usize,
|
|
||||||
) -> error::Result<Self> {
|
|
||||||
let buffer = SharedBuffer::with_capacity(buffer_threshold);
|
|
||||||
|
|
||||||
let arrow_writer = ArrowWriter::try_new(buffer.clone(), arrow_schema.clone(), props)
|
|
||||||
.context(error::WriteParquetSnafu { path: &path })?;
|
|
||||||
|
|
||||||
Ok(Self {
|
|
||||||
inner: LazyBufferedWriter::new(
|
|
||||||
buffer_threshold,
|
|
||||||
buffer,
|
|
||||||
arrow_writer,
|
|
||||||
&path,
|
|
||||||
Self::make_write_factory(store, concurrency),
|
|
||||||
),
|
|
||||||
})
|
|
||||||
}
|
|
||||||
|
|
||||||
/// Write a record batch to stream writer.
|
|
||||||
pub async fn write(&mut self, arrow_batch: &RecordBatch) -> error::Result<()> {
|
|
||||||
self.inner.write(arrow_batch).await
|
|
||||||
}
|
|
||||||
|
|
||||||
/// Close parquet writer.
|
|
||||||
///
|
|
||||||
/// Return file metadata and bytes written.
|
|
||||||
pub async fn close(self) -> error::Result<(FileMetaData, u64)> {
|
|
||||||
self.inner.close_with_arrow_writer().await
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
/// Output the stream to a parquet file.
|
/// Output the stream to a parquet file.
|
||||||
///
|
///
|
||||||
/// Returns number of rows written.
|
/// Returns number of rows written.
|
||||||
@@ -233,29 +182,33 @@ pub async fn stream_to_parquet(
|
|||||||
mut stream: SendableRecordBatchStream,
|
mut stream: SendableRecordBatchStream,
|
||||||
store: ObjectStore,
|
store: ObjectStore,
|
||||||
path: &str,
|
path: &str,
|
||||||
threshold: usize,
|
|
||||||
concurrency: usize,
|
concurrency: usize,
|
||||||
) -> Result<usize> {
|
) -> Result<usize> {
|
||||||
let write_props = WriterProperties::builder()
|
let write_props = WriterProperties::builder()
|
||||||
.set_compression(Compression::ZSTD(ZstdLevel::default()))
|
.set_compression(Compression::ZSTD(ZstdLevel::default()))
|
||||||
.build();
|
.build();
|
||||||
let schema = stream.schema();
|
let schema = stream.schema();
|
||||||
let mut buffered_writer = BufferedWriter::try_new(
|
let inner_writer = store
|
||||||
path.to_string(),
|
.writer_with(path)
|
||||||
store,
|
.concurrent(concurrency)
|
||||||
schema,
|
.chunk(DEFAULT_WRITE_BUFFER_SIZE.as_bytes() as usize)
|
||||||
Some(write_props),
|
.await
|
||||||
threshold,
|
.map(|w| w.into_futures_async_write().compat_write())
|
||||||
concurrency,
|
.context(WriteObjectSnafu { path })?;
|
||||||
)
|
|
||||||
.await?;
|
let mut writer = AsyncArrowWriter::try_new(inner_writer, schema, Some(write_props))
|
||||||
|
.context(WriteParquetSnafu { path })?;
|
||||||
let mut rows_written = 0;
|
let mut rows_written = 0;
|
||||||
|
|
||||||
while let Some(batch) = stream.next().await {
|
while let Some(batch) = stream.next().await {
|
||||||
let batch = batch.context(error::ReadRecordBatchSnafu)?;
|
let batch = batch.context(error::ReadRecordBatchSnafu)?;
|
||||||
buffered_writer.write(&batch).await?;
|
writer
|
||||||
|
.write(&batch)
|
||||||
|
.await
|
||||||
|
.context(WriteParquetSnafu { path })?;
|
||||||
rows_written += batch.num_rows();
|
rows_written += batch.num_rows();
|
||||||
}
|
}
|
||||||
buffered_writer.close().await?;
|
writer.close().await.context(WriteParquetSnafu { path })?;
|
||||||
Ok(rows_written)
|
Ok(rows_written)
|
||||||
}
|
}
|
||||||
|
|
||||||
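stream_to_parquet now hands an OpenDAL writer straight to parquet's AsyncArrowWriter instead of the removed BufferedWriter. The writer's core loop in isolation, with a Vec<u8> sink and a made-up one-column schema standing in for the real record batch stream (arrow, parquet and tokio assumed from the workspace):

use std::sync::Arc;

use arrow::array::{ArrayRef, Int64Array};
use arrow::record_batch::RecordBatch;
use arrow_schema::{DataType, Field, Schema};
use parquet::arrow::AsyncArrowWriter;
use parquet::basic::{Compression, ZstdLevel};
use parquet::file::properties::WriterProperties;

#[tokio::main]
async fn main() -> Result<(), Box<dyn std::error::Error>> {
    let schema = Arc::new(Schema::new(vec![Field::new("v", DataType::Int64, false)]));
    let batch = RecordBatch::try_new(
        schema.clone(),
        vec![Arc::new(Int64Array::from(vec![1, 2, 3])) as ArrayRef],
    )?;

    let props = WriterProperties::builder()
        .set_compression(Compression::ZSTD(ZstdLevel::default()))
        .build();

    // Any tokio AsyncWrite works as the sink; a Vec<u8> stands in for the
    // chunked OpenDAL writer used in the hunk above.
    let mut writer = AsyncArrowWriter::try_new(Vec::new(), schema, Some(props))?;
    writer.write(&batch).await?;
    writer.close().await?;
    println!("wrote {} rows", batch.num_rows());
    Ok(())
}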
|
|||||||
@@ -27,3 +27,8 @@ pub mod test_util;
|
|||||||
#[cfg(test)]
|
#[cfg(test)]
|
||||||
pub mod tests;
|
pub mod tests;
|
||||||
pub mod util;
|
pub mod util;
|
||||||
|
|
||||||
|
use common_base::readable_size::ReadableSize;
|
||||||
|
|
||||||
|
/// Default write buffer size, it should be greater than the default minimum upload part of S3 (5mb).
|
||||||
|
pub const DEFAULT_WRITE_BUFFER_SIZE: ReadableSize = ReadableSize::mb(8);
|
||||||
@@ -120,7 +120,7 @@ pub async fn setup_stream_to_json_test(origin_path: &str, threshold: impl Fn(usi

    let written = tmp_store.read(&output_path).await.unwrap();
    let origin = store.read(origin_path).await.unwrap();
-    assert_eq_lines(written, origin);
+    assert_eq_lines(written.to_vec(), origin.to_vec());
}

pub async fn setup_stream_to_csv_test(origin_path: &str, threshold: impl Fn(usize) -> usize) {
@@ -158,7 +158,7 @@ pub async fn setup_stream_to_csv_test(origin_path: &str, threshold: impl Fn(usiz

    let written = tmp_store.read(&output_path).await.unwrap();
    let origin = store.read(origin_path).await.unwrap();
-    assert_eq_lines(written, origin);
+    assert_eq_lines(written.to_vec(), origin.to_vec());
}

// Ignore the CRLF difference across operating systems.
@@ -10,3 +10,4 @@ workspace = true
[dependencies]
snafu.workspace = true
strum.workspace = true
+tonic.workspace = true
@@ -15,6 +15,7 @@
use std::fmt;

use strum::{AsRefStr, EnumIter, EnumString, FromRepr};
+use tonic::Code;

/// Common status code for public API.
#[derive(Debug, Clone, Copy, PartialEq, Eq, EnumString, AsRefStr, EnumIter, FromRepr)]
@@ -202,6 +203,75 @@ impl fmt::Display for StatusCode {
    }
}

+#[macro_export]
+macro_rules! define_into_tonic_status {
+    ($Error: ty) => {
+        impl From<$Error> for tonic::Status {
+            fn from(err: $Error) -> Self {
+                use tonic::codegen::http::{HeaderMap, HeaderValue};
+                use tonic::metadata::MetadataMap;
+                use $crate::GREPTIME_DB_HEADER_ERROR_CODE;
+
+                let mut headers = HeaderMap::<HeaderValue>::with_capacity(2);
+
+                // If either of the status_code or error msg cannot convert to valid HTTP header value
+                // (which is a very rare case), just ignore. Client will use Tonic status code and message.
+                let status_code = err.status_code();
+                headers.insert(
+                    GREPTIME_DB_HEADER_ERROR_CODE,
+                    HeaderValue::from(status_code as u32),
+                );
+                let root_error = err.output_msg();
+
+                let metadata = MetadataMap::from_headers(headers);
+                tonic::Status::with_metadata(
+                    $crate::status_code::status_to_tonic_code(status_code),
+                    root_error,
+                    metadata,
+                )
+            }
+        }
+    };
+}
+
+/// Returns the tonic [Code] of a [StatusCode].
+pub fn status_to_tonic_code(status_code: StatusCode) -> Code {
+    match status_code {
+        StatusCode::Success => Code::Ok,
+        StatusCode::Unknown => Code::Unknown,
+        StatusCode::Unsupported => Code::Unimplemented,
+        StatusCode::Unexpected
+        | StatusCode::Internal
+        | StatusCode::PlanQuery
+        | StatusCode::EngineExecuteQuery => Code::Internal,
+        StatusCode::InvalidArguments | StatusCode::InvalidSyntax | StatusCode::RequestOutdated => {
+            Code::InvalidArgument
+        }
+        StatusCode::Cancelled => Code::Cancelled,
+        StatusCode::TableAlreadyExists
+        | StatusCode::TableColumnExists
+        | StatusCode::RegionAlreadyExists
+        | StatusCode::FlowAlreadyExists => Code::AlreadyExists,
+        StatusCode::TableNotFound
+        | StatusCode::RegionNotFound
+        | StatusCode::TableColumnNotFound
+        | StatusCode::DatabaseNotFound
+        | StatusCode::UserNotFound
+        | StatusCode::FlowNotFound => Code::NotFound,
+        StatusCode::StorageUnavailable | StatusCode::RegionNotReady => Code::Unavailable,
+        StatusCode::RuntimeResourcesExhausted
+        | StatusCode::RateLimited
+        | StatusCode::RegionBusy => Code::ResourceExhausted,
+        StatusCode::UnsupportedPasswordType
+        | StatusCode::UserPasswordMismatch
+        | StatusCode::AuthHeaderNotFound
+        | StatusCode::InvalidAuthHeader => Code::Unauthenticated,
+        StatusCode::AccessDenied | StatusCode::PermissionDenied | StatusCode::RegionReadonly => {
+            Code::PermissionDenied
+        }
+    }
+}
+
#[cfg(test)]
mod tests {
    use strum::IntoEnumIterator;
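
For orientation, the macro is meant to be invoked once per error type by downstream crates. The sketch below is illustrative only (the `MyError` type, its inherent methods, and the assumption that the crate in the Cargo.toml hunk above is `common_error` are not part of this change), but it shows the two methods the macro body relies on and what the generated `From` impl yields:

use common_error::define_into_tonic_status;
use common_error::status_code::StatusCode;

#[derive(Debug)]
struct MyError {
    code: StatusCode,
    msg: String,
}

impl MyError {
    fn status_code(&self) -> StatusCode {
        self.code
    }
    fn output_msg(&self) -> String {
        self.msg.clone()
    }
}

// Expands to `impl From<MyError> for tonic::Status`, attaching the numeric
// status code as gRPC metadata and mapping it via `status_to_tonic_code`.
define_into_tonic_status!(MyError);

fn to_grpc_status(err: MyError) -> tonic::Status {
    err.into()
}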
@@ -143,8 +143,6 @@ fn clamp_impl<T: LogicalPrimitiveType, const CLAMP_MIN: bool, const CLAMP_MAX: b
    min: T::Native,
    max: T::Native,
) -> Result<VectorRef> {
-    common_telemetry::info!("[DEBUG] min {min:?}, max {max:?}");
-
    let iter = ArrayIter::new(input);
    let result = iter.map(|x| {
        x.map(|x| {
@@ -44,10 +44,10 @@ struct ProcedureStateJson {
/// A function to query procedure state by its id.
/// Such as `procedure_state(pid)`.
#[admin_fn(
-    name = "ProcedureStateFunction",
-    display_name = "procedure_state",
-    sig_fn = "signature",
-    ret = "string"
+    name = ProcedureStateFunction,
+    display_name = procedure_state,
+    sig_fn = signature,
+    ret = string
)]
pub(crate) async fn procedure_state(
    procedure_service_handler: &ProcedureServiceHandlerRef,
@@ -35,7 +35,7 @@ use crate::helper::cast_u64;
macro_rules! define_region_function {
    ($name: expr, $display_name_str: expr, $display_name: ident) => {
        /// A function to $display_name
-        #[admin_fn(name = $name, display_name = $display_name_str, sig_fn = "signature", ret = "uint64")]
+        #[admin_fn(name = $name, display_name = $display_name_str, sig_fn = signature, ret = uint64)]
        pub(crate) async fn $display_name(
            table_mutation_handler: &TableMutationHandlerRef,
            query_ctx: &QueryContextRef,
@@ -53,7 +53,7 @@ macro_rules! define_region_function {

            let Some(region_id) = cast_u64(&params[0])? else {
                return UnsupportedInputDataTypeSnafu {
-                    function: $display_name_str,
+                    function: stringify!($display_name_str),
                    datatypes: params.iter().map(|v| v.data_type()).collect::<Vec<_>>(),
                }
                .fail();
@@ -68,9 +68,9 @@ macro_rules! define_region_function {
    };
}

-define_region_function!("FlushRegionFunction", "flush_region", flush_region);
+define_region_function!(FlushRegionFunction, flush_region, flush_region);

-define_region_function!("CompactRegionFunction", "compact_region", compact_region);
+define_region_function!(CompactRegionFunction, compact_region, compact_region);

fn signature() -> Signature {
    Signature::uniform(1, ConcreteDataType::numerics(), Volatility::Immutable)
@@ -40,10 +40,10 @@ use crate::handlers::TableMutationHandlerRef;
const COMPACT_TYPE_STRICT_WINDOW: &str = "strict_window";

#[admin_fn(
-    name = "FlushTableFunction",
-    display_name = "flush_table",
-    sig_fn = "flush_signature",
-    ret = "uint64"
+    name = FlushTableFunction,
+    display_name = flush_table,
+    sig_fn = flush_signature,
+    ret = uint64
)]
pub(crate) async fn flush_table(
    table_mutation_handler: &TableMutationHandlerRef,
@@ -87,10 +87,10 @@ pub(crate) async fn flush_table(
}

#[admin_fn(
-    name = "CompactTableFunction",
-    display_name = "compact_table",
-    sig_fn = "compact_signature",
-    ret = "uint64"
+    name = CompactTableFunction,
+    display_name = compact_table,
+    sig_fn = compact_signature,
+    ret = uint64
)]
pub(crate) async fn compact_table(
    table_mutation_handler: &TableMutationHandlerRef,
@@ -46,10 +46,10 @@ const DEFAULT_REPLAY_TIMEOUT_SECS: u64 = 10;
/// - `from_peer`: the source peer id
/// - `to_peer`: the target peer id
#[admin_fn(
-    name = "MigrateRegionFunction",
-    display_name = "migrate_region",
-    sig_fn = "signature",
-    ret = "string"
+    name = MigrateRegionFunction,
+    display_name = migrate_region,
+    sig_fn = signature,
+    ret = string
)]
pub(crate) async fn migrate_region(
    procedure_service_handler: &ProcedureServiceHandlerRef,
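
The net effect of these attribute hunks is that `#[admin_fn]` now takes bare identifiers rather than string literals. A hypothetical declaration under the new form (the function name, parameter types, and body are placeholders following the pattern above, not code from this change):

#[admin_fn(
    name = MyAdminFunction,      // previously: name = "MyAdminFunction"
    display_name = my_admin_fn,  // previously: display_name = "my_admin_fn"
    sig_fn = signature,
    ret = uint64                 // previously: ret = "uint64"
)]
pub(crate) async fn my_admin_fn(
    table_mutation_handler: &TableMutationHandlerRef,
    query_ctx: &QueryContextRef,
    params: &[ValueRef<'_>],
) -> Result<Value> {
    // Body elided; the macro generates a `MyAdminFunction` struct that exposes
    // `my_admin_fn` as an admin function returning a uint64 value.
    todo!()
}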
@@ -25,7 +25,7 @@ prost.workspace = true
snafu.workspace = true
tokio.workspace = true
tonic.workspace = true
-tower = "0.4"
+tower.workspace = true

[dev-dependencies]
criterion = "0.4"
@@ -13,13 +13,7 @@ workspace = true
[dependencies]
proc-macro2 = "1.0.66"
quote = "1.0"
-syn = "1.0"
-syn2 = { version = "2.0", package = "syn", features = [
-    "derive",
-    "parsing",
-    "printing",
-    "clone-impls",
-    "proc-macro",
+syn = { version = "2.0", features = [
    "extra-traits",
    "full",
] }
@@ -16,11 +16,11 @@ use proc_macro::TokenStream;
use quote::quote;
use syn::spanned::Spanned;
use syn::{
-    parse_macro_input, Attribute, AttributeArgs, Ident, ItemFn, Signature, Type, TypePath,
-    TypeReference, Visibility,
+    parse_macro_input, Attribute, Ident, ItemFn, Signature, Type, TypePath, TypeReference,
+    Visibility,
};

-use crate::utils::{extract_arg_map, extract_input_types, get_ident};
+use crate::utils::extract_input_types;

/// Internal util macro to early return on error.
macro_rules! ok {
@@ -40,12 +40,31 @@ macro_rules! error {
}

pub(crate) fn process_admin_fn(args: TokenStream, input: TokenStream) -> TokenStream {
-    let mut result = TokenStream::new();
+    let mut name: Option<Ident> = None;
+    let mut display_name: Option<Ident> = None;
+    let mut sig_fn: Option<Ident> = None;
+    let mut ret: Option<Ident> = None;
+
+    let parser = syn::meta::parser(|meta| {
+        if meta.path.is_ident("name") {
+            name = Some(meta.value()?.parse()?);
+            Ok(())
+        } else if meta.path.is_ident("display_name") {
+            display_name = Some(meta.value()?.parse()?);
+            Ok(())
+        } else if meta.path.is_ident("sig_fn") {
+            sig_fn = Some(meta.value()?.parse()?);
+            Ok(())
+        } else if meta.path.is_ident("ret") {
+            ret = Some(meta.value()?.parse()?);
+            Ok(())
+        } else {
+            Err(meta.error("unsupported property"))
+        }
+    });

    // extract arg map
-    let arg_pairs = parse_macro_input!(args as AttributeArgs);
-    let arg_span = arg_pairs[0].span();
-    let arg_map = ok!(extract_arg_map(arg_pairs));
+    parse_macro_input!(args with parser);

    // decompose the fn block
    let compute_fn = parse_macro_input!(input as ItemFn);
@@ -72,16 +91,17 @@ pub(crate) fn process_admin_fn(args: TokenStream, input: TokenStream) -> TokenSt
    }
    let handler_type = ok!(extract_handler_type(&arg_types));

+    let mut result = TokenStream::new();
    // build the struct and its impl block
    // only do this when `display_name` is specified
-    if let Ok(display_name) = get_ident(&arg_map, "display_name", arg_span) {
+    if let Some(display_name) = display_name {
        let struct_code = build_struct(
            attrs,
            vis,
            fn_name,
-            ok!(get_ident(&arg_map, "name", arg_span)),
-            ok!(get_ident(&arg_map, "sig_fn", arg_span)),
-            ok!(get_ident(&arg_map, "ret", arg_span)),
+            name.expect("name required"),
+            sig_fn.expect("sig_fn required"),
+            ret.expect("ret required"),
            handler_type,
            display_name,
        );
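
The replacement for syn 1.x's `AttributeArgs`/`extract_arg_map` pattern is syn 2.0's `syn::meta::parser`, driven through `parse_macro_input!(args with parser)`. Stripped of the project specifics, the pattern looks roughly like this inside a proc-macro crate (a minimal sketch of the syn 2 API, not additional code from this commit):

use proc_macro::TokenStream;
use syn::{parse_macro_input, Ident};

#[proc_macro_attribute]
pub fn my_attr(args: TokenStream, input: TokenStream) -> TokenStream {
    let mut name: Option<Ident> = None;

    // Each callback invocation sees one `key = value` entry of the attribute.
    let parser = syn::meta::parser(|meta| {
        if meta.path.is_ident("name") {
            name = Some(meta.value()?.parse()?);
            Ok(())
        } else {
            Err(meta.error("unsupported property"))
        }
    });
    // Runs the parser over `args`, emitting a compile error on failure.
    parse_macro_input!(args with parser);

    // ...use `name` to generate code; here the item is returned unchanged.
    input
}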
@@ -14,28 +14,24 @@

use proc_macro::TokenStream;
use quote::{quote, ToTokens};
-use syn::{parse_macro_input, AttributeArgs, ItemFn, Lit, Meta, NestedMeta};
+use syn::{parse_macro_input, ItemFn, LitInt};

pub(crate) fn process_print_caller(args: TokenStream, input: TokenStream) -> TokenStream {
    let mut depth = 1;
-    let args = parse_macro_input!(args as AttributeArgs);
-    for meta in args.iter() {
-        if let NestedMeta::Meta(Meta::NameValue(name_value)) = meta {
-            let ident = name_value
-                .path
-                .get_ident()
-                .expect("Expected an ident!")
-                .to_string();
-            if ident == "depth" {
-                let Lit::Int(i) = &name_value.lit else {
-                    panic!("Expected 'depth' to be a valid int!")
-                };
-                depth = i.base10_parse::<usize>().expect("Invalid 'depth' value");
-                break;
-            }
+    let parser = syn::meta::parser(|meta| {
+        if meta.path.is_ident("depth") {
+            depth = meta
+                .value()?
+                .parse::<LitInt>()
+                .and_then(|v| v.base10_parse::<usize>())
+                .expect("Invalid 'depth' value");
+            Ok(())
+        } else {
+            Err(meta.error("unsupported property"))
        }
-    }
+    });

+    parse_macro_input!(args with parser);

    let tokens: TokenStream = quote! {
        {
@@ -16,11 +16,10 @@ use proc_macro::TokenStream;
use quote::quote;
use syn::spanned::Spanned;
use syn::{
-    parse_macro_input, Attribute, AttributeArgs, Ident, ItemFn, Signature, Type, TypeReference,
-    Visibility,
+    parse_macro_input, Attribute, Ident, ItemFn, Signature, Type, TypeReference, Visibility,
};

-use crate::utils::{extract_arg_map, extract_input_types, get_ident};
+use crate::utils::extract_input_types;

macro_rules! ok {
    ($item:expr) => {
@@ -32,12 +31,27 @@ macro_rules! ok {
}

pub(crate) fn process_range_fn(args: TokenStream, input: TokenStream) -> TokenStream {
-    let mut result = TokenStream::new();
+    let mut name: Option<Ident> = None;
+    let mut display_name: Option<Ident> = None;
+    let mut ret: Option<Ident> = None;
+
+    let parser = syn::meta::parser(|meta| {
+        if meta.path.is_ident("name") {
+            name = Some(meta.value()?.parse()?);
+            Ok(())
+        } else if meta.path.is_ident("display_name") {
+            display_name = Some(meta.value()?.parse()?);
+            Ok(())
+        } else if meta.path.is_ident("ret") {
+            ret = Some(meta.value()?.parse()?);
+            Ok(())
+        } else {
+            Err(meta.error("unsupported property"))
+        }
+    });

    // extract arg map
-    let arg_pairs = parse_macro_input!(args as AttributeArgs);
-    let arg_span = arg_pairs[0].span();
-    let arg_map = ok!(extract_arg_map(arg_pairs));
+    parse_macro_input!(args with parser);

    // decompose the fn block
    let compute_fn = parse_macro_input!(input as ItemFn);
@@ -68,25 +82,27 @@ pub(crate) fn process_range_fn(args: TokenStream, input: TokenStream) -> TokenSt
    })
    .collect::<Vec<_>>();

+    let mut result = TokenStream::new();
+
    // build the struct and its impl block
    // only do this when `display_name` is specified
-    if let Ok(display_name) = get_ident(&arg_map, "display_name", arg_span) {
+    if let Some(display_name) = display_name {
        let struct_code = build_struct(
            attrs,
            vis,
-            ok!(get_ident(&arg_map, "name", arg_span)),
+            name.clone().expect("name required"),
            display_name,
            array_types,
-            ok!(get_ident(&arg_map, "ret", arg_span)),
+            ret.clone().expect("ret required"),
        );
        result.extend(struct_code);
    }

    let calc_fn_code = build_calc_fn(
-        ok!(get_ident(&arg_map, "name", arg_span)),
+        name.expect("name required"),
        arg_types,
        fn_name.clone(),
-        ok!(get_ident(&arg_map, "ret", arg_span)),
+        ret.expect("ret required"),
    );
    // preserve this fn, but remove its `pub` modifier
    let input_fn_code: TokenStream = quote! {
@@ -16,13 +16,13 @@

use proc_macro2::{Span, TokenStream as TokenStream2};
use quote::{quote, quote_spanned};
-use syn2::spanned::Spanned;
-use syn2::{parenthesized, Attribute, Ident, ItemEnum, Variant};
+use syn::spanned::Spanned;
+use syn::{parenthesized, Attribute, Ident, ItemEnum, Variant};

pub fn stack_trace_style_impl(args: TokenStream2, input: TokenStream2) -> TokenStream2 {
    let input_cloned: TokenStream2 = input.clone();

-    let error_enum_definition: ItemEnum = syn2::parse2(input_cloned).unwrap();
+    let error_enum_definition: ItemEnum = syn::parse2(input_cloned).unwrap();
    let enum_name = error_enum_definition.ident;

    let mut variants = vec![];
@@ -12,48 +12,10 @@
// See the License for the specific language governing permissions and
// limitations under the License.

-use std::collections::HashMap;
-
-use proc_macro2::Span;
use syn::punctuated::Punctuated;
use syn::spanned::Spanned;
use syn::token::Comma;
-use syn::{FnArg, Ident, Meta, MetaNameValue, NestedMeta, Type};
+use syn::{FnArg, Type};

-/// Extract a String <-> Ident map from the attribute args.
-pub(crate) fn extract_arg_map(args: Vec<NestedMeta>) -> Result<HashMap<String, Ident>, syn::Error> {
-    args.into_iter()
-        .map(|meta| {
-            if let NestedMeta::Meta(Meta::NameValue(MetaNameValue { path, lit, .. })) = meta {
-                let name = path.get_ident().unwrap().to_string();
-                let ident = match lit {
-                    syn::Lit::Str(lit_str) => lit_str.parse::<Ident>(),
-                    _ => Err(syn::Error::new(
-                        lit.span(),
-                        "Unexpected attribute format. Expected `name = \"value\"`",
-                    )),
-                }?;
-                Ok((name, ident))
-            } else {
-                Err(syn::Error::new(
-                    meta.span(),
-                    "Unexpected attribute format. Expected `name = \"value\"`",
-                ))
-            }
-        })
-        .collect::<Result<HashMap<String, Ident>, syn::Error>>()
-}
-
-/// Helper function to get an Ident from the previous arg map.
-pub(crate) fn get_ident(
-    map: &HashMap<String, Ident>,
-    key: &str,
-    span: Span,
-) -> Result<Ident, syn::Error> {
-    map.get(key)
-        .cloned()
-        .ok_or_else(|| syn::Error::new(span, format!("Expect attribute {key} but not found")))
-}
-
/// Extract the argument list from the annotated function.
pub(crate) fn extract_input_types(
@@ -25,11 +25,13 @@ common-grpc-expr.workspace = true
common-macro.workspace = true
common-procedure.workspace = true
common-procedure-test.workspace = true
+common-query.workspace = true
common-recordbatch.workspace = true
common-telemetry.workspace = true
common-time.workspace = true
common-wal.workspace = true
datafusion-common.workspace = true
+datafusion-expr.workspace = true
datatypes.workspace = true
derive_builder.workspace = true
etcd-client.workspace = true
@@ -24,7 +24,7 @@ pub use registry::{
    LayeredCacheRegistryBuilder, LayeredCacheRegistryRef,
};
pub use table::{
-    new_table_info_cache, new_table_name_cache, new_table_route_cache, TableInfoCache,
-    TableInfoCacheRef, TableNameCache, TableNameCacheRef, TableRoute, TableRouteCache,
-    TableRouteCacheRef,
+    new_table_info_cache, new_table_name_cache, new_table_route_cache, new_view_info_cache,
+    TableInfoCache, TableInfoCacheRef, TableNameCache, TableNameCacheRef, TableRoute,
+    TableRouteCache, TableRouteCacheRef, ViewInfoCache, ViewInfoCacheRef,
};
@@ -145,13 +145,13 @@ mod tests {

    use common_catalog::consts::{DEFAULT_CATALOG_NAME, DEFAULT_SCHEMA_NAME};
    use moka::future::CacheBuilder;
+    use table::table_name::TableName;

    use crate::cache::flow::table_flownode::new_table_flownode_set_cache;
    use crate::instruction::{CacheIdent, CreateFlow, DropFlow};
    use crate::key::flow::flow_info::FlowInfoValue;
    use crate::key::flow::FlowMetadataManager;
    use crate::kv_backend::memory::MemoryKvBackend;
-    use crate::table_name::TableName;

    #[tokio::test]
    async fn test_cache_empty_set() {

src/common/meta/src/cache/table.rs
@@ -15,6 +15,9 @@
mod table_info;
mod table_name;
mod table_route;
+mod view_info;

pub use table_info::{new_table_info_cache, TableInfoCache, TableInfoCacheRef};
pub use table_name::{new_table_name_cache, TableNameCache, TableNameCacheRef};
pub use table_route::{new_table_route_cache, TableRoute, TableRouteCache, TableRouteCacheRef};
+pub use view_info::{new_view_info_cache, ViewInfoCache, ViewInfoCacheRef};
@@ -18,6 +18,7 @@ use futures::future::BoxFuture;
use moka::future::Cache;
use snafu::OptionExt;
use table::metadata::TableId;
+use table::table_name::TableName;

use crate::cache::{CacheContainer, Initializer};
use crate::error;
@@ -25,7 +26,6 @@ use crate::error::Result;
use crate::instruction::CacheIdent;
use crate::key::table_name::{TableNameKey, TableNameManager, TableNameManagerRef};
use crate::kv_backend::KvBackendRef;
-use crate::table_name::TableName;

/// [TableNameCache] caches the [TableName] to [TableId] mapping.
pub type TableNameCache = CacheContainer<TableName, TableId, CacheIdent>;

src/common/meta/src/cache/table/view_info.rs (new file)
@@ -0,0 +1,143 @@
+// Copyright 2023 Greptime Team
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+use std::sync::Arc;
+
+use futures::future::BoxFuture;
+use moka::future::Cache;
+use snafu::OptionExt;
+use store_api::storage::TableId;
+
+use crate::cache::{CacheContainer, Initializer};
+use crate::error;
+use crate::error::Result;
+use crate::instruction::CacheIdent;
+use crate::key::view_info::{ViewInfoManager, ViewInfoManagerRef, ViewInfoValue};
+use crate::kv_backend::KvBackendRef;
+
+/// [ViewInfoCache] caches the [TableId] to [ViewInfoValue] mapping.
+pub type ViewInfoCache = CacheContainer<TableId, Arc<ViewInfoValue>, CacheIdent>;
+
+pub type ViewInfoCacheRef = Arc<ViewInfoCache>;
+
+/// Constructs a [ViewInfoCache].
+pub fn new_view_info_cache(
+    name: String,
+    cache: Cache<TableId, Arc<ViewInfoValue>>,
+    kv_backend: KvBackendRef,
+) -> ViewInfoCache {
+    let view_info_manager = Arc::new(ViewInfoManager::new(kv_backend));
+    let init = init_factory(view_info_manager);
+
+    CacheContainer::new(name, cache, Box::new(invalidator), init, Box::new(filter))
+}
+
+fn init_factory(view_info_manager: ViewInfoManagerRef) -> Initializer<TableId, Arc<ViewInfoValue>> {
+    Arc::new(move |view_id| {
+        let view_info_manager = view_info_manager.clone();
+        Box::pin(async move {
+            let view_info = view_info_manager
+                .get(*view_id)
+                .await?
+                .context(error::ValueNotExistSnafu {})?
+                .into_inner();
+
+            Ok(Some(Arc::new(view_info)))
+        })
+    })
+}
+
+fn invalidator<'a>(
+    cache: &'a Cache<TableId, Arc<ViewInfoValue>>,
+    ident: &'a CacheIdent,
+) -> BoxFuture<'a, Result<()>> {
+    Box::pin(async move {
+        if let CacheIdent::TableId(table_id) = ident {
+            cache.invalidate(table_id).await
+        }
+        Ok(())
+    })
+}
+
+fn filter(ident: &CacheIdent) -> bool {
+    matches!(ident, CacheIdent::TableId(_))
+}
+
+#[cfg(test)]
+mod tests {
+    use std::collections::HashSet;
+    use std::sync::Arc;
+
+    use moka::future::CacheBuilder;
+    use table::table_name::TableName;
+
+    use super::*;
+    use crate::ddl::tests::create_view::test_create_view_task;
+    use crate::key::TableMetadataManager;
+    use crate::kv_backend::memory::MemoryKvBackend;
+
+    #[tokio::test]
+    async fn test_view_info_cache() {
+        let mem_kv = Arc::new(MemoryKvBackend::default());
+        let table_metadata_manager = TableMetadataManager::new(mem_kv.clone());
+        let cache = CacheBuilder::new(128).build();
+        let cache = new_view_info_cache("test".to_string(), cache, mem_kv.clone());
+
+        let result = cache.get(1024).await.unwrap();
+        assert!(result.is_none());
+        let mut task = test_create_view_task("my_view");
+        let table_names = {
+            let mut set = HashSet::new();
+            set.insert(TableName {
+                catalog_name: "greptime".to_string(),
+                schema_name: "public".to_string(),
+                table_name: "a_table".to_string(),
+            });
+            set.insert(TableName {
+                catalog_name: "greptime".to_string(),
+                schema_name: "public".to_string(),
+                table_name: "b_table".to_string(),
+            });
+            set
+        };
+
+        task.view_info.ident.table_id = 1024;
+        table_metadata_manager
+            .create_view_metadata(
+                task.view_info.clone(),
+                task.create_view.logical_plan.clone(),
+                table_names,
+            )
+            .await
+            .unwrap();
+
+        let view_info = cache.get(1024).await.unwrap().unwrap();
+        assert_eq!(view_info.view_info, task.create_view.logical_plan);
+        assert_eq!(
+            view_info.table_names,
+            task.create_view
+                .table_names
+                .iter()
+                .map(|t| t.clone().into())
+                .collect::<HashSet<_>>()
+        );
+
+        assert!(cache.contains_key(&1024));
+        cache
+            .invalidate(&[CacheIdent::TableId(1024)])
+            .await
+            .unwrap();
+        assert!(!cache.contains_key(&1024));
+    }
+}
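
The new cache follows the same filter/invalidator contract as the other table caches: only `CacheIdent::TableId` events are routed to it, and the invalidator evicts the matching entry. A condensed illustration of that contract, written as it might appear in the module's own tests (the concrete values are made up):

// TableId idents pass the filter and evict the entry; other idents are ignored.
assert!(filter(&CacheIdent::TableId(1024)));
assert!(!filter(&CacheIdent::TableName(TableName::new(
    "greptime", "public", "a_table",
))));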
@@ -48,7 +48,7 @@ pub mod table_meta;
#[cfg(any(test, feature = "testing"))]
pub mod test_util;
#[cfg(test)]
-mod tests;
+pub(crate) mod tests;
pub mod truncate_table;
pub mod utils;

@@ -13,10 +13,10 @@
// limitations under the License.

use table::metadata::RawTableInfo;
+use table::table_name::TableName;

use crate::ddl::alter_logical_tables::AlterLogicalTablesProcedure;
use crate::instruction::CacheIdent;
-use crate::table_name::TableName;

impl AlterLogicalTablesProcedure {
    pub(crate) fn build_table_cache_keys_to_invalidate(&self) -> Vec<CacheIdent> {
@@ -18,13 +18,13 @@ use common_telemetry::{info, warn};
use itertools::Itertools;
use snafu::OptionExt;
use table::metadata::TableId;
+use table::table_name::TableName;

use crate::cache_invalidator::Context;
use crate::ddl::create_logical_tables::CreateLogicalTablesProcedure;
use crate::ddl::physical_table_metadata;
use crate::error::{Result, TableInfoNotFoundSnafu};
use crate::instruction::CacheIdent;
-use crate::table_name::TableName;

impl CreateLogicalTablesProcedure {
    pub(crate) async fn update_physical_table_metadata(&mut self) -> Result<()> {
@@ -22,9 +22,11 @@ use strum::AsRefStr;
use table::metadata::{RawTableInfo, TableId, TableType};
use table::table_reference::TableReference;

+use crate::cache_invalidator::Context;
use crate::ddl::utils::handle_retry_error;
use crate::ddl::{DdlContext, TableMetadata, TableMetadataAllocatorContext};
use crate::error::{self, Result};
+use crate::instruction::CacheIdent;
use crate::key::table_name::TableNameKey;
use crate::lock_key::{CatalogLock, SchemaLock, TableNameLock};
use crate::rpc::ddl::CreateViewTask;
@@ -157,6 +159,25 @@ impl CreateViewProcedure {
        Ok(Status::executing(true))
    }

+    async fn invalidate_view_cache(&self) -> Result<()> {
+        let cache_invalidator = &self.context.cache_invalidator;
+        let ctx = Context {
+            subject: Some("Invalidate view cache by creating view".to_string()),
+        };
+
+        cache_invalidator
+            .invalidate(
+                &ctx,
+                &[
+                    CacheIdent::TableName(self.data.table_ref().into()),
+                    CacheIdent::TableId(self.view_id()),
+                ],
+            )
+            .await?;
+
+        Ok(())
+    }
+
    /// Creates view metadata
    ///
    /// Abort(not-retry):
@@ -175,15 +196,21 @@ impl CreateViewProcedure {
                view_name: self.data.table_ref().to_string(),
            })?;
            let new_logical_plan = self.data.task.raw_logical_plan().clone();
+            let table_names = self.data.task.table_names();

            manager
-                .update_view_info(view_id, &current_view_info, new_logical_plan)
+                .update_view_info(view_id, &current_view_info, new_logical_plan, table_names)
                .await?;

            info!("Updated view metadata for view {view_id}");
        } else {
            let raw_view_info = self.view_info().clone();
            manager
-                .create_view_metadata(raw_view_info, self.data.task.raw_logical_plan())
+                .create_view_metadata(
+                    raw_view_info,
+                    self.data.task.raw_logical_plan().clone(),
+                    self.data.task.table_names(),
+                )
                .await?;

            info!(
@@ -191,6 +218,7 @@ impl CreateViewProcedure {
                ctx.procedure_id
            );
        }
+        self.invalidate_view_cache().await?;

        Ok(Status::done_with_output(view_id))
    }
@@ -14,19 +14,23 @@

use std::any::Any;

+use common_catalog::format_full_table_name;
use common_procedure::Status;
use futures::TryStreamExt;
use serde::{Deserialize, Serialize};
-use table::metadata::TableId;
+use snafu::OptionExt;
+use table::metadata::{TableId, TableType};
+use table::table_name::TableName;

use super::executor::DropDatabaseExecutor;
use super::metadata::DropDatabaseRemoveMetadata;
use super::DropTableTarget;
+use crate::cache_invalidator::Context;
use crate::ddl::drop_database::{DropDatabaseContext, State};
use crate::ddl::DdlContext;
-use crate::error::Result;
+use crate::error::{Result, TableInfoNotFoundSnafu};
+use crate::instruction::CacheIdent;
use crate::key::table_route::TableRouteValue;
-use crate::table_name::TableName;

#[derive(Debug, Serialize, Deserialize)]
pub(crate) struct DropDatabaseCursor {
@@ -101,6 +105,40 @@ impl DropDatabaseCursor {
            )),
        }
    }
+
+    async fn handle_view(
+        &self,
+        ddl_ctx: &DdlContext,
+        ctx: &mut DropDatabaseContext,
+        table_name: String,
+        table_id: TableId,
+    ) -> Result<(Box<dyn State>, Status)> {
+        let view_name = TableName::new(&ctx.catalog, &ctx.schema, &table_name);
+        ddl_ctx
+            .table_metadata_manager
+            .destroy_view_info(table_id, &view_name)
+            .await?;
+
+        let cache_invalidator = &ddl_ctx.cache_invalidator;
+        let ctx = Context {
+            subject: Some("Invalidate table cache by dropping table".to_string()),
+        };
+
+        cache_invalidator
+            .invalidate(
+                &ctx,
+                &[
+                    CacheIdent::TableName(view_name),
+                    CacheIdent::TableId(table_id),
+                ],
+            )
+            .await?;
+
+        Ok((
+            Box::new(DropDatabaseCursor::new(self.target)),
+            Status::executing(false),
+        ))
+    }
}

#[async_trait::async_trait]
@@ -122,6 +160,20 @@ impl State for DropDatabaseCursor {
        match ctx.tables.as_mut().unwrap().try_next().await? {
            Some((table_name, table_name_value)) => {
                let table_id = table_name_value.table_id();
+
+                let table_info_value = ddl_ctx
+                    .table_metadata_manager
+                    .table_info_manager()
+                    .get(table_id)
+                    .await?
+                    .with_context(|| TableInfoNotFoundSnafu {
+                        table: format_full_table_name(&ctx.catalog, &ctx.schema, &table_name),
+                    })?;
+
+                if table_info_value.table_info.table_type == TableType::View {
+                    return self.handle_view(ddl_ctx, ctx, table_name, table_id).await;
+                }
+
                match ddl_ctx
                    .table_metadata_manager
                    .table_route_manager()
@@ -19,6 +19,7 @@ use common_telemetry::info;
use serde::{Deserialize, Serialize};
use snafu::OptionExt;
use table::metadata::TableId;
+use table::table_name::TableName;

use super::cursor::DropDatabaseCursor;
use super::{DropDatabaseContext, DropTableTarget};
@@ -29,7 +30,6 @@ use crate::error::{self, Result};
use crate::key::table_route::TableRouteValue;
use crate::region_keeper::OperatingRegionGuard;
use crate::rpc::router::{operating_leader_regions, RegionRoute};
-use crate::table_name::TableName;

#[derive(Debug, Serialize, Deserialize)]
pub(crate) struct DropDatabaseExecutor {
@@ -131,10 +131,12 @@ mod tests {
    use std::sync::Arc;

    use api::region::RegionResponse;
-    use api::v1::region::{QueryRequest, RegionRequest};
+    use api::v1::region::RegionRequest;
    use common_catalog::consts::{DEFAULT_CATALOG_NAME, DEFAULT_SCHEMA_NAME};
    use common_error::ext::BoxedError;
+    use common_query::request::QueryRequest;
    use common_recordbatch::SendableRecordBatchStream;
+    use table::table_name::TableName;

    use crate::ddl::drop_database::cursor::DropDatabaseCursor;
    use crate::ddl::drop_database::executor::DropDatabaseExecutor;
@@ -144,7 +146,6 @@ mod tests {
    use crate::key::datanode_table::DatanodeTableKey;
    use crate::peer::Peer;
    use crate::rpc::router::region_distribution;
-    use crate::table_name::TableName;
    use crate::test_util::{new_ddl_context, MockDatanodeHandler, MockDatanodeManager};

    #[derive(Clone)]
@@ -23,6 +23,7 @@ use futures::future::join_all;
use snafu::ensure;
use store_api::storage::RegionId;
use table::metadata::TableId;
+use table::table_name::TableName;

use crate::cache_invalidator::Context;
use crate::ddl::utils::add_peer_context_if_needed;
@@ -32,7 +33,6 @@ use crate::instruction::CacheIdent;
use crate::key::table_name::TableNameKey;
use crate::key::table_route::TableRouteValue;
use crate::rpc::router::{find_leader_regions, find_leaders, RegionRoute};
-use crate::table_name::TableName;

/// [Control] indicated to the caller whether to go to the next step.
#[derive(Debug)]
@@ -224,6 +224,7 @@ mod tests {
    use api::v1::{ColumnDataType, SemanticType};
    use common_catalog::consts::{DEFAULT_CATALOG_NAME, DEFAULT_SCHEMA_NAME};
    use table::metadata::RawTableInfo;
+    use table::table_name::TableName;

    use super::*;
    use crate::ddl::test_util::columns::TestColumnDefBuilder;
@@ -231,7 +232,6 @@ mod tests {
        build_raw_table_info_from_expr, TestCreateTableExprBuilder,
    };
    use crate::key::table_route::TableRouteValue;
-    use crate::table_name::TableName;
    use crate::test_util::{new_ddl_context, MockDatanodeManager};

    fn test_create_raw_table_info(name: &str) -> RawTableInfo {
@@ -13,9 +13,10 @@
// limitations under the License.

use api::region::RegionResponse;
-use api::v1::region::{QueryRequest, RegionRequest};
+use api::v1::region::RegionRequest;
use common_error::ext::{BoxedError, ErrorExt, StackError};
use common_error::status_code::StatusCode;
+use common_query::request::QueryRequest;
use common_recordbatch::SendableRecordBatchStream;
use common_telemetry::debug;
use snafu::{ResultExt, Snafu};
@@ -17,7 +17,7 @@ mod alter_table;
mod create_flow;
mod create_logical_tables;
mod create_table;
-mod create_view;
+pub(crate) mod create_view;
mod drop_database;
mod drop_flow;
mod drop_table;
@@ -19,6 +19,7 @@ use std::sync::Arc;
use common_catalog::consts::{DEFAULT_CATALOG_NAME, DEFAULT_SCHEMA_NAME};
use common_procedure_test::execute_procedure_until_done;
use session::context::QueryContext;
+use table::table_name::TableName;

use crate::ddl::create_flow::CreateFlowProcedure;
use crate::ddl::test_util::create_table::test_create_table_task;
@@ -27,7 +28,6 @@ use crate::ddl::DdlContext;
use crate::key::table_route::TableRouteValue;
use crate::key::FlowId;
use crate::rpc::ddl::CreateFlowTask;
-use crate::table_name::TableName;
use crate::test_util::{new_ddl_context, MockFlownodeManager};
use crate::{error, ClusterId};

@@ -13,9 +13,10 @@
// limitations under the License.

use std::assert_matches::assert_matches;
+use std::collections::HashSet;
use std::sync::Arc;

-use api::v1::CreateViewExpr;
+use api::v1::{CreateViewExpr, TableName};
use common_error::ext::ErrorExt;
use common_error::status_code::StatusCode;
use common_procedure::{Context as ProcedureContext, Procedure, ProcedureId, Status};
@@ -31,7 +32,35 @@ use crate::error::Error;
use crate::rpc::ddl::CreateViewTask;
use crate::test_util::{new_ddl_context, MockDatanodeManager};

-fn test_create_view_task(name: &str) -> CreateViewTask {
+fn test_table_names() -> HashSet<table::table_name::TableName> {
+    let mut set = HashSet::new();
+    set.insert(table::table_name::TableName {
+        catalog_name: "greptime".to_string(),
+        schema_name: "public".to_string(),
+        table_name: "a_table".to_string(),
+    });
+    set.insert(table::table_name::TableName {
+        catalog_name: "greptime".to_string(),
+        schema_name: "public".to_string(),
+        table_name: "b_table".to_string(),
+    });
+    set
+}
+
+pub(crate) fn test_create_view_task(name: &str) -> CreateViewTask {
+    let table_names = vec![
+        TableName {
+            catalog_name: "greptime".to_string(),
+            schema_name: "public".to_string(),
+            table_name: "a_table".to_string(),
+        },
+        TableName {
+            catalog_name: "greptime".to_string(),
+            schema_name: "public".to_string(),
+            table_name: "b_table".to_string(),
+        },
+    ];
+
    let expr = CreateViewExpr {
        catalog_name: "greptime".to_string(),
        schema_name: "public".to_string(),
@@ -39,6 +68,7 @@ fn test_create_view_task(name: &str) -> CreateViewTask {
        or_replace: false,
        create_if_not_exists: false,
        logical_plan: vec![1, 2, 3],
+        table_names,
    };

    let view_info = RawTableInfo {
@@ -70,7 +100,11 @@ async fn test_on_prepare_view_exists_err() {
    // Puts a value to table name key.
    ddl_context
        .table_metadata_manager
-        .create_view_metadata(task.view_info.clone(), &task.create_view.logical_plan)
+        .create_view_metadata(
+            task.view_info.clone(),
+            task.create_view.logical_plan.clone(),
+            test_table_names(),
+        )
        .await
        .unwrap();
    let mut procedure = CreateViewProcedure::new(cluster_id, task, ddl_context);
@@ -90,7 +124,11 @@ async fn test_on_prepare_with_create_if_view_exists() {
    // Puts a value to table name key.
    ddl_context
        .table_metadata_manager
-        .create_view_metadata(task.view_info.clone(), &task.create_view.logical_plan)
+        .create_view_metadata(
+            task.view_info.clone(),
+            task.create_view.logical_plan.clone(),
+            test_table_names(),
+        )
        .await
        .unwrap();
    let mut procedure = CreateViewProcedure::new(cluster_id, task, ddl_context);
@@ -18,6 +18,7 @@ use std::sync::Arc;

use common_catalog::consts::{DEFAULT_CATALOG_NAME, DEFAULT_SCHEMA_NAME};
use common_procedure_test::execute_procedure_until_done;
+use table::table_name::TableName;

use crate::ddl::drop_flow::DropFlowProcedure;
use crate::ddl::test_util::create_table::test_create_table_task;
@@ -26,7 +27,6 @@ use crate::ddl::tests::create_flow::create_test_flow;
use crate::error;
use crate::key::table_route::TableRouteValue;
use crate::rpc::ddl::DropFlowTask;
-use crate::table_name::TableName;
use crate::test_util::{new_ddl_context, MockFlownodeManager};

fn test_drop_flow_task(flow_name: &str, flow_id: u32, drop_if_exists: bool) -> DropFlowTask {
@@ -28,6 +28,7 @@ use snafu::{ensure, ResultExt};
use store_api::storage::RegionId;
use strum::AsRefStr;
use table::metadata::{RawTableInfo, TableId};
+use table::table_name::TableName;
use table::table_reference::TableReference;

use super::utils::handle_retry_error;
@@ -40,7 +41,6 @@ use crate::key::DeserializedValueWithBytes;
use crate::lock_key::{CatalogLock, SchemaLock, TableLock};
use crate::rpc::ddl::TruncateTableTask;
use crate::rpc::router::{find_leader_regions, find_leaders, RegionRoute};
-use crate::table_name::TableName;
use crate::{metrics, ClusterId};

pub struct TruncateTableProcedure {
@@ -489,8 +489,7 @@ async fn handle_create_table_task(

    Ok(SubmitDdlTaskResponse {
        key: procedure_id.into(),
-        table_id: Some(table_id),
-        ..Default::default()
+        table_ids: vec![table_id],
    })
}

@@ -534,7 +533,6 @@ async fn handle_create_logical_table_tasks(
    Ok(SubmitDdlTaskResponse {
        key: procedure_id.into(),
        table_ids,
-        ..Default::default()
    })
}

@@ -690,8 +688,7 @@ async fn handle_create_view_task(

    Ok(SubmitDdlTaskResponse {
        key: procedure_id.into(),
-        table_id: Some(view_id),
-        ..Default::default()
+        table_ids: vec![view_id],
    })
}

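
With `table_id: Option<TableId>` replaced by `table_ids: Vec<TableId>` (and the struct no longer filled via `..Default::default()`), callers that previously unwrapped the option now read the vector; a hedged sketch of the consumer-side adjustment (the `response` binding is illustrative, not code from this change):

// Single-table DDLs now put exactly one id into `table_ids`.
let created_table_id: Option<TableId> = response.table_ids.first().copied();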
|
|||||||
@@ -20,11 +20,11 @@ use serde::{Deserialize, Serialize};
|
|||||||
use store_api::storage::{RegionId, RegionNumber};
|
use store_api::storage::{RegionId, RegionNumber};
|
||||||
use strum::Display;
|
use strum::Display;
|
||||||
use table::metadata::TableId;
|
use table::metadata::TableId;
|
||||||
|
use table::table_name::TableName;
|
||||||
|
|
||||||
use crate::flow_name::FlowName;
|
use crate::flow_name::FlowName;
|
||||||
use crate::key::schema_name::SchemaName;
|
use crate::key::schema_name::SchemaName;
|
||||||
use crate::key::FlowId;
|
use crate::key::FlowId;
|
||||||
use crate::table_name::TableName;
|
|
||||||
use crate::{ClusterId, DatanodeId, FlownodeId};
|
use crate::{ClusterId, DatanodeId, FlownodeId};
|
||||||
|
|
||||||
#[derive(Eq, Hash, PartialEq, Clone, Debug, Serialize, Deserialize)]
|
#[derive(Eq, Hash, PartialEq, Clone, Debug, Serialize, Deserialize)]
|
||||||
|
|||||||
@@ -89,9 +89,6 @@ pub mod flow;
|
|||||||
pub mod schema_name;
|
pub mod schema_name;
|
||||||
pub mod table_info;
|
pub mod table_info;
|
||||||
pub mod table_name;
|
pub mod table_name;
|
||||||
// TODO(weny): removes it.
|
|
||||||
#[allow(deprecated)]
|
|
||||||
pub mod table_region;
|
|
||||||
pub mod view_info;
|
pub mod view_info;
|
||||||
// TODO(weny): removes it.
|
// TODO(weny): removes it.
|
||||||
#[allow(deprecated)]
|
#[allow(deprecated)]
|
||||||
@@ -119,6 +116,7 @@ use serde::{Deserialize, Serialize};
|
|||||||
use snafu::{ensure, OptionExt, ResultExt};
|
use snafu::{ensure, OptionExt, ResultExt};
|
||||||
use store_api::storage::RegionNumber;
|
use store_api::storage::RegionNumber;
|
||||||
use table::metadata::{RawTableInfo, TableId};
|
use table::metadata::{RawTableInfo, TableId};
|
||||||
|
use table::table_name::TableName;
|
||||||
use table_info::{TableInfoKey, TableInfoManager, TableInfoValue};
|
use table_info::{TableInfoKey, TableInfoManager, TableInfoValue};
|
||||||
use table_name::{TableNameKey, TableNameManager, TableNameValue};
|
use table_name::{TableNameKey, TableNameManager, TableNameValue};
|
||||||
use view_info::{ViewInfoKey, ViewInfoManager, ViewInfoValue};
|
use view_info::{ViewInfoKey, ViewInfoManager, ViewInfoValue};
|
||||||
@@ -138,14 +136,12 @@ use crate::kv_backend::txn::{Txn, TxnOp};
|
|||||||
use crate::kv_backend::KvBackendRef;
|
use crate::kv_backend::KvBackendRef;
|
||||||
use crate::rpc::router::{region_distribution, RegionRoute, RegionStatus};
|
use crate::rpc::router::{region_distribution, RegionRoute, RegionStatus};
|
||||||
use crate::rpc::store::BatchDeleteRequest;
|
use crate::rpc::store::BatchDeleteRequest;
|
||||||
use crate::table_name::TableName;
|
|
||||||
use crate::DatanodeId;
|
use crate::DatanodeId;
|
||||||
|
|
||||||
pub const NAME_PATTERN: &str = r"[a-zA-Z_:-][a-zA-Z0-9_:\-\.]*";
|
pub const NAME_PATTERN: &str = r"[a-zA-Z_:-][a-zA-Z0-9_:\-\.]*";
|
||||||
pub const MAINTENANCE_KEY: &str = "maintenance";
|
pub const MAINTENANCE_KEY: &str = "maintenance";
|
||||||
|
|
||||||
const DATANODE_TABLE_KEY_PREFIX: &str = "__dn_table";
|
const DATANODE_TABLE_KEY_PREFIX: &str = "__dn_table";
|
||||||
const TABLE_REGION_KEY_PREFIX: &str = "__table_region";
|
|
||||||
pub const TABLE_INFO_KEY_PREFIX: &str = "__table_info";
|
pub const TABLE_INFO_KEY_PREFIX: &str = "__table_info";
|
||||||
pub const VIEW_INFO_KEY_PREFIX: &str = "__view_info";
|
pub const VIEW_INFO_KEY_PREFIX: &str = "__view_info";
|
||||||
pub const TABLE_NAME_KEY_PREFIX: &str = "__table_name";
|
pub const TABLE_NAME_KEY_PREFIX: &str = "__table_name";
|
||||||
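Illustrative note (not part of the diff above): the hunks that replace `table_id: Some(table_id)` with `table_ids: vec![table_id]` suggest `SubmitDdlTaskResponse` now reports created table ids as a vector rather than a single optional id. A minimal sketch of how a caller might read the id of a single-table DDL under that shape; the helper name and the import path of `SubmitDdlTaskResponse` are assumptions.

use table::metadata::TableId;

// Hypothetical helper: with the response carrying `table_ids`, a single-table
// DDL's id is simply the first (and only) element of the vector.
// `SubmitDdlTaskResponse` is assumed to live next to the other DDL RPC types
// shown above (e.g. `crate::rpc::ddl`).
fn created_table_id(resp: &SubmitDdlTaskResponse) -> Option<TableId> {
    resp.table_ids.first().copied()
}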
@@ -490,7 +486,8 @@ impl TableMetadataManager {
     pub async fn create_view_metadata(
         &self,
         view_info: RawTableInfo,
-        raw_logical_plan: &Vec<u8>,
+        raw_logical_plan: Vec<u8>,
+        table_names: HashSet<TableName>,
     ) -> Result<()> {
         let view_id = view_info.ident.table_id;
 
@@ -512,7 +509,7 @@ impl TableMetadataManager {
             .build_create_txn(view_id, &table_info_value)?;
 
         // Creates view info
-        let view_info_value = ViewInfoValue::new(raw_logical_plan);
+        let view_info_value = ViewInfoValue::new(raw_logical_plan, table_names);
         let (create_view_info_txn, on_create_view_info_failure) = self
             .view_info_manager()
             .build_create_txn(view_id, &view_info_value)?;
@@ -804,6 +801,33 @@ impl TableMetadataManager {
         Ok(())
     }
 
+    fn view_info_keys(&self, view_id: TableId, view_name: &TableName) -> Result<Vec<Vec<u8>>> {
+        let mut keys = Vec::with_capacity(3);
+        let view_name = TableNameKey::new(
+            &view_name.catalog_name,
+            &view_name.schema_name,
+            &view_name.table_name,
+        );
+        let table_info_key = TableInfoKey::new(view_id);
+        let view_info_key = ViewInfoKey::new(view_id);
+        keys.push(view_name.to_bytes());
+        keys.push(table_info_key.to_bytes());
+        keys.push(view_info_key.to_bytes());
+
+        Ok(keys)
+    }
+
+    /// Deletes metadata for view **permanently**.
+    /// The caller MUST ensure it has the exclusive access to `ViewNameKey`.
+    pub async fn destroy_view_info(&self, view_id: TableId, view_name: &TableName) -> Result<()> {
+        let keys = self.view_info_keys(view_id, view_name)?;
+        let _ = self
+            .kv_backend
+            .batch_delete(BatchDeleteRequest::new().with_keys(keys))
+            .await?;
+        Ok(())
+    }
+
     /// Renames the table name and returns an error if different metadata exists.
     /// The caller MUST ensure it has the exclusive access to old and new `TableNameKey`s,
     /// and the new `TableNameKey` MUST be empty.
@@ -903,8 +927,9 @@ impl TableMetadataManager {
         view_id: TableId,
         current_view_info_value: &DeserializedValueWithBytes<ViewInfoValue>,
         new_view_info: Vec<u8>,
+        table_names: HashSet<TableName>,
     ) -> Result<()> {
-        let new_view_info_value = current_view_info_value.update(new_view_info);
+        let new_view_info_value = current_view_info_value.update(new_view_info, table_names);
 
         // Updates view info.
         let (update_view_info_txn, on_update_view_info_failure) = self
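Illustrative note (not part of the diff above): with these hunks, view metadata creation takes the logical plan by value together with the set of tables the view references, and `update_view_info` threads the same set into `ViewInfoValue::update`. A rough sketch of a caller under those signatures; the function name and the surrounding setup are invented for illustration, while the struct fields mirror the test code above.

use std::collections::HashSet;

use table::metadata::RawTableInfo;
use table::table_name::TableName;

// Sketch only: `TableMetadataManager` and `Result` are the crate-local types
// used in the hunks above; `register_view` itself is a made-up example.
async fn register_view(
    manager: &TableMetadataManager,
    view_info: RawTableInfo,
    logical_plan: Vec<u8>,
) -> Result<()> {
    let mut table_names = HashSet::new();
    table_names.insert(TableName {
        catalog_name: "greptime".to_string(),
        schema_name: "public".to_string(),
        table_name: "a_table".to_string(),
    });

    // The plan bytes move in by value and the referenced table names are
    // persisted alongside them in `ViewInfoValue`.
    manager
        .create_view_metadata(view_info, logical_plan, table_names)
        .await
}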
@@ -1174,7 +1199,7 @@ impl_optional_meta_value! {
 
 #[cfg(test)]
 mod tests {
-    use std::collections::{BTreeMap, HashMap};
+    use std::collections::{BTreeMap, HashMap, HashSet};
     use std::sync::Arc;
 
     use bytes::Bytes;
@@ -1183,6 +1208,7 @@ mod tests {
     use futures::TryStreamExt;
     use store_api::storage::RegionId;
     use table::metadata::{RawTableInfo, TableInfo};
+    use table::table_name::TableName;
 
     use super::datanode_table::DatanodeTableKey;
     use super::test_utils;
@@ -1197,7 +1223,6 @@ mod tests {
     use crate::kv_backend::memory::MemoryKvBackend;
     use crate::peer::Peer;
     use crate::rpc::router::{region_distribution, Region, RegionRoute, RegionStatus};
-    use crate::table_name::TableName;
 
     #[test]
     fn test_deserialized_value_with_bytes() {
@@ -1250,6 +1275,21 @@ mod tests {
         test_utils::new_test_table_info(10, region_numbers)
     }
 
+    fn new_test_table_names() -> HashSet<TableName> {
+        let mut set = HashSet::new();
+        set.insert(TableName {
+            catalog_name: "greptime".to_string(),
+            schema_name: "public".to_string(),
+            table_name: "a_table".to_string(),
+        });
+        set.insert(TableName {
+            catalog_name: "greptime".to_string(),
+            schema_name: "public".to_string(),
+            table_name: "b_table".to_string(),
+        });
+        set
+    }
+
     async fn create_physical_table_metadata(
         table_metadata_manager: &TableMetadataManager,
         table_info: RawTableInfo,
@@ -1961,9 +2001,11 @@ mod tests {
 
         let logical_plan: Vec<u8> = vec![1, 2, 3];
 
+        let table_names = new_test_table_names();
+
         // Create metadata
         table_metadata_manager
-            .create_view_metadata(view_info.clone(), &logical_plan)
+            .create_view_metadata(view_info.clone(), logical_plan.clone(), table_names.clone())
             .await
             .unwrap();
 
@@ -1977,6 +2019,7 @@ mod tests {
             .unwrap()
             .into_inner();
         assert_eq!(current_view_info.view_info, logical_plan);
+        assert_eq!(current_view_info.table_names, table_names);
         // assert table info
         let current_table_info = table_metadata_manager
             .table_info_manager()
@@ -1989,16 +2032,43 @@ mod tests {
         }
 
         let new_logical_plan: Vec<u8> = vec![4, 5, 6];
-        let current_view_info_value =
-            DeserializedValueWithBytes::from_inner(ViewInfoValue::new(&logical_plan));
+        let new_table_names = {
+            let mut set = HashSet::new();
+            set.insert(TableName {
+                catalog_name: "greptime".to_string(),
+                schema_name: "public".to_string(),
+                table_name: "b_table".to_string(),
+            });
+            set.insert(TableName {
+                catalog_name: "greptime".to_string(),
+                schema_name: "public".to_string(),
+                table_name: "c_table".to_string(),
+            });
+            set
+        };
+
+        let current_view_info_value = DeserializedValueWithBytes::from_inner(ViewInfoValue::new(
+            logical_plan.clone(),
+            table_names,
+        ));
         // should be ok.
         table_metadata_manager
-            .update_view_info(view_id, &current_view_info_value, new_logical_plan.clone())
+            .update_view_info(
+                view_id,
+                &current_view_info_value,
+                new_logical_plan.clone(),
+                new_table_names.clone(),
+            )
             .await
             .unwrap();
         // if table info was updated, it should be ok.
         table_metadata_manager
-            .update_view_info(view_id, &current_view_info_value, new_logical_plan.clone())
+            .update_view_info(
+                view_id,
+                &current_view_info_value,
+                new_logical_plan.clone(),
+                new_table_names.clone(),
+            )
             .await
             .unwrap();
 
@@ -2011,14 +2081,21 @@ mod tests {
             .unwrap()
             .into_inner();
         assert_eq!(updated_view_info.view_info, new_logical_plan);
+        assert_eq!(updated_view_info.table_names, new_table_names);
 
         let wrong_view_info = logical_plan.clone();
-        let wrong_view_info_value =
-            DeserializedValueWithBytes::from_inner(current_view_info_value.update(wrong_view_info));
+        let wrong_view_info_value = DeserializedValueWithBytes::from_inner(
+            current_view_info_value.update(wrong_view_info, new_table_names.clone()),
+        );
         // if the current_view_info_value is wrong, it should return an error.
         // The ABA problem.
         assert!(table_metadata_manager
-            .update_view_info(view_id, &wrong_view_info_value, new_logical_plan.clone())
+            .update_view_info(
+                view_id,
+                &wrong_view_info_value,
+                new_logical_plan.clone(),
+                new_table_names.clone(),
+            )
             .await
             .is_err());
 
@@ -2031,5 +2108,6 @@ mod tests {
             .unwrap()
             .into_inner();
         assert_eq!(current_view_info.view_info, new_logical_plan);
+        assert_eq!(current_view_info.table_names, new_table_names);
     }
 }
@@ -72,12 +72,8 @@ impl DatanodeTableKey {
         }
     }
 
-    fn prefix(datanode_id: DatanodeId) -> String {
-        format!("{}/{datanode_id}", DATANODE_TABLE_KEY_PREFIX)
-    }
-
-    pub fn range_start_key(datanode_id: DatanodeId) -> String {
-        format!("{}/", Self::prefix(datanode_id))
+    pub fn prefix(datanode_id: DatanodeId) -> String {
+        format!("{}/{datanode_id}/", DATANODE_TABLE_KEY_PREFIX)
     }
 }
 
@@ -114,7 +110,7 @@ impl<'a> MetaKey<'a, DatanodeTableKey> for DatanodeTableKey {
 
 impl Display for DatanodeTableKey {
     fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
-        write!(f, "{}/{}", Self::prefix(self.datanode_id), self.table_id)
+        write!(f, "{}{}", Self::prefix(self.datanode_id), self.table_id)
     }
 }
 
@@ -164,7 +160,7 @@ impl DatanodeTableManager {
         &self,
         datanode_id: DatanodeId,
     ) -> BoxStream<'static, Result<DatanodeTableValue>> {
-        let start_key = DatanodeTableKey::range_start_key(datanode_id);
+        let start_key = DatanodeTableKey::prefix(datanode_id);
         let req = RangeRequest::new().with_prefix(start_key.as_bytes());
 
         let stream = PaginationStream::new(
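Illustrative note (not part of the diff above): `DatanodeTableKey::prefix` now ends with a trailing `/` and replaces `range_start_key`, so a prefix scan for datanode 1 can no longer match keys that belong to datanode 10, 11, and so on, and `Display` simply appends the table id to the prefix. A standalone sketch of the resulting key layout; the free functions stand in for the `DatanodeTableKey` methods, and plain integers stand in for `DatanodeId` and `TableId`.

// Mirrors the constant and format strings in the hunks above.
const DATANODE_TABLE_KEY_PREFIX: &str = "__dn_table";

fn prefix(datanode_id: u64) -> String {
    // The trailing '/' terminates the datanode id component of the key.
    format!("{}/{datanode_id}/", DATANODE_TABLE_KEY_PREFIX)
}

fn key(datanode_id: u64, table_id: u32) -> String {
    format!("{}{}", prefix(datanode_id), table_id)
}

fn main() {
    assert_eq!(key(1, 42), "__dn_table/1/42");
    // Without the trailing '/', "__dn_table/11/42" would share the scan prefix
    // "__dn_table/1"; with it, the two datanodes no longer collide.
    assert!(!key(11, 42).starts_with(&prefix(1)));
}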
@@ -12,7 +12,7 @@
 // See the License for the specific language governing permissions and
 // limitations under the License.
 
-pub(crate) mod flow_info;
+pub mod flow_info;
 pub(crate) mod flow_name;
 pub(crate) mod flownode_flow;
 pub(crate) mod table_flow;
@@ -262,12 +262,12 @@ mod tests {
 
     use futures::TryStreamExt;
     use table::metadata::TableId;
+    use table::table_name::TableName;
 
     use super::*;
     use crate::key::flow::table_flow::TableFlowKey;
     use crate::key::FlowPartitionId;
     use crate::kv_backend::memory::MemoryKvBackend;
-    use crate::table_name::TableName;
     use crate::FlownodeId;
 
     #[derive(Debug)]
@@ -20,6 +20,7 @@ use regex::Regex;
 use serde::{Deserialize, Serialize};
 use snafu::OptionExt;
 use table::metadata::TableId;
+use table::table_name::TableName;
 
 use crate::error::{self, Result};
 use crate::key::flow::FlowScoped;
@@ -27,7 +28,6 @@ use crate::key::txn_helper::TxnOpGetResponseSet;
 use crate::key::{DeserializedValueWithBytes, FlowId, FlowPartitionId, MetaKey, TableMetaValue};
 use crate::kv_backend::txn::Txn;
 use crate::kv_backend::KvBackendRef;
-use crate::table_name::TableName;
 use crate::FlownodeId;
 
 const FLOW_INFO_KEY_PREFIX: &str = "info";
@@ -141,6 +141,26 @@ impl FlowInfoValue {
     pub fn source_table_ids(&self) -> &[TableId] {
         &self.source_table_ids
     }
+
+    pub fn flow_name(&self) -> &String {
+        &self.flow_name
+    }
+
+    pub fn sink_table_name(&self) -> &TableName {
+        &self.sink_table_name
+    }
+
+    pub fn raw_sql(&self) -> &String {
+        &self.raw_sql
+    }
+
+    pub fn expire_after(&self) -> Option<i64> {
+        self.expire_after
+    }
+
+    pub fn comment(&self) -> &String {
+        &self.comment
+    }
 }
 
 pub type FlowInfoManagerRef = Arc<FlowInfoManager>;
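Illustrative note (not part of the diff above): the new `FlowInfoValue` accessors expose the flow's name, sink table, raw SQL, expiry, and comment without reaching into private fields. A hedged sketch of a read-only consumer; the function is invented and relies only on the getters added above, while how the value is fetched (for example through the flow info manager) is out of scope.

// Sketch only: `FlowInfoValue` is the type extended in the hunk above.
fn describe_flow(info: &FlowInfoValue) -> String {
    let sink = info.sink_table_name();
    format!(
        "flow '{}' -> {}.{}.{} (expire_after: {:?}, sources: {:?})",
        info.flow_name(),
        sink.catalog_name,
        sink.schema_name,
        sink.table_name,
        info.expire_after(),
        info.source_table_ids(),
    )
}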
Some files were not shown because too many files have changed in this diff.